Better error message.
diff --git a/mono/mini/mini-x86.c b/mono/mini/mini-x86.c
index b847659773cf0dc7f3798df395b5e98175ca83c2..23176ccbe9ae6b582537f40c7f0781ab857c485b 100644
--- a/mono/mini/mini-x86.c
+++ b/mono/mini/mini-x86.c
 #include "mini.h"
 #include <string.h>
 #include <math.h>
-
-#ifndef PLATFORM_WIN32
+#ifdef HAVE_UNISTD_H
 #include <unistd.h>
-#include <sys/mman.h>
 #endif
 
 #include <mono/metadata/appdomain.h>
 #include "trace.h"
 #include "mini-x86.h"
 #include "inssel.h"
-#include "cpu-pentium.h"
+#include "cpu-x86.h"
 
 /* On windows, these hold the key returned by TlsAlloc () */
 static gint lmf_tls_offset = -1;
+static gint lmf_addr_tls_offset = -1;
 static gint appdomain_tls_offset = -1;
 static gint thread_tls_offset = -1;
 
+#ifdef MONO_XEN_OPT
+static gboolean optimize_for_xen = TRUE;
+#else
+#define optimize_for_xen 0
+#endif
+
+#ifdef PLATFORM_WIN32
+static gboolean is_win32 = TRUE;
+#else
+static gboolean is_win32 = FALSE;
+#endif
+
+/* This mutex protects architecture specific caches */
+#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
+#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
+static CRITICAL_SECTION mini_arch_mutex;
+
 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
 
+#define ARGS_OFFSET 8
+
 #ifdef PLATFORM_WIN32
 /* Under windows, the default pinvoke calling convention is stdcall */
 #define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
@@ -42,8 +60,6 @@ static gint thread_tls_offset = -1;
 #define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
 #endif
 
-#define SIGNAL_STACK_SIZE (64 * 1024)
-
 #define NOT_IMPLEMENTED g_assert_not_reached ()
 
 const char*
@@ -92,6 +108,7 @@ typedef struct {
        guint32 reg_usage;
        guint32 freg_usage;
        gboolean need_stack_align;
+       guint32 stack_align_amount;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
@@ -103,7 +120,8 @@ typedef struct {
 
 static X86_Reg_No param_regs [] = { 0 };
 
-#ifdef PLATFORM_WIN32
+#if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
+#define SMALL_STRUCTS_IN_REGS
 static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
 #endif
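
On the platforms that define SMALL_STRUCTS_IN_REGS, a sufficiently small pinvoke struct return comes back in the two registers listed in return_regs instead of through a hidden pointer. As a rough sketch (the struct and the native function are hypothetical; the actual eligibility test is the MonoMarshalType check in add_valuetype () below):

/* Hypothetical example: an 8-byte struct that this convention would return
 * split across X86_EAX (return_regs [0]) and X86_EDX (return_regs [1]). */
typedef struct {
	gint32 lo;	/* low word, returned in EAX */
	gint32 hi;	/* high word, returned in EDX */
} SmallPair;

extern SmallPair native_get_pair (void);	/* hypothetical pinvoke target */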
 
@@ -141,7 +159,7 @@ add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
 
     if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
-               (*stack_size) += sizeof (gpointer);
+               (*stack_size) += is_double ? 8 : 4;
     }
     else {
                /* A double register */
@@ -156,7 +174,7 @@ add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
 
 
 static void
-add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
+add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
               gboolean is_return,
               guint32 *gr, guint32 *fr, guint32 *stack_size)
 {
@@ -167,9 +185,9 @@ add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
        if (sig->pinvoke) 
                size = mono_type_native_stack_size (&klass->byval_arg, NULL);
        else 
-               size = mono_type_stack_size (&klass->byval_arg, NULL);
+               size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);
 
-#ifdef PLATFORM_WIN32
+#ifdef SMALL_STRUCTS_IN_REGS
        if (sig->pinvoke && is_return) {
                MonoMarshalType *info;
 
@@ -221,15 +239,19 @@ add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
  * For x86 win32, see ???.
  */
 static CallInfo*
-get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
 {
        guint32 i, gr, fr;
        MonoType *ret_type;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;
+       MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
 
-       cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+       if (mp)
+               cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+       else
+               cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
 
        gr = 0;
        fr = 0;
@@ -237,6 +259,7 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
        /* return value */
        {
                ret_type = mono_type_get_underlying_type (sig->ret);
+               ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
                switch (ret_type->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
@@ -269,10 +292,17 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                case MONO_TYPE_R8:
                        cinfo->ret.storage = ArgOnDoubleFpStack;
                        break;
+               case MONO_TYPE_GENERICINST:
+                       if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+                               cinfo->ret.storage = ArgInIReg;
+                               cinfo->ret.reg = X86_EAX;
+                               break;
+                       }
+                       /* Fall through */
                case MONO_TYPE_VALUETYPE: {
                        guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
 
-                       add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+                       add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
                        if (cinfo->ret.storage == ArgOnStack)
                                /* The caller passes the address where the value is stored */
                                add_general (&gr, &stack_size, &cinfo->ret);
@@ -325,6 +355,7 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                        continue;
                }
                ptype = mono_type_get_underlying_type (sig->params [i]);
+               ptype = mini_get_basic_type_from_generic (gsctx, ptype);
                switch (ptype->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
@@ -351,8 +382,14 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                case MONO_TYPE_ARRAY:
                        add_general (&gr, &stack_size, ainfo);
                        break;
+               case MONO_TYPE_GENERICINST:
+                       if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+                               add_general (&gr, &stack_size, ainfo);
+                               break;
+                       }
+                       /* Fall through */
                case MONO_TYPE_VALUETYPE:
-                       add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
+                       add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        stack_size += sizeof (MonoTypedRef);
@@ -382,6 +419,13 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }
 
+#if defined(__APPLE__)
+       if ((stack_size % 16) != 0) { 
+               cinfo->need_stack_align = TRUE;
+               stack_size += cinfo->stack_align_amount = 16-(stack_size % 16);
+       }
+#endif
+
        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
        cinfo->freg_usage = fr;
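
A minimal sketch of how the rest of the file consumes the CallInfo built here, now that get_call_info () takes the compile context and an optional mempool (the helper name is illustrative; the real call sites appear in the hunks below):

static guint32
outgoing_stack_size_sketch (MonoCompile *cfg, MonoMethodSignature *sig)
{
	/* Mempool-allocated, so no g_free () is needed: the CallInfo dies with the compile. */
	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	if (cinfo->ret.storage == ArgOnStack) {
		/* the caller also passes a hidden pointer for the return value */
	}

	/* On Apple, stack_usage already includes stack_align_amount bytes of
	 * padding so the outgoing argument area stays 16-byte aligned. */
	return cinfo->stack_usage;
}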
@@ -403,11 +447,12 @@ int
 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
 {
        int k, frame_size = 0;
-       int size, align, pad;
+       int size, pad;
+       guint32 align;
        int offset = 8;
        CallInfo *cinfo;
 
-       cinfo = get_call_info (csig, FALSE);
+       cinfo = get_call_info (NULL, NULL, csig, FALSE);
 
        if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
                frame_size += sizeof (gpointer);
@@ -427,8 +472,11 @@ mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJit
                
                if (csig->pinvoke)
                        size = mono_type_native_stack_size (csig->params [k], &align);
-               else
-                       size = mono_type_stack_size (csig->params [k], &align);
+               else {
+                       int ialign;
+                       size = mini_type_stack_size (NULL, csig->params [k], &ialign);
+                       align = ialign;
+               }
 
                /* ignore alignment for now */
                align = 1;
@@ -513,16 +561,15 @@ cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
 #endif
        if (have_cpuid) {
                /* Have to use the code manager to get around WinXP DEP */
-               MonoCodeManager *codeman = mono_code_manager_new_dynamic ();
-               CpuidFunc func;
-               void *ptr = mono_code_manager_reserve (codeman, sizeof (cpuid_impl));
-               memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
-
-               func = (CpuidFunc)ptr;
+               static CpuidFunc func = NULL;
+               void *ptr;
+               if (!func) {
+                       ptr = mono_global_codeman_reserve (sizeof (cpuid_impl));
+                       memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
+                       func = (CpuidFunc)ptr;
+               }
                func (id, p_eax, p_ebx, p_ecx, p_edx);
 
-               mono_code_manager_destroy (codeman);
-
                /*
                 * We use this approach because of issues with gcc and pic code, see:
                 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
@@ -555,6 +602,24 @@ mono_arch_cpu_init (void)
 #endif
 }
 
+/*
+ * Initialize architecture specific code.
+ */
+void
+mono_arch_init (void)
+{
+       InitializeCriticalSection (&mini_arch_mutex);
+}
+
+/*
+ * Cleanup architecture specific code.
+ */
+void
+mono_arch_cleanup (void)
+{
+       DeleteCriticalSection (&mini_arch_mutex);
+}
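
mini_arch_mutex exists only to serialize access to lazily built architecture-specific caches through the mono_mini_arch_lock ()/mono_mini_arch_unlock () macros defined near the top of the file. A hedged sketch of the intended pattern (the cache and the create_foo_cache () helper are hypothetical):

static gpointer arch_cache;	/* hypothetical architecture-specific cache */

static gpointer
get_arch_cache_sketch (void)
{
	gpointer res;

	mono_mini_arch_lock ();
	if (!arch_cache)
		arch_cache = create_foo_cache ();	/* hypothetical, built once */
	res = arch_cache;
	mono_mini_arch_unlock ();

	return res;
}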
+
 /*
  * This function returns the optimizations supported on this cpu.
  */
@@ -575,6 +640,10 @@ mono_arch_cpu_optimizazions (guint32 *exclude_mask)
                                *exclude_mask |= MONO_OPT_FCMOV;
                } else
                        *exclude_mask |= MONO_OPT_CMOV;
+               if (edx & (1 << 26))
+                       opts |= MONO_OPT_SSE2;
+               else
+                       *exclude_mask |= MONO_OPT_SSE2;
        }
        return opts;
 }
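
The cached cpuid () thunk above is what feeds these feature bits. A minimal sketch of testing the same SSE2 bit directly, assuming (as the have_cpuid check suggests) that cpuid () reports whether the instruction is available:

static gboolean
cpu_has_sse2_sketch (void)
{
	int eax, ebx, ecx, edx;

	if (!cpuid (1, &eax, &ebx, &ecx, &edx))
		return FALSE;
	/* bit 26 of EDX from CPUID leaf 1 is the SSE2 flag; it is the same bit
	 * mono_arch_cpu_optimizazions () maps onto MONO_OPT_SSE2 above. */
	return (edx & (1 << 26)) != 0;
}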
@@ -628,30 +697,6 @@ mono_arch_is_int_overflow (void *sigctx, void *info)
        return FALSE;
 }
 
-static gboolean
-is_regsize_var (MonoType *t) {
-       if (t->byref)
-               return TRUE;
-       switch (mono_type_get_underlying_type (t)->type) {
-       case MONO_TYPE_I4:
-       case MONO_TYPE_U4:
-       case MONO_TYPE_I:
-       case MONO_TYPE_U:
-       case MONO_TYPE_PTR:
-       case MONO_TYPE_FNPTR:
-               return TRUE;
-       case MONO_TYPE_OBJECT:
-       case MONO_TYPE_STRING:
-       case MONO_TYPE_CLASS:
-       case MONO_TYPE_SZARRAY:
-       case MONO_TYPE_ARRAY:
-               return TRUE;
-       case MONO_TYPE_VALUETYPE:
-               return FALSE;
-       }
-       return FALSE;
-}
-
 GList *
 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
 {
@@ -672,9 +717,7 @@ mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
 
                /* we don't allocate I1 to registers because there is no simple way to sign extend 
                 * 8bit quantities in caller saved registers on x86 */
-               if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) || 
-                   (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2)||
-                   (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
+               if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
                        g_assert (MONO_VARINFO (cfg, i)->reg == -1);
                        g_assert (i == vmv->idx);
                        vars = g_list_prepend (vars, vmv);
@@ -730,72 +773,19 @@ mono_arch_allocate_vars (MonoCompile *cfg)
        MonoMethodHeader *header;
        MonoInst *inst;
        guint32 locals_stack_size, locals_stack_align;
-       int i, offset, curinst, size, align;
+       int i, offset;
        gint32 *offsets;
        CallInfo *cinfo;
 
        header = mono_method_get_header (cfg->method);
        sig = mono_method_signature (cfg->method);
 
-       offset = 8;
-       curinst = 0;
-
-       cinfo = get_call_info (sig, FALSE);
-
-       switch (cinfo->ret.storage) {
-       case ArgOnStack:
-               cfg->ret->opcode = OP_REGOFFSET;
-               cfg->ret->inst_basereg = X86_EBP;
-               cfg->ret->inst_offset = offset;
-               offset += sizeof (gpointer);
-               break;
-       case ArgValuetypeInReg:
-               break;
-       case ArgInIReg:
-               cfg->ret->opcode = OP_REGVAR;
-               cfg->ret->inst_c0 = cinfo->ret.reg;
-               break;
-       case ArgNone:
-       case ArgOnFloatFpStack:
-       case ArgOnDoubleFpStack:
-               break;
-       default:
-               g_assert_not_reached ();
-       }
-
-       if (sig->hasthis) {
-               inst = cfg->varinfo [curinst];
-               if (inst->opcode != OP_REGVAR) {
-                       inst->opcode = OP_REGOFFSET;
-                       inst->inst_basereg = X86_EBP;
-               }
-               inst->inst_offset = offset;
-               offset += sizeof (gpointer);
-               curinst++;
-       }
-
-       if (sig->call_convention == MONO_CALL_VARARG) {
-               cfg->sig_cookie = offset;
-               offset += sizeof (gpointer);
-       }
-
-       for (i = 0; i < sig->param_count; ++i) {
-               inst = cfg->varinfo [curinst];
-               if (inst->opcode != OP_REGVAR) {
-                       inst->opcode = OP_REGOFFSET;
-                       inst->inst_basereg = X86_EBP;
-               }
-               inst->inst_offset = offset;
-               size = mono_type_size (sig->params [i], &align);
-               size += 4 - 1;
-               size &= ~(4 - 1);
-               offset += size;
-               curinst++;
-       }
+       cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
 
+       cfg->frame_reg = MONO_ARCH_BASEREG;
        offset = 0;
 
-       /* reserve space to save LMF and caller saved registers */
+       /* Reserve space to save LMF and caller saved registers */
 
        if (cfg->method->save_lmf) {
                offset += sizeof (MonoLMF);
@@ -840,15 +830,50 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                        //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
                }
        }
-       g_free (offsets);
        offset += locals_stack_size;
 
-       offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
-       offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
 
-       g_free (cinfo);
+       /*
+        * Allocate arguments+return value
+        */
 
-       cfg->frame_reg = MONO_ARCH_BASEREG;
+       switch (cinfo->ret.storage) {
+       case ArgOnStack:
+               cfg->ret->opcode = OP_REGOFFSET;
+               cfg->ret->inst_basereg = X86_EBP;
+               cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
+               break;
+       case ArgValuetypeInReg:
+               break;
+       case ArgInIReg:
+               cfg->ret->opcode = OP_REGVAR;
+               cfg->ret->inst_c0 = cinfo->ret.reg;
+               break;
+       case ArgNone:
+       case ArgOnFloatFpStack:
+       case ArgOnDoubleFpStack:
+               break;
+       default:
+               g_assert_not_reached ();
+       }
+
+       if (sig->call_convention == MONO_CALL_VARARG) {
+               g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+               cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
+       }
+
+       for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+               ArgInfo *ainfo = &cinfo->args [i];
+               inst = cfg->args [i];
+               if (inst->opcode != OP_REGVAR) {
+                       inst->opcode = OP_REGOFFSET;
+                       inst->inst_basereg = X86_EBP;
+               }
+               inst->inst_offset = ainfo->offset + ARGS_OFFSET;
+       }
+
+       offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
+       offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
 
        cfg->stack_offset = offset;
 }
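
Every incoming argument is now addressed as ainfo->offset + ARGS_OFFSET relative to EBP; ARGS_OFFSET is 8 because the saved frame pointer sits at [ebp+0] and the return address at [ebp+4]. A hedged sketch of reading one of those slots back (the helper is made up for illustration):

/* Hypothetical helper: load an incoming stack argument into dreg, assuming the
 * standard prolog (push %ebp; mov %esp, %ebp) has already run. */
static guint8*
load_incoming_arg_sketch (guint8 *code, int dreg, ArgInfo *ainfo)
{
	g_assert (ainfo->storage == ArgOnStack);
	x86_mov_reg_membase (code, dreg, X86_EBP, ainfo->offset + ARGS_OFFSET, 4);
	return code;
}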
@@ -861,65 +886,118 @@ mono_arch_create_vars (MonoCompile *cfg)
 
        sig = mono_method_signature (cfg->method);
 
-       cinfo = get_call_info (sig, FALSE);
+       cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
 
        if (cinfo->ret.storage == ArgValuetypeInReg)
                cfg->ret_var_is_local = TRUE;
-
-       g_free (cinfo);
 }
 
 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
  * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info 
  */
 
+static void
+emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call)
+{
+       MonoInst *arg;
+       MonoMethodSignature *tmp_sig;
+       MonoInst *sig_arg;
+
+       /* FIXME: Add support for signature tokens to AOT */
+       cfg->disable_aot = TRUE;
+       MONO_INST_NEW (cfg, arg, OP_OUTARG);
+
+       /*
+        * mono_ArgIterator_Setup assumes the signature cookie is 
+        * passed first and all the arguments which were before it are
+        * passed on the stack after the signature. So compensate by 
+        * passing a different signature.
+        */
+       tmp_sig = mono_metadata_signature_dup (call->signature);
+       tmp_sig->param_count -= call->signature->sentinelpos;
+       tmp_sig->sentinelpos = 0;
+       memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+       MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+       sig_arg->inst_p0 = tmp_sig;
+
+       arg->inst_left = sig_arg;
+       arg->type = STACK_PTR;
+       /* prepend, so they get reversed */
+       arg->next = call->out_args;
+       call->out_args = arg;
+}
+
+/*
+ * It is expensive to adjust esp for each individual fp argument pushed on the stack,
+ * so we try to do it just once when we have multiple fp arguments in a row.
+ * We don't use this mechanism generally because for int arguments the generated code
+ * is slightly bigger and newer-generation CPUs optimize away the dependency chains
+ * created by push instructions on the esp value.
+ * fp_arg_setup is the first argument in the execution sequence where the esp register
+ * is modified.
+ */
+static int
+collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup)
+{
+       int fp_space = 0;
+       MonoType *t;
+
+       for (; start_arg < sig->param_count; ++start_arg) {
+               t = mono_type_get_underlying_type (sig->params [start_arg]);
+               if (!t->byref && t->type == MONO_TYPE_R8) {
+                       fp_space += sizeof (double);
+                       *fp_arg_setup = start_arg;
+               } else {
+                       break;
+               }
+       }
+       return fp_space;
+}
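
collect_fp_stack_space () only measures the run of fp arguments; the packing described further down (the esp adjustment in the upper 16 bits of backend.arg_info, the slot offset from esp in the lower bits) has to be undone by the OP_OUTARG_R8 emitter, which is outside this diff. A hedged sketch of that decode, assuming the emitter follows the comment literally:

static guint8*
emit_outarg_r8_sketch (guint8 *code, MonoInst *ins)
{
	guint32 esp_adjust = ins->backend.arg_info >> 16;	/* non-zero only on the first fp arg of a run */
	guint32 esp_offset = ins->backend.arg_info & 0xffff;	/* slot for this double, relative to esp */

	if (esp_adjust)
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, esp_adjust);
	/* pop the double off the fp stack into its pre-reserved slot */
	x86_fst_membase (code, X86_ESP, esp_offset, TRUE, TRUE);
	return code;
}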
+
 /* 
  * take the arguments and generate the arch-specific
  * instructions to properly call the function in call.
  * This includes pushing, moving arguments to the right register
  * etc.
- * Issue: who does the spilling if needed, and when?
  */
 MonoCallInst*
 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
        MonoInst *arg, *in;
        MonoMethodSignature *sig;
-       int i, n, stack_size, type;
-       MonoType *ptype;
+       int i, n;
        CallInfo *cinfo;
+       int sentinelpos = 0;
+       int fp_args_space = 0, fp_args_offset = 0, fp_arg_setup = -1;
 
-       stack_size = 0;
-       /* add the vararg cookie before the non-implicit args */
-       if (call->signature->call_convention == MONO_CALL_VARARG) {
-               MonoInst *sig_arg;
-               /* FIXME: Add support for signature tokens to AOT */
-               cfg->disable_aot = TRUE;
-               MONO_INST_NEW (cfg, arg, OP_OUTARG);
-               MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
-               sig_arg->inst_p0 = call->signature;
-               arg->inst_left = sig_arg;
-               arg->type = STACK_PTR;
-               /* prepend, so they get reversed */
-               arg->next = call->out_args;
-               call->out_args = arg;
-               stack_size += sizeof (gpointer);
-       }
        sig = call->signature;
        n = sig->param_count + sig->hasthis;
 
-       cinfo = get_call_info (sig, FALSE);
+       cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
 
-       if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
-               if (cinfo->ret.storage == ArgOnStack)
-                       stack_size += sizeof (gpointer);
-       }
+       if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+               sentinelpos = sig->sentinelpos + (is_virtual ? 1 : 0);
 
        for (i = 0; i < n; ++i) {
+               ArgInfo *ainfo = cinfo->args + i;
+
+               /* Emit the signature cookie just before the implicit arguments */
+               if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
+                       emit_sig_cookie (cfg, call);
+               }
+
                if (is_virtual && i == 0) {
                        /* the argument will be attached to the call instruction */
                        in = call->args [i];
-                       stack_size += 4;
                } else {
+                       MonoType *t;
+
+                       if (i >= sig->hasthis)
+                               t = sig->params [i - sig->hasthis];
+                       else
+                               t = &mono_defaults.int_class->byval_arg;
+                       t = mono_type_get_underlying_type (t);
+
                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        in = call->args [i];
                        arg->cil_code = in->cil_code;
@@ -928,77 +1006,70 @@ mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call,
                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;
-                       if (i >= sig->hasthis) {
-                               MonoType *t = sig->params [i - sig->hasthis];
-                               ptype = mono_type_get_underlying_type (t);
-                               if (t->byref)
-                                       type = MONO_TYPE_U;
-                               else
-                                       type = ptype->type;
-                               /* FIXME: validate arguments... */
-                               switch (type) {
-                               case MONO_TYPE_I:
-                               case MONO_TYPE_U:
-                               case MONO_TYPE_BOOLEAN:
-                               case MONO_TYPE_CHAR:
-                               case MONO_TYPE_I1:
-                               case MONO_TYPE_U1:
-                               case MONO_TYPE_I2:
-                               case MONO_TYPE_U2:
-                               case MONO_TYPE_I4:
-                               case MONO_TYPE_U4:
-                               case MONO_TYPE_STRING:
-                               case MONO_TYPE_CLASS:
-                               case MONO_TYPE_OBJECT:
-                               case MONO_TYPE_PTR:
-                               case MONO_TYPE_FNPTR:
-                               case MONO_TYPE_ARRAY:
-                               case MONO_TYPE_SZARRAY:
-                                       stack_size += 4;
-                                       break;
-                               case MONO_TYPE_I8:
-                               case MONO_TYPE_U8:
-                                       stack_size += 8;
-                                       break;
-                               case MONO_TYPE_R4:
-                                       stack_size += 4;
-                                       arg->opcode = OP_OUTARG_R4;
-                                       break;
-                               case MONO_TYPE_R8:
-                                       stack_size += 8;
-                                       arg->opcode = OP_OUTARG_R8;
-                                       break;
-                               case MONO_TYPE_VALUETYPE: {
-                                       int size;
-                                       if (sig->pinvoke) 
-                                               size = mono_type_native_stack_size (&in->klass->byval_arg, NULL);
-                                       else 
-                                               size = mono_type_stack_size (&in->klass->byval_arg, NULL);
 
-                                       stack_size += size;
-                                       arg->opcode = OP_OUTARG_VT;
-                                       arg->klass = in->klass;
-                                       arg->unused = sig->pinvoke;
-                                       arg->inst_imm = size; 
-                                       break;
+                       if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
+                               guint32 size, align;
+
+                               if (t->type == MONO_TYPE_TYPEDBYREF) {
+                                       size = sizeof (MonoTypedRef);
+                                       align = sizeof (gpointer);
                                }
-                               case MONO_TYPE_TYPEDBYREF:
-                                       stack_size += sizeof (MonoTypedRef);
-                                       arg->opcode = OP_OUTARG_VT;
-                                       arg->klass = in->klass;
-                                       arg->unused = sig->pinvoke;
-                                       arg->inst_imm = sizeof (MonoTypedRef); 
+                               else
+                                       if (sig->pinvoke)
+                                               size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+                                       else {
+                                               int ialign;
+                                               size = mini_type_stack_size (cfg->generic_sharing_context, &in->klass->byval_arg, &ialign);
+                                               align = ialign;
+                                       }
+                               arg->opcode = OP_OUTARG_VT;
+                               arg->klass = in->klass;
+                               arg->backend.is_pinvoke = sig->pinvoke;
+                               arg->inst_imm = size; 
+                       }
+                       else {
+                               switch (ainfo->storage) {
+                               case ArgOnStack:
+                                       arg->opcode = OP_OUTARG;
+                                       if (!t->byref) {
+                                               if (t->type == MONO_TYPE_R4) {
+                                                       arg->opcode = OP_OUTARG_R4;
+                                               } else if (t->type == MONO_TYPE_R8) {
+                                                       arg->opcode = OP_OUTARG_R8;
+                                                       /* we store in the upper bits of backend.arg_info the needed
+                                                        * esp adjustment and in the lower bits the offset from esp
+                                                        * where the arg needs to be stored
+                                                        */
+                                                       if (!fp_args_space) {
+                                                               fp_args_space = collect_fp_stack_space (sig, i - sig->hasthis, &fp_arg_setup);
+                                                               fp_args_offset = fp_args_space;
+                                                       }
+                                                       arg->backend.arg_info = fp_args_space - fp_args_offset;
+                                                       fp_args_offset -= sizeof (double);
+                                                       if (i - sig->hasthis == fp_arg_setup) {
+                                                               arg->backend.arg_info |= fp_args_space << 16;
+                                                       }
+                                                       if (fp_args_offset == 0) {
+                                                               /* the allocated esp stack is finished:
+                                                               /* the allocated esp space is used up:
+                                                                * prepare for a possible second run of fp args
+                                                               fp_args_space = 0;
+                                                       }
+                                               }
+                                       }
                                        break;
                                default:
-                                       g_error ("unknown type 0x%02x in mono_arch_call_opcode\n", type);
+                                       g_assert_not_reached ();
                                }
-                       } else {
-                               /* the this argument */
-                               stack_size += 4;
                        }
                }
        }
 
+       /* Handle the case where there are no implicit arguments */
+       if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
+               emit_sig_cookie (cfg, call);
+       }
+
        if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
                if (cinfo->ret.storage == ArgValuetypeInReg) {
                        MonoInst *zero_inst;
@@ -1021,16 +1092,19 @@ mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call,
                else
                        /* if the function returns a struct, the called method already does a ret $0x4 */
                        if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
-                               stack_size -= 4;
+                               cinfo->stack_usage -= 4;
        }
+       
+       call->stack_usage = cinfo->stack_usage;
 
-       call->stack_usage = stack_size;
-       g_free (cinfo);
-
-       /* 
-        * should set more info in call, such as the stack space
-        * used by the args that needs to be added back to esp
-        */
+#if defined(__APPLE__)
+       if (cinfo->need_stack_align) {
+               MONO_INST_NEW (cfg, arg, OP_X86_OUTARG_ALIGN_STACK);
+               arg->inst_c0 = cinfo->stack_align_amount;
+               arg->next = call->out_args;
+               call->out_args = arg;
+        }
+#endif 
 
        return call;
 }
@@ -1043,6 +1117,10 @@ mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean ena
 {
        guchar *code = p;
 
+#if __APPLE__
+       x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
+#endif
+
        /* if some args are passed in registers, we need to save them here */
        x86_push_reg (code, X86_EBP);
 
@@ -1056,7 +1134,11 @@ mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean ena
                mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
                x86_call_code (code, 0);
        }
+#if __APPLE__
+       x86_alu_reg_imm (code, X86_ADD, X86_ESP, 16);
+#else
        x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+#endif
 
        return code;
 }
@@ -1092,6 +1174,12 @@ mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean ena
        case MONO_TYPE_R8:
                save_mode = SAVE_FP;
                break;
+       case MONO_TYPE_GENERICINST:
+               if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+                       save_mode = SAVE_EAX;
+                       break;
+               }
+               /* Fall through */
        case MONO_TYPE_VALUETYPE:
                save_mode = SAVE_STRUCT;
                break;
@@ -1194,12 +1282,20 @@ if (ins->flags & MONO_INST_BRLABEL) { \
         } \
 }
 
-/* emit an exception if condition is fail */
+/*
+ * Emit an exception if the condition fails and, when possible,
+ * branch directly to an existing exception target.
+ */
 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
-        do {                                                        \
-               mono_add_patch_info (cfg, code - cfg->native_code,   \
-                                   MONO_PATCH_INFO_EXC, exc_name);  \
-               x86_branch32 (code, cond, 0, signed);               \
+       do {                                                        \
+               MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
+               if (tins == NULL) {                                                                             \
+                       mono_add_patch_info (cfg, code - cfg->native_code,   \
+                                       MONO_PATCH_INFO_EXC, exc_name);  \
+                       x86_branch32 (code, cond, 0, signed);               \
+               } else {        \
+                       EMIT_COND_BRANCH (tins, cond, signed);  \
+               }                       \
        } while (0); 
 
 #define EMIT_FPCOMPARE(code) do { \
@@ -1211,35 +1307,228 @@ if (ins->flags & MONO_INST_BRLABEL) { \
 static guint8*
 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
 {
-       if (cfg->compile_aot) {
-               guint32 got_reg = X86_EAX;
+       mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
+       x86_call_code (code, 0);
 
-               if (cfg->compile_aot) {          
-                       /*
-                        * Since the patches are generated by the back end, there is
-                        * no way to generate a got_var at this point.
+       return code;
+}
+
+#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
+
+/*
+ * peephole_pass_1:
+ *
+ *   Perform peephole opts which should/can be performed before local regalloc
+ */
+static void
+peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+       MonoInst *ins, *last_ins = NULL;
+       ins = bb->code;
+
+       while (ins) {
+               switch (ins->opcode) {
+               case OP_IADD_IMM:
+               case OP_ADD_IMM:
+                       if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
+                               /* 
+                                * X86_LEA is like ADD, but doesn't have the
+                                * sreg1==dreg restriction.
+                                */
+                               ins->opcode = OP_X86_LEA_MEMBASE;
+                               ins->inst_basereg = ins->sreg1;
+                       } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+                               ins->opcode = OP_X86_INC_REG;
+                       break;
+               case OP_SUB_IMM:
+               case OP_ISUB_IMM:
+                       if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
+                               ins->opcode = OP_X86_LEA_MEMBASE;
+                               ins->inst_basereg = ins->sreg1;
+                               ins->inst_imm = -ins->inst_imm;
+                       } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+                               ins->opcode = OP_X86_DEC_REG;
+                       break;
+               case OP_COMPARE_IMM:
+               case OP_ICOMPARE_IMM:
+                       /* OP_COMPARE_IMM (reg, 0) 
+                        * --> 
+                        * OP_X86_TEST_NULL (reg) 
                         */
-                       g_assert (cfg->got_var);
+                       if (!ins->inst_imm)
+                               ins->opcode = OP_X86_TEST_NULL;
+                       break;
+               case OP_X86_COMPARE_MEMBASE_IMM:
+                       /* 
+                        * OP_STORE_MEMBASE_REG reg, offset(basereg)
+                        * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
+                        * -->
+                        * OP_STORE_MEMBASE_REG reg, offset(basereg)
+                        * OP_COMPARE_IMM reg, imm
+                        *
+                        * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
+                        */
+                       if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
+                           ins->inst_basereg == last_ins->inst_destbasereg &&
+                           ins->inst_offset == last_ins->inst_offset) {
+                                       ins->opcode = OP_COMPARE_IMM;
+                                       ins->sreg1 = last_ins->sreg1;
 
-                       if (cfg->got_var->opcode == OP_REGOFFSET)
-                               x86_mov_reg_membase (code, X86_EAX, cfg->got_var->inst_basereg, cfg->got_var->inst_offset, 4);
-                       else
-                               got_reg = cfg->got_var->dreg;
-               }
+                                       /* check if we can remove cmp reg,0 with test null */
+                                       if (!ins->inst_imm)
+                                               ins->opcode = OP_X86_TEST_NULL;
+                               }
 
-               mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
-               x86_call_membase (code, got_reg, 0xf0f0f0f0);
-       }
-       else {
-               mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
-               x86_call_code (code, 0);
-       }
+                       break;
+               case OP_LOAD_MEMBASE:
+               case OP_LOADI4_MEMBASE:
+                       /* 
+                        * Note: if reg1 = reg2 the load op is removed
+                        *
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
+                        * OP_LOAD_MEMBASE offset(basereg), reg2
+                        * -->
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+                        * OP_MOVE reg1, reg2
+                        */
+                       if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
+                                        || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+                           ins->inst_basereg == last_ins->inst_destbasereg &&
+                           ins->inst_offset == last_ins->inst_offset) {
+                               if (ins->dreg == last_ins->sreg1) {
+                                       last_ins->next = ins->next;                             
+                                       ins = ins->next;                                
+                                       continue;
+                               } else {
+                                       //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
+                                       ins->opcode = OP_MOVE;
+                                       ins->sreg1 = last_ins->sreg1;
+                               }
 
-       return code;
-}
+                       /* 
+                        * Note: reg1 must be different from the basereg in the second load
+                        * Note: if reg1 equals reg2, the second load is removed
+                        *
+                        * OP_LOAD_MEMBASE offset(basereg), reg1
+                        * OP_LOAD_MEMBASE offset(basereg), reg2
+                        * -->
+                        * OP_LOAD_MEMBASE offset(basereg), reg1
+                        * OP_MOVE reg1, reg2
+                        */
+                       } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
+                                          || last_ins->opcode == OP_LOAD_MEMBASE) &&
+                             ins->inst_basereg != last_ins->dreg &&
+                             ins->inst_basereg == last_ins->inst_basereg &&
+                             ins->inst_offset == last_ins->inst_offset) {
+
+                               if (ins->dreg == last_ins->dreg) {
+                                       last_ins->next = ins->next;                             
+                                       ins = ins->next;                                
+                                       continue;
+                               } else {
+                                       ins->opcode = OP_MOVE;
+                                       ins->sreg1 = last_ins->dreg;
+                               }
 
-/* FIXME: Add more instructions */
-#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI4_MEMBASE_REG))
+                               //g_assert_not_reached ();
+
+#if 0
+                       /* 
+                        * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
+                        * OP_LOAD_MEMBASE offset(basereg), reg
+                        * -->
+                        * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
+                        * OP_ICONST reg, imm
+                        */
+                       } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
+                                               || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
+                                  ins->inst_basereg == last_ins->inst_destbasereg &&
+                                  ins->inst_offset == last_ins->inst_offset) {
+                               //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
+                               ins->opcode = OP_ICONST;
+                               ins->inst_c0 = last_ins->inst_imm;
+                               g_assert_not_reached (); // check this rule
+#endif
+                       }
+                       break;
+               case OP_LOADU1_MEMBASE:
+               case OP_LOADI1_MEMBASE:
+                       /* 
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
+                        * OP_LOAD_MEMBASE offset(basereg), reg2
+                        * -->
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+                        * CONV_I1/U1 reg1, reg2
+                        */
+                       if (last_ins && X86_IS_BYTE_REG (last_ins->sreg1) &&
+                               (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
+                                       ins->inst_basereg == last_ins->inst_destbasereg &&
+                                       ins->inst_offset == last_ins->inst_offset) {
+                               ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+                               ins->sreg1 = last_ins->sreg1;
+                       }
+                       break;
+               case OP_LOADU2_MEMBASE:
+               case OP_LOADI2_MEMBASE:
+                       /* 
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
+                        * OP_LOAD_MEMBASE offset(basereg), reg2
+                        * -->
+                        * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+                        * CONV_I2/U2 reg1, reg2
+                        */
+                       if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
+                                       ins->inst_basereg == last_ins->inst_destbasereg &&
+                                       ins->inst_offset == last_ins->inst_offset) {
+                               ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+                               ins->sreg1 = last_ins->sreg1;
+                       }
+                       break;
+               case CEE_CONV_I4:
+               case CEE_CONV_U4:
+               case OP_ICONV_TO_I4:
+               case OP_MOVE:
+                       /*
+                        * Removes:
+                        *
+                        * OP_MOVE reg, reg 
+                        */
+                       if (ins->dreg == ins->sreg1) {
+                               if (last_ins)
+                                       last_ins->next = ins->next;                             
+                               ins = ins->next;
+                               continue;
+                       }
+                       /* 
+                        * Removes:
+                        *
+                        * OP_MOVE sreg, dreg 
+                        * OP_MOVE dreg, sreg
+                        */
+                       if (last_ins && last_ins->opcode == OP_MOVE &&
+                           ins->sreg1 == last_ins->dreg &&
+                           ins->dreg == last_ins->sreg1) {
+                               last_ins->next = ins->next;                             
+                               ins = ins->next;                                
+                               continue;
+                       }
+                       break;
+                       
+               case OP_X86_PUSH_MEMBASE:
+                       if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
+                                        last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+                           ins->inst_basereg == last_ins->inst_destbasereg &&
+                           ins->inst_offset == last_ins->inst_offset) {
+                                   ins->opcode = OP_X86_PUSH;
+                                   ins->sreg1 = last_ins->sreg1;
+                       }
+                       break;
+               }
+               last_ins = ins;
+               ins = ins->next;
+       }
+       bb->last_ins = last_ins;
+}
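
A source-level illustration of the main store/load pattern this pass rewrites (the C function is hypothetical; the pass itself works on the MonoInst list, not on C source):

typedef struct { int count; } Counter;

static int
set_and_read_sketch (Counter *c, int v)
{
	/* Typically lowered to
	 *   OP_STOREI4_MEMBASE_REG reg, offset(basereg)
	 *   OP_LOADI4_MEMBASE      offset(basereg), reg2
	 * which the pass folds into a single OP_MOVE, or removes
	 * entirely when reg2 happens to equal reg. */
	c->count = v;
	return c->count;
}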
 
 static void
 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
@@ -1253,31 +1542,43 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_ICONST:
                        /* reg = 0 -> XOR (reg, reg) */
                        /* XOR sets cflags on x86, so we can't always do it */
-                       if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
-                               ins->opcode = CEE_XOR;
+                       if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
+                               MonoInst *ins2;
+
+                               ins->opcode = OP_IXOR;
                                ins->sreg1 = ins->dreg;
                                ins->sreg2 = ins->dreg;
-                       }
-                       break;
-               case OP_MUL_IMM: 
-                       /* remove unnecessary multiplication with 1 */
-                       if (ins->inst_imm == 1) {
-                               if (ins->dreg != ins->sreg1) {
-                                       ins->opcode = OP_MOVE;
-                               } else {
-                                       last_ins->next = ins->next;
-                                       ins = ins->next;
-                                       continue;
+
+                               /* 
+                                * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG 
+                                * since it takes 3 bytes instead of 7.
+                                */
+                               for (ins2 = ins->next; ins2; ins2 = ins2->next) {
+                                       if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
+                                               ins2->opcode = OP_STORE_MEMBASE_REG;
+                                               ins2->sreg1 = ins->dreg;
+                                       }
+                                       else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
+                                               ins2->opcode = OP_STOREI4_MEMBASE_REG;
+                                               ins2->sreg1 = ins->dreg;
+                                       }
+                                       else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
+                                               /* Continue iteration */
+                                       }
+                                       else
+                                               break;
                                }
                        }
                        break;
-               case OP_COMPARE_IMM:
-                       /* OP_COMPARE_IMM (reg, 0) 
-                        * --> 
-                        * OP_X86_TEST_NULL (reg) 
-                        */
-                       if (!ins->inst_imm)
-                               ins->opcode = OP_X86_TEST_NULL;
+               case OP_IADD_IMM:
+               case OP_ADD_IMM:
+                       if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+                               ins->opcode = OP_X86_INC_REG;
+                       break;
+               case OP_ISUB_IMM:
+               case OP_SUB_IMM:
+                       if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+                               ins->opcode = OP_X86_DEC_REG;
                        break;
                case OP_X86_COMPARE_MEMBASE_IMM:
                        /* 
@@ -1407,6 +1708,7 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case CEE_CONV_I4:
                case CEE_CONV_U4:
+               case OP_ICONV_TO_I4:
                case OP_MOVE:
                        /*
                         * Removes:
@@ -1433,7 +1735,6 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                continue;
                        }
                        break;
-                       
                case OP_X86_PUSH_MEMBASE:
                        if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
                                         last_ins->opcode == OP_STORE_MEMBASE_REG) &&
@@ -1457,18 +1758,46 @@ branch_cc_table [] = {
        X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
 };
 
-static const char*const * ins_spec = pentium_desc;
+/* Maps CMP_... constants to X86_CC_... constants */
+static const int
+cc_table [] = {
+       X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
+       X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
+};
+
+static const int
+cc_signed_table [] = {
+       TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
+       FALSE, FALSE, FALSE, FALSE
+};
 
-/*#include "cprop.c"*/
 void
 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
 {
+       if (cfg->opt & MONO_OPT_PEEPHOLE)
+               peephole_pass_1 (cfg, bb);
+
        mono_local_regalloc (cfg, bb);
 }
 
 static unsigned char*
 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
 {
+#define XMM_TEMP_REG 0
+       if (cfg->opt & MONO_OPT_SSE2 && size < 8) {
+               /* optimize by assigning a local var for this use so we avoid
+                * the stack manipulations */
+               x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
+               x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
+               x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
+               x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
+               x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+               if (size == 1)
+                       x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
+               else if (size == 2)
+                       x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
+               return code;
+       }
        x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
        x86_fnstcw_membase(code, X86_ESP, 0);
        x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
@@ -1622,7 +1951,7 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
        case OP_VCALL:
        case OP_VCALL_REG:
        case OP_VCALL_MEMBASE:
-               cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
+               cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
                if (cinfo->ret.storage == ArgValuetypeInReg) {
                        /* Pop the destination address from the stack */
                        x86_pop_reg (code, X86_ECX);
@@ -1640,7 +1969,6 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
                                }
                        }
                }
-               g_free (cinfo);
        default:
                break;
        }
@@ -1648,6 +1976,17 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
        return code;
 }
 
+/*
+ * emit_tls_get:
+ * @code: buffer to store code to
+ * @dreg: hard register where to place the result
+ * @tls_offset: offset of the TLS slot
+ *
+ * emit_tls_get emits into @code the native code that loads the thread local
+ * storage item identified by @tls_offset into the @dreg register.
+ *
+ * Returns: a pointer to the end of the stored code
+ */
 static guint8*
 emit_tls_get (guint8* code, int dreg, int tls_offset)
 {
@@ -1663,12 +2002,65 @@ emit_tls_get (guint8* code, int dreg, int tls_offset)
        x86_alu_membase_imm (code, X86_AND, dreg, 0x34, 0);
        x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
 #else
-       x86_prefix (code, X86_GS_PREFIX);
-       x86_mov_reg_mem (code, dreg, tls_offset, 4);                    
+       if (optimize_for_xen) {
+               x86_prefix (code, X86_GS_PREFIX);
+               x86_mov_reg_mem (code, dreg, 0, 4);
+               x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4);
+       } else {
+               x86_prefix (code, X86_GS_PREFIX);
+               x86_mov_reg_mem (code, dreg, tls_offset, 4);
+       }
 #endif
        return code;
 }
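
A minimal usage sketch for emit_tls_get, tied to the TLS offsets declared at the top of the file; the real call sites live in parts of mini-x86.c outside this diff, so the surrounding logic here is an assumption. The Xen branch above adds one extra indirection, presumably because direct %gs:offset accesses with large offsets are restricted under Xen.

static guint8*
load_current_domain_sketch (guint8 *code, int dreg)
{
	/* hypothetical helper mirroring how a prolog would use emit_tls_get */
	if (appdomain_tls_offset != -1)
		code = emit_tls_get (code, dreg, appdomain_tls_offset);	/* fast TLS path */
	/* otherwise a helper call would be emitted instead (not shown here) */
	return code;
}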
 
+/*
+ * emit_load_volatile_arguments:
+ *
+ *  Move volatile arguments back to their original locations (on x86, from
+ * global registers back to their stack slots). Required before a tail call.
+ */
+static guint8*
+emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
+{
+       MonoMethod *method = cfg->method;
+       MonoMethodSignature *sig;
+       MonoInst *inst;
+       CallInfo *cinfo;
+       guint32 i;
+
+       /* FIXME: Generate intermediate code instead */
+
+       sig = mono_method_signature (method);
+
+       cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
+       
+       /* This is the opposite of the code in emit_prolog */
+
+       for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+               ArgInfo *ainfo = cinfo->args + i;
+               MonoType *arg_type;
+               inst = cfg->args [i];
+
+               if (sig->hasthis && (i == 0))
+                       arg_type = &mono_defaults.object_class->byval_arg;
+               else
+                       arg_type = sig->params [i - sig->hasthis];
+
+               /*
+                * On x86, the arguments are either in their original stack locations, or in
+                * global regs.
+                */
+               if (inst->opcode == OP_REGVAR) {
+                       g_assert (ainfo->storage == ArgOnStack);
+                       
+                       x86_mov_membase_reg (code, X86_EBP, inst->inst_offset, inst->dreg, 4);
+               }
+       }
+
+       return code;
+}
+
 #define REAL_PRINT_REG(text,reg) \
 mono_assert (reg >= 0); \
 x86_push_reg (code, X86_EAX); \
@@ -1731,11 +2123,13 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
        offset = code - cfg->native_code;
 
+       mono_debug_open_block (cfg, bb, offset);
+
        ins = bb->code;
        while (ins) {
                offset = code - cfg->native_code;
 
-               max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+               max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
 
                if (offset > (cfg->code_size - max_len - 16)) {
                        cfg->code_size *= 2;
@@ -1852,6 +2246,15 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_X86_SUB_MEMBASE:
                        x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
                        break;
+               case OP_X86_AND_MEMBASE_IMM:
+                       x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
+                       break;
+               case OP_X86_OR_MEMBASE_IMM:
+                       x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
+                       break;
+               case OP_X86_XOR_MEMBASE_IMM:
+                       x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
+                       break;
                case OP_X86_INC_MEMBASE:
                        x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
                        break;
@@ -1867,7 +2270,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_X86_MUL_MEMBASE:
                        x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
                        break;
-               case CEE_BREAK:
+               case OP_BREAK:
                        x86_breakpoint (code);
                        break;
                case OP_ADDCC:
@@ -1937,6 +2340,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
                        break;
                case CEE_XOR:
+               case OP_IXOR:
                        x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
                        break;
                case OP_XOR_IMM:
@@ -1967,14 +2371,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        guint8 *jump_to_end;
 
                        /* handle shifts below 32 bits */
-                       x86_shld_reg (code, ins->unused, ins->sreg1);
+                       x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
                        x86_shift_reg (code, X86_SHL, ins->sreg1);
 
                        x86_test_reg_imm (code, X86_ECX, 32);
                        jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
 
                        /* handle shift over 32 bit */
-                       x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+                       x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
                        x86_clear_reg (code, ins->sreg1);
                        
                        x86_patch (jump_to_end, code);
@@ -1984,15 +2388,15 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        guint8 *jump_to_end;
 
                        /* handle shifts below 32 bits */
-                       x86_shrd_reg (code, ins->sreg1, ins->unused);
-                       x86_shift_reg (code, X86_SAR, ins->unused);
+                       x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
+                       x86_shift_reg (code, X86_SAR, ins->backend.reg3);
 
                        x86_test_reg_imm (code, X86_ECX, 32);
                        jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
 
                        /* handle shifts over 31 bits */
-                       x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
-                       x86_shift_reg_imm (code, X86_SAR, ins->unused, 31);
+                       x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+                       x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);
                        
                        x86_patch (jump_to_end, code);
                        }
@@ -2001,47 +2405,47 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        guint8 *jump_to_end;
 
                        /* handle shifts below 32 bits */
-                       x86_shrd_reg (code, ins->sreg1, ins->unused);
-                       x86_shift_reg (code, X86_SHR, ins->unused);
+                       x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
+                       x86_shift_reg (code, X86_SHR, ins->backend.reg3);
 
                        x86_test_reg_imm (code, X86_ECX, 32);
                        jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
 
                        /* handle shifts over 31 bits */
-                       x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
-                       x86_shift_reg_imm (code, X86_SHR, ins->unused, 31);
+                       x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+                       x86_clear_reg (code, ins->backend.reg3);
                        
                        x86_patch (jump_to_end, code);
                        }
                        break;
                case OP_LSHL_IMM:
                        if (ins->inst_imm >= 32) {
-                               x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+                               x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
                                x86_clear_reg (code, ins->sreg1);
-                               x86_shift_reg_imm (code, X86_SHL, ins->unused, ins->inst_imm - 32);
+                               x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
                        } else {
-                               x86_shld_reg_imm (code, ins->unused, ins->sreg1, ins->inst_imm);
+                               x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
                                x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
                        }
                        break;
                case OP_LSHR_IMM:
                        if (ins->inst_imm >= 32) {
-                               x86_mov_reg_reg (code, ins->sreg1, ins->unused,  4);
-                               x86_shift_reg_imm (code, X86_SAR, ins->unused, 0x1f);
+                               x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3,  4);
+                               x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
                                x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
                        } else {
-                               x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
-                               x86_shift_reg_imm (code, X86_SAR, ins->unused, ins->inst_imm);
+                               x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
+                               x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
                        }
                        break;
                case OP_LSHR_UN_IMM:
                        if (ins->inst_imm >= 32) {
-                               x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
-                               x86_clear_reg (code, ins->unused);
+                               x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+                               x86_clear_reg (code, ins->backend.reg3);
                                x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
                        } else {
-                               x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
-                               x86_shift_reg_imm (code, X86_SHR, ins->unused, ins->inst_imm);
+                               x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
+                               x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
                        }
                        break;
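The three long-shift-by-immediate cases above treat sreg1 as the low word and backend.reg3 as the high word of a 64-bit register pair. A small stand-alone model of the OP_LSHL_IMM split (the helper name lshl_imm is invented), checked against a native 64-bit shift:

#include <assert.h>
#include <stdint.h>

/* *lo/*hi hold the low/high 32-bit halves; imm is 0-63. */
static void
lshl_imm (uint32_t *lo, uint32_t *hi, int imm)
{
        if (imm >= 32) {
                *hi = *lo << (imm - 32);        /* mov hi, lo; shl hi, imm-32 */
                *lo = 0;                        /* clear lo                   */
        } else {
                /* shld hi, lo, imm: shift hi left, filling with the top bits of lo */
                *hi = (*hi << imm) | (imm ? *lo >> (32 - imm) : 0);
                *lo <<= imm;                    /* shl lo, imm                */
        }
}

int
main (void)
{
        uint64_t v = 0x123456789abcdef0ULL;
        int imm;

        for (imm = 0; imm < 64; ++imm) {
                uint32_t lo = (uint32_t) v, hi = (uint32_t) (v >> 32);
                lshl_imm (&lo, &hi, imm);
                assert ((((uint64_t) hi << 32) | lo) == v << imm);
        }
        return 0;
}

OP_LSHR_IMM and OP_LSHR_UN_IMM are the mirror images, using sar/shr and either sign-extending or clearing the high word once the shift count reaches 32.
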
                case CEE_NOT:
@@ -2060,7 +2464,62 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
                        break;
                case OP_MUL_IMM:
-                       x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
+                       switch (ins->inst_imm) {
+                       case 2:
+                               /* MOV r1, r2 */
+                               /* ADD r1, r1 */
+                               if (ins->dreg != ins->sreg1)
+                                       x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
+                               x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+                               break;
+                       case 3:
+                               /* LEA r1, [r2 + r2*2] */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+                               break;
+                       case 5:
+                               /* LEA r1, [r2 + r2*4] */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+                               break;
+                       case 6:
+                               /* LEA r1, [r2 + r2*2] */
+                               /* ADD r1, r1          */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+                               x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+                               break;
+                       case 9:
+                               /* LEA r1, [r2 + r2*8] */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
+                               break;
+                       case 10:
+                               /* LEA r1, [r2 + r2*4] */
+                               /* ADD r1, r1          */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+                               x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+                               break;
+                       case 12:
+                               /* LEA r1, [r2 + r2*2] */
+                               /* SHL r1, 2           */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+                               x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+                               break;
+                       case 25:
+                               /* LEA r1, [r2 + r2*4] */
+                               /* LEA r1, [r1 + r1*4] */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+                               x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+                               break;
+                       case 100:
+                               /* LEA r1, [r2 + r2*4] */
+                               /* SHL r1, 2           */
+                               /* LEA r1, [r1 + r1*4] */
+                               x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+                               x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+                               x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+                               break;
+                       default:
+                               x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
+                               break;
+                       }
                        break;
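The special cases above are standard LEA/ADD/SHL strength reductions. A stand-alone check (plain C, no Mono headers; lea_base_index models lea r, [base + index*2^shift]) that each decomposition really computes inst_imm * sreg1:

#include <assert.h>
#include <stdint.h>

static uint32_t
lea_base_index (uint32_t base, uint32_t index, int shift)
{
        return base + (index << shift);
}

int
main (void)
{
        uint32_t x = 123456789u;
        uint32_t t;

        assert (lea_base_index (x, x, 1) == 3 * x);                   /* case 3             */
        assert (lea_base_index (x, x, 2) == 5 * x);                   /* case 5             */
        assert (2 * lea_base_index (x, x, 1) == 6 * x);               /* case 6: lea + add  */
        assert (lea_base_index (x, x, 3) == 9 * x);                   /* case 9             */
        assert (2 * lea_base_index (x, x, 2) == 10 * x);              /* case 10: lea + add */
        assert ((lea_base_index (x, x, 1) << 2) == 12 * x);           /* case 12: lea + shl */

        t = lea_base_index (x, x, 2);                                 /* x * 5              */
        assert (lea_base_index (t, t, 2) == 25 * x);                  /* case 25: lea + lea */
        assert (lea_base_index (t << 2, t << 2, 2) == 100 * x);       /* case 100: + shl 2  */
        return 0;
}

All arithmetic is modulo 2^32 on both sides, so the identities hold for any 32-bit input, exactly as the emitted instruction sequences do.
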
                case CEE_MUL_OVF:
                        x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
@@ -2136,7 +2595,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case CEE_CONV_U4:
                        g_assert_not_reached ();
-               case CEE_JMP: {
+               case OP_JMP: {
                        /*
                         * Note: this 'frame destruction' logic is useful for tail calls, too.
                         * Keep in sync with the code in emit_epilog.
@@ -2151,6 +2610,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                        g_assert (!cfg->method->save_lmf);
 
+                       code = emit_load_volatile_arguments (cfg, code);
+
                        if (cfg->used_int_regs & (1 << X86_EBX))
                                pos -= 4;
                        if (cfg->used_int_regs & (1 << X86_EDI))
@@ -2282,7 +2743,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_pop_reg (code, X86_EDI);
                        break;
                case OP_X86_LEA:
-                       x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
+                       x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
                        break;
                case OP_X86_LEA_MEMBASE:
                        x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
@@ -2292,15 +2753,15 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_LOCALLOC:
                        /* keep alignment */
-                       x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
-                       x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
+                       x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
+                       x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
                        code = mono_emit_stack_alloc (code, ins);
                        x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
                        break;
                case CEE_RET:
                        x86_ret (code);
                        break;
-               case CEE_THROW: {
+               case OP_THROW: {
                        x86_push_reg (code, ins->sreg1);
                        code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
                                                          (gpointer)"mono_arch_throw_exception");
@@ -2313,13 +2774,20 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                }
                case OP_CALL_HANDLER: 
+                       /* Align stack */
+#ifdef __APPLE__
+                       x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
+#endif
                        mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
                        x86_call_imm (code, 0);
+#ifdef __APPLE__
+                       x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12);
+#endif
                        break;
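The Darwin-only padding around the call exists because the handler must run on a 16-byte aligned stack; a tiny arithmetic sketch (the initial %esp value is made up) of why 12 bytes of padding plus the 4-byte return address pushed by the call preserve that alignment:

#include <assert.h>

int
main (void)
{
        unsigned esp = 0xbffff000u;     /* hypothetical, 16-byte aligned  */

        esp -= 12;                      /* sub $12, %esp                  */
        esp -= 4;                       /* call pushes the return address */
        assert (esp % 16 == 0);         /* handler sees an aligned stack  */

        esp += 4;                       /* handler returns                */
        esp += 12;                      /* add $12, %esp                  */
        assert (esp == 0xbffff000u);
        return 0;
}
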
                case OP_LABEL:
                        ins->inst_c0 = code - cfg->native_code;
                        break;
-               case CEE_BR:
+               case OP_BR:
                        //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
                        //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
                        //break;
@@ -2351,27 +2819,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_jump_reg (code, ins->sreg1);
                        break;
                case OP_CEQ:
-                       x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
-                       x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-                       break;
                case OP_CLT:
-                       x86_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
-                       x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-                       break;
                case OP_CLT_UN:
-                       x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
-                       x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-                       break;
                case OP_CGT:
-                       x86_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
-                       x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-                       break;
                case OP_CGT_UN:
-                       x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
-                       x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-                       break;
                case OP_CNE:
-                       x86_set_reg (code, X86_CC_NE, ins->dreg, TRUE);
+                       x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
                        x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
                        break;
                case OP_COND_EXC_EQ:
@@ -2384,12 +2837,13 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_COND_EXC_GE_UN:
                case OP_COND_EXC_LE:
                case OP_COND_EXC_LE_UN:
+                       EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
+                       break;
                case OP_COND_EXC_OV:
                case OP_COND_EXC_NO:
                case OP_COND_EXC_C:
                case OP_COND_EXC_NC:
-                       EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
-                                                   (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
+                       EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
                        break;
                case CEE_BEQ:
                case CEE_BNE_UN:
@@ -2401,7 +2855,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case CEE_BGE_UN:
                case CEE_BLE:
                case CEE_BLE_UN:
-                       EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
+                       EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
                        break;
 
                /* floating point opcodes */
@@ -2502,7 +2956,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
                        x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
                        x86_pop_reg (code, ins->dreg);
-                       x86_pop_reg (code, ins->unused);
+                       x86_pop_reg (code, ins->backend.reg3);
                        x86_fldcw_membase (code, X86_ESP, 0);
                        x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
                        break;
@@ -2537,6 +2991,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
                case OP_LCONV_TO_OVF_I: {
                        guint8 *br [3], *label [1];
+                       MonoInst *tins;
 
                        /* 
                         * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
@@ -2551,8 +3006,18 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        label [0] = code;
 
                        /* throw exception */
-                       mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
-                       x86_jump32 (code, 0);
+                       tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
+                       if (tins) {
+                               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
+                               if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
+                                       x86_jump8 (code, 0);
+                               else
+                                       x86_jump32 (code, 0);
+                       } else {
+                               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
+                               x86_jump32 (code, 0);
+                       }
+       
        
                        x86_patch (br [0], code);
                        /* our top bit is set, check that top word is 0xfffffff */
@@ -2844,7 +3309,18 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_FBGT:
                case OP_FBGT_UN:
                        if (cfg->opt & MONO_OPT_FCMOV) {
-                               EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+                               if (ins->opcode == OP_FBGT) {
+                                       guchar *br1;
+
+                                       /* skip branch if C1=1 */
+                                       br1 = code;
+                                       x86_branch8 (code, X86_CC_P, 0, FALSE);
+                                       /* branch if (C0 | C3) = 1 */
+                                       EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+                                       x86_patch (br1, code);
+                               } else {
+                                       EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+                               }
                                break;
                        }
                        x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
@@ -2914,7 +3390,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
                        EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
                        break;
-               case CEE_CKFINITE: {
+               case OP_CKFINITE: {
                        x86_push_reg (code, X86_EAX);
                        x86_fxam (code);
                        x86_fnstsw (code);
@@ -2928,6 +3404,10 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        code = emit_tls_get (code, ins->dreg, ins->inst_offset);
                        break;
                }
+               case OP_MEMORY_BARRIER: {
+                       /* Not needed on x86 */
+                       break;
+               }
                case OP_ATOMIC_ADD_I4: {
                        int dreg = ins->dreg;
 
@@ -3128,6 +3608,10 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        guint8 *code;
 
        cfg->code_size =  MAX (mono_method_get_header (method)->code_size * 4, 256);
+
+       if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+               cfg->code_size += 512;
+
        code = cfg->native_code = g_malloc (cfg->code_size);
 
        x86_push_reg (code, X86_EBP);
@@ -3154,8 +3638,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        /* FIXME: Add a separate key for LMF to avoid this */
                        x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
 #endif
-               }
-               else {
+               } else {
                        g_assert (!cfg->compile_aot);
                        x86_push_imm (code, cfg->domain);
                        code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
@@ -3176,43 +3659,50 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                x86_push_reg (code, X86_EDI);
                x86_push_reg (code, X86_EBX);
 
-               /* save method info */
-               x86_push_imm (code, method);
+               if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
+                       /*
+                        * Optimized version which uses the mono_lmf TLS variable instead of indirection
+                        * through the mono_lmf_addr TLS variable.
+                        */
+                       /* %eax = previous_lmf */
+                       x86_prefix (code, X86_GS_PREFIX);
+                       x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
+                       /* skip esp + method_info + lmf */
+                       x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
+                       /* push previous_lmf */
+                       x86_push_reg (code, X86_EAX);
+                       /* new lmf = ESP */
+                       x86_prefix (code, X86_GS_PREFIX);
+                       x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
+               } else {
+                       /* get the address of lmf for the current thread */
+                       /* 
+                        * This is performance critical so we try to use some tricks to make
+                        * it fast.
+                        */                                                                        
 
-               /* get the address of lmf for the current thread */
-               /* 
-                * This is performance critical so we try to use some tricks to make
-                * it fast.
-                */
-               if (lmf_tls_offset != -1) {
-                       /* Load lmf quicky using the GS register */
-                       code = emit_tls_get (code, X86_EAX, lmf_tls_offset);
+                       if (lmf_addr_tls_offset != -1) {
+                               /* Load lmf_addr quickly using the GS register */
+                               code = emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
 #ifdef PLATFORM_WIN32
-                       /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
-                       /* FIXME: Add a separate key for LMF to avoid this */
-                       x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+                               /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
+                               /* FIXME: Add a separate key for LMF to avoid this */
+                               x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
 #endif
-               }
-               else {
-                       if (cfg->compile_aot) {
-                               /* The GOT var does not exist yet */
-                               x86_call_imm (code, 0);
-                               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
-                               x86_pop_reg (code, X86_EAX);
-                               x86_alu_reg_imm (code, X86_ADD, X86_EAX, 0);
-                               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
-                               x86_call_membase (code, X86_EAX, 0xf0f0f0f0);
-                       }
-                       else
+                       } else {
                                code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
-               }
+                       }
+
+                       /* Skip esp + method info */
+                       x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
 
-               /* push lmf */
-               x86_push_reg (code, X86_EAX); 
-               /* push *lfm (previous_lmf) */
-               x86_push_membase (code, X86_EAX, 0);
-               /* *(lmf) = ESP */
-               x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
+                       /* push lmf */
+                       x86_push_reg (code, X86_EAX); 
+                       /* push *lmf (previous_lmf) */
+                       x86_push_membase (code, X86_EAX, 0);
+                       /* *(lmf) = ESP */
+                       x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
+               }
        } else {
 
                if (cfg->used_int_regs & (1 << X86_EBX)) {
@@ -3233,6 +3723,20 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 
        alloc_size -= pos;
 
+#if __APPLE__
+       /* the stack must be 16-byte aligned on Darwin: alloc_size alone is already aligned, but %ebp and the return ip have been pushed, so realign the whole frame */
+       {
+               int tot = alloc_size + pos + 4 + 4; /* ret ip + ebp */
+               if (tot & 4) {
+                       tot += 4;
+                       alloc_size += 4;
+               }
+               if (tot & 8) {
+                       alloc_size += 8;
+               }
+       }
+#endif
+
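A quick stand-alone check (plain C, hypothetical frame sizes) that the padding above always makes the complete frame (alloc_size, the callee-saved area pos, plus the pushed %ebp and return address) a multiple of 16:

#include <assert.h>

/* Mirror of the realignment above: alloc_size and pos are 4-byte aligned,
 * and 4 + 4 accounts for the pushed return address and %ebp. */
static int
realign (int alloc_size, int pos)
{
        int tot = alloc_size + pos + 4 + 4;
        if (tot & 4) {
                tot += 4;
                alloc_size += 4;
        }
        if (tot & 8)
                alloc_size += 8;
        return alloc_size;
}

int
main (void)
{
        int alloc_size, pos;

        for (alloc_size = 0; alloc_size <= 64; alloc_size += 4)
                for (pos = 0; pos <= 32; pos += 4)
                        assert ((realign (alloc_size, pos) + pos + 8) % 16 == 0);
        return 0;
}
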
        if (alloc_size) {
                /* See mono_emit_stack_alloc */
 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
@@ -3249,6 +3753,15 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 #endif
        }
 
+#if __APPLE__
+       /* check the stack is aligned */
+       x86_mov_reg_reg (code, X86_EDX, X86_ESP, 4);
+       x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
+       x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
+       x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
+       x86_breakpoint (code);
+#endif
+
         /* compute max_offset in order to use short forward jumps */
        max_offset = 0;
        if (cfg->opt & MONO_OPT_BRANCH) {
@@ -3266,7 +3779,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                if (ins->opcode == OP_LABEL)
                                        ins->inst_c1 = max_offset;
                                
-                               max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+                               max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
                                ins = ins->next;
                        }
                }
@@ -3280,7 +3793,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        pos = 0;
 
        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
-               inst = cfg->varinfo [pos];
+               inst = cfg->args [pos];
                if (inst->opcode == OP_REGVAR) {
                        x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
                        if (cfg->verbose_level > 2)
@@ -3307,9 +3820,6 @@ mono_arch_emit_epilog (MonoCompile *cfg)
        
        if (cfg->method->save_lmf)
                max_epilog_size += 128;
-       
-       if (mono_jit_trace_calls != NULL)
-               max_epilog_size += 50;
 
        while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
                cfg->code_size *= 2;
@@ -3322,43 +3832,57 @@ mono_arch_emit_epilog (MonoCompile *cfg)
        if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
                code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
 
-       /* the code restoring the registers must be kept in sync with CEE_JMP */
+       /* the code restoring the registers must be kept in sync with OP_JMP */
        pos = 0;
        
        if (method->save_lmf) {
                gint32 prev_lmf_reg;
+               gint32 lmf_offset = -sizeof (MonoLMF);
 
-               /* Find a spare register */
-               switch (sig->ret->type) {
-               case MONO_TYPE_I8:
-               case MONO_TYPE_U8:
-                       prev_lmf_reg = X86_EDI;
-                       cfg->used_int_regs |= (1 << X86_EDI);
-                       break;
-               default:
-                       prev_lmf_reg = X86_EDX;
-                       break;
-               }
+               if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
+                       /*
+                        * Optimized version which uses the mono_lmf TLS variable instead of indirection
+                        * through the mono_lmf_addr TLS variable.
+                        */
+                       /* reg = previous_lmf */
+                       x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
+
+                       /* lmf = previous_lmf */
+                       x86_prefix (code, X86_GS_PREFIX);
+                       x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
+               } else {
+                       /* Find a spare register */
+                       switch (sig->ret->type) {
+                       case MONO_TYPE_I8:
+                       case MONO_TYPE_U8:
+                               prev_lmf_reg = X86_EDI;
+                               cfg->used_int_regs |= (1 << X86_EDI);
+                               break;
+                       default:
+                               prev_lmf_reg = X86_EDX;
+                               break;
+                       }
 
-               /* reg = previous_lmf */
-               x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, -32, 4);
+                       /* reg = previous_lmf */
+                       x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
 
-               /* ecx = lmf */
-               x86_mov_reg_membase (code, X86_ECX, X86_EBP, -28, 4);
+                       /* ecx = lmf */
+                       x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
 
-               /* *(lmf) = previous_lmf */
-               x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
+                       /* *(lmf) = previous_lmf */
+                       x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
+               }
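Both the optimized prolog path and this epilog path manipulate the same structure: an intrusive stack of LMF frames whose head lives in the mono_lmf TLS slot. A minimal C model of that push/pop pairing (struct layout and names are illustrative, not the real MonoLMF):

#include <assert.h>
#include <stddef.h>

struct lmf {
        struct lmf *previous_lmf;
        /* the saved registers would follow */
};

static struct lmf *mono_lmf_tls;        /* stands in for the mono_lmf TLS variable */

/* Prolog: remember the old head in the new frame, then install the new frame. */
static void
lmf_push (struct lmf *frame)
{
        frame->previous_lmf = mono_lmf_tls;     /* mov %gs:lmf, %eax ; push %eax */
        mono_lmf_tls = frame;                   /* mov %esp, %gs:lmf             */
}

/* Epilog: restore the old head, with no mono_lmf_addr indirection. */
static void
lmf_pop (struct lmf *frame)
{
        mono_lmf_tls = frame->previous_lmf;     /* mov previous_lmf(%ebp), %ecx ; mov %ecx, %gs:lmf */
}

int
main (void)
{
        struct lmf outer, inner;

        lmf_push (&outer);
        lmf_push (&inner);
        assert (mono_lmf_tls == &inner && inner.previous_lmf == &outer);

        lmf_pop (&inner);
        assert (mono_lmf_tls == &outer);
        lmf_pop (&outer);
        assert (mono_lmf_tls == NULL);
        return 0;
}
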
 
                /* restore caller saved regs */
                if (cfg->used_int_regs & (1 << X86_EBX)) {
-                       x86_mov_reg_membase (code, X86_EBX, X86_EBP, -20, 4);
+                       x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
                }
 
                if (cfg->used_int_regs & (1 << X86_EDI)) {
-                       x86_mov_reg_membase (code, X86_EDI, X86_EBP, -16, 4);
+                       x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
                }
                if (cfg->used_int_regs & (1 << X86_ESI)) {
-                       x86_mov_reg_membase (code, X86_ESI, X86_EBP, -12, 4);
+                       x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
                }
 
                /* EBP is restored by LEAVE */
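The hard-coded -32/-28/-20 frame offsets are gone in favor of G_STRUCT_OFFSET, so the epilog follows the MonoLMF layout automatically. G_STRUCT_OFFSET behaves like offsetof; a small illustration with a stand-in struct (the field list is invented, not the real MonoLMF):

#include <stdio.h>
#include <stddef.h>

struct fake_lmf {
        struct fake_lmf *previous_lmf;
        void *lmf_addr;
        unsigned long ebx, edi, esi, ebp;
};

#define STRUCT_OFFSET(t,f) ((long) offsetof (t, f))     /* roughly what G_STRUCT_OFFSET expands to */

int
main (void)
{
        long lmf_offset = - (long) sizeof (struct fake_lmf);

        /* %ebp-relative address of each saved field, derived from the type
         * instead of a magic constant, so layout changes cannot break it. */
        printf ("previous_lmf at %%ebp%+ld\n",
                lmf_offset + STRUCT_OFFSET (struct fake_lmf, previous_lmf));
        printf ("esi          at %%ebp%+ld\n",
                lmf_offset + STRUCT_OFFSET (struct fake_lmf, esi));
        return 0;
}
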
@@ -3388,7 +3912,7 @@ mono_arch_emit_epilog (MonoCompile *cfg)
        }
 
        /* Load returned vtypes into registers if needed */
-       cinfo = get_call_info (sig, FALSE);
+       cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
        if (cinfo->ret.storage == ArgValuetypeInReg) {
                for (quad = 0; quad < 2; quad ++) {
                        switch (cinfo->ret.pair_storage [quad]) {
@@ -3425,8 +3949,6 @@ mono_arch_emit_epilog (MonoCompile *cfg)
        else
                x86_ret (code);
 
-       g_free (cinfo);
-
        cfg->code_len = code - cfg->native_code;
 
        g_assert (cfg->code_len < cfg->code_size);
@@ -3490,19 +4012,10 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                                patch_info->type = MONO_PATCH_INFO_NONE;
                        }
                        else {
-                               guint32 got_reg = X86_EAX;
                                guint32 size;
 
                                /* Compute size of code following the push <OFFSET> */
-                               if (cfg->compile_aot) {
-                                       size = 5 + 6;
-                                       if (!cfg->got_var)
-                                               size += 32;
-                                       else if (cfg->got_var->opcode == OP_REGOFFSET)
-                                               size += 6;
-                               }
-                               else
-                                       size = 5 + 5;
+                               size = 5 + 5;
 
                                if ((code - cfg->native_code) - throw_ip < 126 - size) {
                                        /* Use the shorter form */
@@ -3520,32 +4033,11 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                                        exc_throw_start [nthrows] = code;
                                }
 
-                               if (cfg->compile_aot) {          
-                                       /*
-                                        * Since the patches are generated by the back end, there is                                     * no way to generate a got_var at this point.   
-                                        */
-                                       if (!cfg->got_var) {
-                                               x86_call_imm (code, 0);
-                                               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
-                                               x86_pop_reg (code, X86_EAX);
-                                               x86_alu_reg_imm (code, X86_ADD, X86_EAX, 0);
-                                       }
-                                       else {
-                                               if (cfg->got_var->opcode == OP_REGOFFSET)
-                                                       x86_mov_reg_membase (code, X86_EAX, cfg->got_var->inst_basereg, cfg->got_var->inst_offset, 4);
-                                               else
-                                                       got_reg = cfg->got_var->dreg;
-                                       }
-                               }
-
-                               x86_push_imm (code, exc_class->type_token);
+                               x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
                                patch_info->data.name = "mono_arch_throw_corlib_exception";
                                patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
                                patch_info->ip.i = code - cfg->native_code;
-                               if (cfg->compile_aot)
-                                       x86_call_membase (code, got_reg, 0xf0f0f0f0);
-                               else
-                                       x86_call_code (code, 0);
+                               x86_call_code (code, 0);
                                x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
                                while (buf < buf2)
                                        x86_nop (buf);
@@ -3579,66 +4071,6 @@ mono_arch_flush_register_windows (void)
 {
 }
 
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-
-static void
-setup_stack (MonoJitTlsData *tls)
-{
-       pthread_t self = pthread_self();
-       pthread_attr_t attr;
-       size_t stsize = 0;
-       struct sigaltstack sa;
-       guint8 *staddr = NULL;
-       guint8 *current = (guint8*)&staddr;
-
-       if (mono_running_on_valgrind ())
-               return;
-
-       /* Determine stack boundaries */
-       pthread_attr_init( &attr );
-#ifdef HAVE_PTHREAD_GETATTR_NP
-       pthread_getattr_np( self, &attr );
-#else
-#ifdef HAVE_PTHREAD_ATTR_GET_NP
-       pthread_attr_get_np( self, &attr );
-#elif defined(sun)
-       pthread_attr_getstacksize( &attr, &stsize );
-#else
-#error "Not implemented"
-#endif
-#endif
-#ifndef sun
-       pthread_attr_getstack( &attr, (void**)&staddr, &stsize );
-#endif
-
-       g_assert (staddr);
-
-       g_assert ((current > staddr) && (current < staddr + stsize));
-
-       tls->end_of_stack = staddr + stsize;
-
-       /*
-        * threads created by nptl does not seem to have a guard page, and
-        * since the main thread is not created by us, we can't even set one.
-        * Increasing stsize fools the SIGSEGV signal handler into thinking this
-        * is a stack overflow exception.
-        */
-       tls->stack_size = stsize + getpagesize ();
-
-       /* Setup an alternate signal stack */
-       tls->signal_stack = mmap (0, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-       tls->signal_stack_size = SIGNAL_STACK_SIZE;
-
-       g_assert (tls->signal_stack);
-
-       sa.ss_sp = tls->signal_stack;
-       sa.ss_size = SIGNAL_STACK_SIZE;
-       sa.ss_flags = SS_ONSTACK;
-       sigaltstack (&sa, NULL);
-}
-
-#endif
-
 /*
  * Support for fast access to the thread-local lmf structure using the GS
  * segment register on NPTL + kernel 2.6.x.
@@ -3668,50 +4100,52 @@ mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
                        if (thread_tls_offset >= 64)
                                thread_tls_offset = -1;
 #else
+#if MONO_XEN_OPT
+                       optimize_for_xen = access ("/proc/xen", F_OK) == 0;
+#endif
                        tls_offset_inited = TRUE;
                        appdomain_tls_offset = mono_domain_get_tls_offset ();
                        lmf_tls_offset = mono_get_lmf_tls_offset ();
+                       lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
                        thread_tls_offset = mono_thread_get_tls_offset ();
 #endif
                }
        }               
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-       setup_stack (tls);
-#endif
 }
 
 void
 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 {
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-       struct sigaltstack sa;
-
-       sa.ss_sp = tls->signal_stack;
-       sa.ss_size = SIGNAL_STACK_SIZE;
-       sa.ss_flags = SS_DISABLE;
-       sigaltstack  (&sa, NULL);
-
-       if (tls->signal_stack)
-               munmap (tls->signal_stack, SIGNAL_STACK_SIZE);
-#endif
 }
 
 void
 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
 {
+       MonoCallInst *call = (MonoCallInst*)inst;
+       CallInfo *cinfo = get_call_info (cfg, cfg->mempool, inst->signature, FALSE);
 
        /* add the this argument */
        if (this_reg != -1) {
-               MonoInst *this;
-               MONO_INST_NEW (cfg, this, OP_OUTARG);
-               this->type = this_type;
-               this->sreg1 = this_reg;
-               mono_bblock_add_inst (cfg->cbb, this);
+               if (cinfo->args [0].storage == ArgInIReg) {
+                       MonoInst *this;
+                       MONO_INST_NEW (cfg, this, OP_MOVE);
+                       this->type = this_type;
+                       this->sreg1 = this_reg;
+                       this->dreg = mono_regstate_next_int (cfg->rs);
+                       mono_bblock_add_inst (cfg->cbb, this);
+
+                       mono_call_inst_add_outarg_reg (cfg, call, this->dreg, cinfo->args [0].reg, FALSE);
+               }
+               else {
+                       MonoInst *this;
+                       MONO_INST_NEW (cfg, this, OP_OUTARG);
+                       this->type = this_type;
+                       this->sreg1 = this_reg;
+                       mono_bblock_add_inst (cfg->cbb, this);
+               }
        }
 
        if (vt_reg != -1) {
-               CallInfo * cinfo = get_call_info (inst->signature, FALSE);
                MonoInst *vtarg;
 
                if (cinfo->ret.storage == ArgValuetypeInReg) {
@@ -3726,18 +4160,154 @@ mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_re
                        vtarg->sreg1 = vt_reg;
                        mono_bblock_add_inst (cfg->cbb, vtarg);
                }
-               else {
+               else if (cinfo->ret.storage == ArgInIReg) {
+                       /* The return address is passed in a register */
+                       MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+                       vtarg->sreg1 = vt_reg;
+                       vtarg->dreg = mono_regstate_next_int (cfg->rs);
+                       mono_bblock_add_inst (cfg->cbb, vtarg);
+
+                       mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+               } else {
                        MonoInst *vtarg;
                        MONO_INST_NEW (cfg, vtarg, OP_OUTARG);
                        vtarg->type = STACK_MP;
                        vtarg->sreg1 = vt_reg;
                        mono_bblock_add_inst (cfg->cbb, vtarg);
                }
+       }
+}
+
+#ifdef MONO_ARCH_HAVE_IMT
+
+// Linear handler, the bsearch head compare is shorter
+//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
+//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
+//        x86_patch(ins,target)
+//[1 + 5] x86_jump_mem(inst,mem)
 
-               g_free (cinfo);
+#define CMP_SIZE 6
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 5
+#define JUMP_IMM_SIZE 6
+#define ENABLE_WRONG_METHOD_CHECK 0
+
+static int
+imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
+{
+       int i, distance = 0;
+       for (i = start; i < target; ++i)
+               distance += imt_entries [i]->chunk_size;
+       return distance;
+}
+
+/*
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+       int i;
+       int size = 0;
+       guint8 *code, *start;
+
+       for (i = 0; i < count; ++i) {
+               MonoIMTCheckItem *item = imt_entries [i];
+               if (item->is_equals) {
+                       if (item->check_target_idx) {
+                               if (!item->compare_done)
+                                       item->chunk_size += CMP_SIZE;
+                               item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
+                       } else {
+                               item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+                               item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
+#endif
+                       }
+               } else {
+                       item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
+                       imt_entries [item->check_target_idx]->compare_done = TRUE;
+               }
+               size += item->chunk_size;
+       }
+       code = mono_code_manager_reserve (domain->code_mp, size);
+       start = code;
+       for (i = 0; i < count; ++i) {
+               MonoIMTCheckItem *item = imt_entries [i];
+               item->code_target = code;
+               if (item->is_equals) {
+                       if (item->check_target_idx) {
+                               if (!item->compare_done)
+                                       x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+                               item->jmp_code = code;
+                               x86_branch8 (code, X86_CC_NE, 0, FALSE);
+                               x86_jump_mem (code, & (vtable->vtable [item->vtable_slot]));
+                       } else {
+                               /* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+                               x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+                               item->jmp_code = code;
+                               x86_branch8 (code, X86_CC_NE, 0, FALSE);
+#endif
+                               x86_jump_mem (code, & (vtable->vtable [item->vtable_slot]));
+#if ENABLE_WRONG_METHOD_CHECK
+                               x86_patch (item->jmp_code, code);
+                               x86_breakpoint (code);
+                               item->jmp_code = NULL;
+#endif
+                       }
+               } else {
+                       x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+                       item->jmp_code = code;
+                       if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
+                               x86_branch8 (code, X86_CC_GE, 0, FALSE);
+                       else
+                               x86_branch32 (code, X86_CC_GE, 0, FALSE);
+               }
+       }
+       /* patch the branches to get to the target items */
+       for (i = 0; i < count; ++i) {
+               MonoIMTCheckItem *item = imt_entries [i];
+               if (item->jmp_code) {
+                       if (item->check_target_idx) {
+                               x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+                       }
+               }
        }
+               
+       mono_stats.imt_thunks_size += code - start;
+       g_assert (code - start <= size);
+       return start;
+}
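The chunk-size bookkeeping above decides whether a branch to a later IMT entry can use the short imm8 form. A stand-alone sketch of that decision (constants copied from the #defines above; is_imm8 stands in for x86_is_imm8 and the entry layout is made up):

#include <stdio.h>

#define CMP_SIZE       6
#define BR_SMALL_SIZE  2
#define JUMP_IMM_SIZE  6

static int
is_imm8 (int v)
{
        return v >= -128 && v <= 127;
}

/* Distance from entry 'start' to entry 'target' is the sum of the chunk
 * sizes in between, exactly as imt_branch_distance computes it. */
static int
branch_distance (const int *chunk_size, int start, int target)
{
        int i, distance = 0;
        for (i = start; i < target; ++i)
                distance += chunk_size [i];
        return distance;
}

int
main (void)
{
        /* Hypothetical thunk: twelve "equals" entries (cmp + short branch + jump)
         * followed by one fall-through entry (jump only). */
        int chunk_size [13], i;

        for (i = 0; i < 12; ++i)
                chunk_size [i] = CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE;
        chunk_size [12] = JUMP_IMM_SIZE;

        for (i = 0; i < 12; ++i) {
                int d = branch_distance (chunk_size, i, 12);
                printf ("entry %2d -> last: %3d bytes, %s branch\n",
                        i, d, is_imm8 (d) ? "short" : "long");
        }
        return 0;
}
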
+
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+       return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
 }
 
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+       MonoMethodSignature *sig = mono_method_signature (method);
+       CallInfo *cinfo = get_call_info (NULL, NULL, sig, FALSE);
+       int this_argument_offset;
+       MonoObject *this_argument;
+
+       /* 
+        * this is the offset of the this arg from esp as saved at the start of 
+        * mono_arch_create_trampoline_code () in tramp-x86.c.
+        */
+       this_argument_offset = 5;
+       if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
+               this_argument_offset++;
+
+       this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
+
+       g_free (cinfo);
+       return this_argument;
+}
+#endif
 
 MonoInst*
 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
@@ -3772,6 +4342,9 @@ mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethod
                        ins->inst_i1 = args [1];
                }
 #endif
+       } else if (cmethod->klass == mono_defaults.thread_class &&
+                          strcmp (cmethod->name, "MemoryBarrier") == 0) {
+               MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
        } else if(cmethod->klass->image == mono_defaults.corlib &&
                           (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
                           (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
@@ -3800,7 +4373,7 @@ mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethod
                        ins->inst_i0 = args [0];
                        ins->inst_i1 = args [1];
                } else if (strcmp (cmethod->name, "Add") == 0 && fsig->params [0]->type == MONO_TYPE_I4) {
-                       MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
+                       MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_NEW_I4);
 
                        ins->inst_i0 = args [0];
                        ins->inst_i1 = args [1];
@@ -3872,12 +4445,14 @@ mono_arch_get_patch_offset (guint8 *code)
        }
 }
 
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
 {
        guint8 reg = 0;
        gint32 disp = 0;
 
+       *displacement = 0;
+
        /* go to the start of the call instruction
         *
         * address_byte = (m << 6) | (o << 3) | reg
@@ -3886,7 +4461,30 @@ mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
         * 0xff m=2,o=2 imm32
         */
        code -= 6;
-       if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
+
+       /* 
+        * A given byte sequence can match more than one case here, so we have to be
+        * really careful about the ordering of the cases. Longer sequences
+        * come first.
+        */
+       if ((code [-2] == 0x8b) && (x86_modrm_mod (code [-1]) == 0x2) && (code [4] == 0xff) && (x86_modrm_reg (code [5]) == 0x2) && (x86_modrm_mod (code [5]) == 0x0)) {
+               /*
+                * This is an interface call
+                * 8b 80 0c e8 ff ff       mov    0xffffe80c(%eax),%eax
+                * ff 10                   call   *(%eax)
+                */
+               reg = x86_modrm_rm (code [5]);
+               disp = 0;
+#ifdef MONO_ARCH_HAVE_IMT
+       } else if ((code [-2] == 0xba) && (code [3] == 0xff) && (x86_modrm_mod (code [4]) == 1) && (x86_modrm_reg (code [4]) == 2) && ((signed char)code [5] < 0)) {
+               /* IMT-based interface calls: with MONO_ARCH_IMT_REG == edx
+                * ba 14 f8 28 08          mov    $0x828f814,%edx
+                * ff 50 fc                call   *0xfffffffc(%eax)
+                */
+               reg = code [4] & 0x07;
+               disp = (signed char)code [5];
+#endif
+       } else if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
                reg = code [4] & 0x07;
                disp = (signed char)code [5];
        } else {
@@ -3897,7 +4495,7 @@ mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
                        return NULL;
                } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
                        /*
-                        * This is a interface call: should check the above code can't catch it earlier 
+                        * This is an interface call
                         * 8b 40 30   mov    0x30(%eax),%eax
                         * ff 10      call   *(%eax)
                         */
@@ -3908,25 +4506,136 @@ mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
                        return NULL;
        }
 
-       return (gpointer*)(((gint32)(regs [reg])) + disp);
+       *displacement = disp;
+       return regs [reg];
 }
 
-gpointer* 
-mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
 {
-       guint8 reg = 0;
-       gint32 disp = 0;
+       gpointer vt;
+       int displacement;
+       vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+       if (!vt)
+               return NULL;
+       return (gpointer*)((char*)vt + displacement);
+}
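The byte matching above is plain ModRM decoding; a small stand-alone sanity check (plain C) of the mod/reg/rm split for the two call encodings quoted in the comments, ff 10 (call *(%eax)) and ff 50 fc (call *-4(%eax)):

#include <assert.h>

/* ModRM layout: mod in bits 7-6, reg/opcode in bits 5-3, rm in bits 2-0. */
static int modrm_mod (unsigned char b) { return (b >> 6) & 0x3; }
static int modrm_reg (unsigned char b) { return (b >> 3) & 0x7; }
static int modrm_rm  (unsigned char b) { return b & 0x7; }

int
main (void)
{
        /* ff 10: call *(%eax) -- mod=0 (no displacement), /2 = call, rm=0 (%eax) */
        assert (modrm_mod (0x10) == 0 && modrm_reg (0x10) == 2 && modrm_rm (0x10) == 0);

        /* ff 50 fc: call *-4(%eax) -- mod=1 (disp8), /2 = call, rm=0 (%eax) */
        assert (modrm_mod (0x50) == 1 && modrm_reg (0x50) == 2 && modrm_rm (0x50) == 0);
        assert ((signed char) 0xfc == -4);      /* the disp8 byte */

        return 0;
}
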
 
-       code -= 7;
-       if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 3) && (x86_modrm_reg (code [1]) == X86_EAX) && (code [2] == 0x8b) && (code [3] == 0x40) && (code [5] == 0xff) && (code [6] == 0xd0)) {
-               reg = x86_modrm_rm (code [1]);
-               disp = code [4];
+gpointer
+mono_arch_get_this_arg_from_call (MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+       guint32 esp = regs [X86_ESP];
+       CallInfo *cinfo;
+       gpointer res;
 
-               if (reg == X86_EAX)
-                       return NULL;
-               else
-                       return (gpointer*)(((gint32)(regs [reg])) + disp);
+       cinfo = get_call_info (NULL, NULL, sig, FALSE);
+
+       /*
+        * The stack looks like:
+        * <other args>
+        * <this=delegate>
+        * <possible vtype return address>
+        * <return addr>
+        * <4 pointers pushed by mono_arch_create_trampoline_code ()>
+        */
+       res = (((MonoObject**)esp) [5 + (cinfo->args [0].offset / 4)]);
+       g_free (cinfo);
+       return res;
+}
+
+#define MAX_ARCH_DELEGATE_PARAMS 10
+
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+       guint8 *code, *start;
+
+       if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+               return NULL;
+
+       /* FIXME: Support more cases */
+       if (MONO_TYPE_ISSTRUCT (sig->ret))
+               return NULL;
+
+       /*
+        * The stack contains:
+        * <delegate>
+        * <return addr>
+        */
+
+       if (has_target) {
+               static guint8* cached = NULL;
+               mono_mini_arch_lock ();
+               if (cached) {
+                       mono_mini_arch_unlock ();
+                       return cached;
+               }
+               
+               start = code = mono_global_codeman_reserve (64);
+
+               /* Replace the this argument with the target */
+               x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
+               x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
+               x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
+               x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+
+               g_assert ((code - start) < 64);
+
+               cached = start;
+
+               mono_mini_arch_unlock ();
+       } else {
+               static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+               int i = 0;
+               /* 8 for mov_reg and jump, plus 8 for each parameter */
+               int code_reserve = 8 + (sig->param_count * 8);
+
+               for (i = 0; i < sig->param_count; ++i)
+                       if (!mono_is_regsize_var (sig->params [i]))
+                               return NULL;
+
+               mono_mini_arch_lock ();
+               code = cache [sig->param_count];
+               if (code) {
+                       mono_mini_arch_unlock ();
+                       return code;
+               }
+
+               /*
+                * The stack contains:
+                * <args in reverse order>
+                * <delegate>
+                * <return addr>
+                *
+                * and we need:
+                * <args in reverse order>
+                * <return addr>
+                * 
+                * without unbalancing the stack.
+                * So move each arg up one spot in the stack (overwriting the no-longer-needed
+                * 'this' arg), leaving the arg slot farthest from the return address behind as
+                * a placeholder, so the stack stays balanced when the callee pops its arguments.
+                */
+
+               start = code = mono_global_codeman_reserve (code_reserve);
+
+               /* store delegate for access to method_ptr */
+               x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
+
+               /* move args up */
+               for (i = 0; i < sig->param_count; ++i) {
+                       x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
+                       x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
+               }
+
+               x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+
+               g_assert ((code - start) < code_reserve);
+
+               cache [sig->param_count] = start;
+
+               mono_mini_arch_unlock ();
        }
 
-       return NULL;
+       return start;
 }
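A small simulation (plain C; an array stands in for the stack slots at and above %esp) of the argument shuffle emitted in the no-target path above: each argument moves one slot closer to the return address, overwriting the delegate's 'this', and the slot farthest from %esp is left behind as a harmless placeholder:

#include <assert.h>
#include <stdio.h>

int
main (void)
{
        /* Stack on entry, index 0 being the return address at (%esp):
         * [ret][delegate][arg0][arg1][arg2] */
        int stack [5] = { 1000, 2000, 0, 1, 2 };
        int param_count = 3, i;

        /* move args up: arg i goes from slot i+2 to slot i+1 */
        for (i = 0; i < param_count; ++i)
                stack [i + 1] = stack [i + 2];

        /* the callee now finds arg i at slot i+1, right past the return address */
        for (i = 0; i < param_count; ++i)
                assert (stack [i + 1] == i);

        /* the last slot keeps a stale copy; it is never read, and leaving it in
         * place keeps the stack depth unchanged so the callee's own argument
         * cleanup still balances */
        printf ("placeholder slot holds %d\n", stack [param_count + 1]);
        return 0;
}
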