Merge remote branch 'upstream/master'
[mono.git] / mono / mini / mini-amd64.c
index 4320946b8be554ee87cf6f8388446e780b444596..49c3a53c715324b74493490b6898225eaf745142 100644
@@ -23,6 +23,7 @@
 #include <mono/metadata/threads.h>
 #include <mono/metadata/profiler-private.h>
 #include <mono/metadata/mono-debug.h>
+#include <mono/metadata/gc-internal.h>
 #include <mono/utils/mono-math.h>
 #include <mono/utils/mono-mmap.h>
 
 #include "mini-amd64.h"
 #include "cpu-amd64.h"
 #include "debugger-agent.h"
-
-/* 
- * Can't define this in mini-amd64.h cause that would turn on the generic code in
- * method-to-ir.c.
- */
-#define MONO_ARCH_IMT_REG AMD64_R11
+#include "mini-gc.h"
 
 static gint lmf_tls_offset = -1;
 static gint lmf_addr_tls_offset = -1;
@@ -209,11 +205,278 @@ amd64_is_near_call (guint8 *code)
        return code [0] == 0xe8;
 }
 
+#ifdef __native_client_codegen__
+
+/* Keep track of instruction "depth", that is, the level of sub-instruction */
+/* for any given instruction.  For instance, amd64_call_reg resolves to     */
+/* amd64_call_reg_internal, which uses amd64_alu_* macros, etc.             */
+/* We only want to force bundle alignment for the top level instruction,    */
+/* so NaCl pseudo-instructions can be implemented with sub instructions.    */
+static guint32 nacl_instruction_depth;
+
+static guint32 nacl_rex_tag;
+static guint32 nacl_legacy_prefix_tag;
+
+void
+amd64_nacl_clear_legacy_prefix_tag ()
+{
+       TlsSetValue (nacl_legacy_prefix_tag, NULL);
+}
+
+void
+amd64_nacl_tag_legacy_prefix (guint8* code)
+{
+       if (TlsGetValue (nacl_legacy_prefix_tag) == NULL)
+               TlsSetValue (nacl_legacy_prefix_tag, code);
+}
+
+void
+amd64_nacl_tag_rex (guint8* code)
+{
+       TlsSetValue (nacl_rex_tag, code);
+}
+
+guint8*
+amd64_nacl_get_legacy_prefix_tag ()
+{
+       return (guint8*)TlsGetValue (nacl_legacy_prefix_tag);
+}
+
+guint8*
+amd64_nacl_get_rex_tag ()
+{
+       return (guint8*)TlsGetValue (nacl_rex_tag);
+}
+
+/* Increment the instruction "depth" described above */
+void
+amd64_nacl_instruction_pre ()
+{
+       intptr_t depth = (intptr_t) TlsGetValue (nacl_instruction_depth);
+       depth++;
+       TlsSetValue (nacl_instruction_depth, (gpointer)depth);
+}
+
+/* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */
+/* alignment if depth == 0 (top level instruction)                          */
+/* IN: start, end    pointers to instruction beginning and end              */
+/* OUT: start, end   pointers to beginning and end after possible alignment */
+/* GLOBALS: nacl_instruction_depth     defined above                        */
+void
+amd64_nacl_instruction_post (guint8 **start, guint8 **end)
+{
+       intptr_t depth = (intptr_t) TlsGetValue(nacl_instruction_depth);
+       depth--;
+       TlsSetValue (nacl_instruction_depth, (void*)depth);
+
+       g_assert ( depth >= 0 );
+       if (depth == 0) {
+               uintptr_t space_in_block;
+               uintptr_t instlen;
+               guint8 *prefix = amd64_nacl_get_legacy_prefix_tag ();
+               /* if legacy prefix is present, and if it was emitted before */
+               /* the start of the instruction sequence, adjust the start   */
+               if (prefix != NULL && prefix < *start) {
+                       g_assert (*start - prefix <= 3);/* only 3 are allowed */
+                       *start = prefix;
+               }
+               space_in_block = kNaClAlignment - ((uintptr_t)(*start) & kNaClAlignmentMask);
+               instlen = (uintptr_t)(*end - *start);
+               /* Only check for instructions which are less than        */
+               /* kNaClAlignment. The only instructions that should ever */
+               /* be that long are call sequences, which are already     */
+               /* padded out to align the return to the next bundle.     */
+               if (instlen > space_in_block && instlen < kNaClAlignment) {
+                       const size_t MAX_NACL_INST_LENGTH = kNaClAlignment;
+                       guint8 copy_of_instruction[MAX_NACL_INST_LENGTH];
+                       const size_t length = (size_t)((*end)-(*start));
+                       g_assert (length < MAX_NACL_INST_LENGTH);
+                       
+                       memcpy (copy_of_instruction, *start, length);
+                       *start = mono_arch_nacl_pad (*start, space_in_block);
+                       memcpy (*start, copy_of_instruction, length);
+                       *end = *start + length;
+               }
+               amd64_nacl_clear_legacy_prefix_tag ();
+               amd64_nacl_tag_rex (NULL);
+       }
+}
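
A worked example of the realignment above (a sketch; values assume kNaClAlignment == 32, kNaClAlignmentMask == 0x1f, and a hypothetical bundle base):

	/* *start at bundle offset 25  =>  space_in_block = 32 - 25 = 7   */
	/* instlen == 11               =>  11 > 7 && 11 < 32              */
	/* so the 11 bytes are copied aside, mono_arch_nacl_pad fills the */
	/* remaining 7 bytes with NOPs, and the instruction is re-emitted */
	/* at the start of the next 32-byte bundle.                       */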
+
+/* amd64_nacl_membase_handler: ensure all access to memory of the form      */
+/*   OFFSET(%rXX) is sandboxed.  For allowable base registers %rip, %rbp,   */
+/*   %rsp, and %r15, emit the membase as usual.  For all other registers,   */
+/*   make sure the upper 32-bits are cleared, and use that register in the  */
+/*   index field of a new address of this form: OFFSET(%r15,%eXX,1)         */
+/* IN:      code                                                            */
+/*             pointer to current instruction stream (in the                */
+/*             middle of an instruction, after opcode is emitted)           */
+/*          basereg/offset/dreg                                             */
+/*             operands of normal membase address                           */
+/* OUT:     code                                                            */
+/*             pointer to the end of the membase/memindex emit              */
+/* GLOBALS: nacl_rex_tag                                                    */
+/*             position in instruction stream that rex prefix was emitted   */
+/*          nacl_legacy_prefix_tag                                          */
+/*             (possibly NULL) position in instruction of legacy x86 prefix */
+void
+amd64_nacl_membase_handler (guint8** code, gint8 basereg, gint32 offset, gint8 dreg)
+{
+       gint8 true_basereg = basereg;
+
+       /* Cache these values, they might change  */
+       /* as new instructions are emitted below. */
+       guint8* rex_tag = amd64_nacl_get_rex_tag ();
+       guint8* legacy_prefix_tag = amd64_nacl_get_legacy_prefix_tag ();
+
+       /* 'basereg' is given masked to 0x7 at this point, so check */
+       /* the rex prefix to see if this is an extended register.   */
+       if ((rex_tag != NULL) && IS_REX(*rex_tag) && (*rex_tag & AMD64_REX_B)) {
+               true_basereg |= 0x8;
+       }
+
+#define X86_LEA_OPCODE (0x8D)
+
+       if (!amd64_is_valid_nacl_base (true_basereg) && (*(*code-1) != X86_LEA_OPCODE)) {
+               guint8* old_instruction_start;
+               
+               /* This will hold the 'mov %eXX, %eXX' that clears the upper */
+               /* 32-bits of the old base register (new index register)     */
+               guint8 buf[32];
+               guint8* buf_ptr = buf;
+               size_t insert_len;
+
+               g_assert (rex_tag != NULL);
+
+               if (IS_REX(*rex_tag)) {
+                       /* The old rex.B should be the new rex.X */
+                       if (*rex_tag & AMD64_REX_B) {
+                               *rex_tag |= AMD64_REX_X;
+                       }
+                       /* Since our new base is %r15 set rex.B */
+                       *rex_tag |= AMD64_REX_B;
+               } else {
+                       /* Shift the instruction by one byte  */
+                       /* so we can insert a rex prefix      */
+                       memmove (rex_tag + 1, rex_tag, (size_t)(*code - rex_tag));
+                       *code += 1;
+                       /* New rex prefix only needs rex.B for %r15 base */
+                       *rex_tag = AMD64_REX(AMD64_REX_B);
+               }
+
+               if (legacy_prefix_tag) {
+                       old_instruction_start = legacy_prefix_tag;
+               } else {
+                       old_instruction_start = rex_tag;
+               }
+               
+               /* Clears the upper 32-bits of the previous base register */
+               amd64_mov_reg_reg_size (buf_ptr, true_basereg, true_basereg, 4);
+               insert_len = buf_ptr - buf;
+               
+               /* Move the old instruction forward to make */
+               /* room for 'mov' stored in 'buf_ptr'       */
+               memmove (old_instruction_start + insert_len, old_instruction_start, (size_t)(*code - old_instruction_start));
+               *code += insert_len;
+               memcpy (old_instruction_start, buf, insert_len);
+
+               /* Sandboxed replacement for the normal membase_emit */
+               x86_memindex_emit (*code, dreg, AMD64_R15, offset, basereg, 0);
+               
+       } else {
+               /* Normal default behavior, emit membase memory location */
+               x86_membase_emit_body (*code, dreg, basereg, offset);
+       }
+}
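
A sketch of the rewrite performed here, with a hypothetical offset and base register (%rdx is not one of the allowed NaCl bases %rip/%rbp/%rsp/%r15):

	/* before:  mov 0x10(%rdx), %rax                                   */
	/* after:   mov %edx, %edx               ; clear upper 32 bits     */
	/*          mov 0x10(%r15,%rdx,1), %rax  ; %r15-based, sandboxed   */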
+
+
+static inline unsigned char*
+amd64_skip_nops (unsigned char* code)
+{
+       guint8 in_nop;
+       do {
+               in_nop = 0;
+               if (   code[0] == 0x90) {
+                       in_nop = 1;
+                       code += 1;
+               }
+               if (   code[0] == 0x66 && code[1] == 0x90) {
+                       in_nop = 1;
+                       code += 2;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x00) {
+                       in_nop = 1;
+                       code += 3;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x40 && code[3] == 0x00) {
+                       in_nop = 1;
+                       code += 4;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x44 && code[3] == 0x00
+                && code[4] == 0x00) {
+                       in_nop = 1;
+                       code += 5;
+               }
+               if (code[0] == 0x66 && code[1] == 0x0f
+                && code[2] == 0x1f && code[3] == 0x44
+                && code[4] == 0x00 && code[5] == 0x00) {
+                       in_nop = 1;
+                       code += 6;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x80 && code[3] == 0x00
+                && code[4] == 0x00 && code[5] == 0x00
+                && code[6] == 0x00) {
+                       in_nop = 1;
+                       code += 7;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x84 && code[3] == 0x00
+                && code[4] == 0x00 && code[5] == 0x00
+                && code[6] == 0x00 && code[7] == 0x00) {
+                       in_nop = 1;
+                       code += 8;
+               }
+       } while ( in_nop );
+       return code;
+}
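
The byte sequences matched above are the standard 1- to 8-byte x86-64 NOP encodings (0x90, 0x66 0x90, and the 0x0f 0x1f forms); amd64_patch below uses this to step over any bundle-alignment padding emitted in front of the instruction being patched.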
+
+guint8*
+mono_arch_nacl_skip_nops (guint8* code)
+{
+  return amd64_skip_nops(code);
+}
+
+#endif /*__native_client_codegen__*/
+
 static inline void 
 amd64_patch (unsigned char* code, gpointer target)
 {
        guint8 rex = 0;
 
+#ifdef __native_client_codegen__
+       code = amd64_skip_nops (code);
+#endif
+#if defined(__native_client_codegen__) && defined(__native_client__)
+       if (nacl_is_code_address (code)) {
+               /* For tail calls, code is patched after being installed */
+               /* but not through the normal "patch callsite" method.   */
+               unsigned char buf[kNaClAlignment];
+               unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~kNaClAlignmentMask);
+               int ret;
+               memcpy (buf, aligned_code, kNaClAlignment);
+               /* Patch a temp buffer of bundle size, */
+               /* then install to actual location.    */
+               amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
+               ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+               g_assert (ret == 0);
+               return;
+       }
+       target = nacl_modify_patch_target (target);
+#endif
+
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
                rex = code [0];
@@ -266,6 +529,7 @@ typedef struct {
        /* Only if storage == ArgValuetypeInReg */
        ArgStorage pair_storage [2];
        gint8 pair_regs [2];
+       int nregs;
 } ArgInfo;
 
 typedef struct {
@@ -275,6 +539,8 @@ typedef struct {
        guint32 freg_usage;
        gboolean need_stack_align;
        gboolean vtype_retaddr;
+       /* The index of the vret arg in the argument list */
+       int vret_arg_index;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
@@ -303,7 +569,9 @@ add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
 
     if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
-               (*stack_size) += sizeof (gpointer);
+               /* Since the same stack slot size is used for all arg */
+               /*  types, it needs to be big enough to hold them all */
+               (*stack_size) += sizeof(mgreg_t);
     }
     else {
                ainfo->storage = ArgInIReg;
@@ -325,7 +593,9 @@ add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
 
     if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
-               (*stack_size) += sizeof (gpointer);
+               /* Since the same stack slot size is used for both float */
+               /*  types, it needs to be big enough to hold them both */
+               (*stack_size) += sizeof(mgreg_t);
     }
     else {
                /* A double register */
@@ -420,6 +690,32 @@ merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
 
        return class1;
 }
+#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentAMD64;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskAMD64;
+
+/* Default alignment for Native Client is 32-byte. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
+
+/* mono_arch_nacl_pad: emit 'pad' bytes of alignment instructions at     */
+/* 'code', asserting that the padding does not cross a bundle boundary.  */
+guint8*
+mono_arch_nacl_pad(guint8 *code, int pad)
+{
+       const int kMaxPadding = 8; /* see amd64-codegen.h:amd64_padding_size() */
+
+       if (pad == 0) return code;
+       /* assertion: alignment cannot cross a block boundary */
+       g_assert (((uintptr_t)code & (~kNaClAlignmentMask)) ==
+                (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
+       while (pad >= kMaxPadding) {
+               amd64_padding (code, kMaxPadding);
+               pad -= kMaxPadding;
+       }
+       if (pad != 0) amd64_padding (code, pad);
+       return code;
+}
+#endif
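
As an illustration of the splitting above (hypothetical values): mono_arch_nacl_pad (code, 10) emits amd64_padding (code, 8) followed by amd64_padding (code, 2), i.e. an 8-byte NOP plus a 2-byte NOP, and the assertion guarantees those 10 bytes stay within a single 32-byte bundle.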
 
 static void
 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
@@ -427,6 +723,9 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                           guint32 *gr, guint32 *fr, guint32 *stack_size)
 {
        guint32 size, quad, nquads, i;
+       /* Keep track of the size used in each quad so we can */
+       /* use the right size when copying args/return vars.  */
+       guint32 quadsize [2] = {8, 8};
        ArgumentClass args [2];
        MonoMarshalType *info = NULL;
        MonoClass *klass;
@@ -455,6 +754,24 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
        }
 #endif
 
+       /* If this struct can't be split up naturally into 8-byte */
+       /* chunks (registers), pass it on the stack.              */
+       if (sig->pinvoke && !pass_on_stack) {
+               info = mono_marshal_load_type_info (klass);
+               g_assert(info);
+               guint32 align;
+               guint32 field_size;
+               for (i = 0; i < info->num_fields; ++i) {
+                       field_size = mono_marshal_type_size (info->fields [i].field->type, 
+                                                          info->fields [i].mspec, 
+                                                          &align, TRUE, klass->unicode);
+                       if ((info->fields [i].offset < 8) && (info->fields [i].offset + field_size) > 8) {
+                               pass_on_stack = TRUE;
+                               break;
+                       }
+               }
+       }
+
        if (pass_on_stack) {
                /* Always pass in memory */
                ainfo->offset = *stack_size;
@@ -554,6 +871,10 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                                if ((quad == 1) && (info->fields [i].offset < 8))
                                        continue;
 
+                               /* How far into this quad this data extends.*/
+                               /* (8 is size of quad) */
+                               quadsize [quad] = info->fields [i].offset + size - (quad * 8);
+
                                class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
                        }
                        g_assert (class1 != ARG_CLASS_NO_CLASS);
@@ -572,6 +893,7 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
 
                ainfo->storage = ArgValuetypeInReg;
                ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+               ainfo->nregs = nquads;
                for (quad = 0; quad < nquads; ++quad) {
                        switch (args [quad]) {
                        case ARG_CLASS_INTEGER:
@@ -590,7 +912,9 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                                if (*fr >= FLOAT_PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
-                                       ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
+                                       if (quadsize[quad] <= 4)
+                                               ainfo->pair_storage [quad] = ArgInFloatSSEReg;
+                                       else ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
                                        ainfo->pair_regs [quad] = *fr;
                                        (*fr) ++;
                                }
@@ -611,7 +935,7 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                        if (sig->pinvoke)
                                *stack_size += ALIGN_TO (info->native_size, 8);
                        else
-                               *stack_size += nquads * sizeof (gpointer);
+                               *stack_size += nquads * sizeof(mgreg_t);
                        ainfo->storage = ArgOnStack;
                }
        }
@@ -625,13 +949,14 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
  * Draft Version 0.23" document for more information.
  */
 static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
 {
-       guint32 i, gr, fr;
+       guint32 i, gr, fr, pstart;
        MonoType *ret_type;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;
+       gboolean is_pinvoke = sig->pinvoke;
 
        if (mp)
                cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
@@ -694,14 +1019,12 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign
                        if (cinfo->ret.storage == ArgOnStack) {
                                cinfo->vtype_retaddr = TRUE;
                                /* The caller passes the address where the value is stored */
-                               add_general (&gr, &stack_size, &cinfo->ret);
                        }
                        break;
                }
                case MONO_TYPE_TYPEDBYREF:
                        /* Same as a valuetype with size 24 */
-                       add_general (&gr, &stack_size, &cinfo->ret);
-                       ;
+                       cinfo->vtype_retaddr = TRUE;
                        break;
                case MONO_TYPE_VOID:
                        break;
@@ -710,9 +1033,31 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign
                }
        }
 
-       /* this */
-       if (sig->hasthis)
-               add_general (&gr, &stack_size, cinfo->args + 0);
+       pstart = 0;
+       /*
+        * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+        * the first argument, allowing 'this' to be always passed in the first arg reg.
+        * Also do this if the first argument is a reference type, since virtual calls
+        * are sometimes made using calli without sig->hasthis set, like in the delegate
+        * invoke wrappers.
+        */
+       if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+               if (sig->hasthis) {
+                       add_general (&gr, &stack_size, cinfo->args + 0);
+               } else {
+                       add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+                       pstart = 1;
+               }
+               add_general (&gr, &stack_size, &cinfo->ret);
+               cinfo->vret_arg_index = 1;
+       } else {
+               /* this */
+               if (sig->hasthis)
+                       add_general (&gr, &stack_size, cinfo->args + 0);
+
+               if (cinfo->vtype_retaddr)
+                       add_general (&gr, &stack_size, &cinfo->ret);
+       }
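
Concretely, assuming the non-Windows (System V) param_regs order used by add_general, an instance method returning a value type in memory is laid out as:

	/* args [0] ('this')      -> RDI (first integer arg reg)         */
	/* cinfo->ret (vret addr) -> RSI, and cinfo->vret_arg_index == 1 */
	/* remaining parameters   -> RDX, RCX, R8, R9, then the stack    */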
 
        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
                gr = PARAM_REGS;
@@ -722,7 +1067,7 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }
 
-       for (i = 0; i < sig->param_count; ++i) {
+       for (i = pstart; i < sig->param_count; ++i) {
                ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
                MonoType *ptype;
 
@@ -819,11 +1164,13 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign
        stack_size += 0x20;
 #endif
 
+#ifndef MONO_AMD64_NO_PUSHES
        if (stack_size & 0x8) {
                /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
                cinfo->need_stack_align = TRUE;
                stack_size += 8;
        }
+#endif
 
        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
@@ -846,7 +1193,7 @@ int
 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
 {
        int k;
-       CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
+       CallInfo *cinfo = get_call_info (NULL, NULL, csig);
        guint32 args_size = cinfo->stack_usage;
 
        /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
@@ -865,9 +1212,31 @@ mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJit
        return args_size;
 }
 
+gboolean
+mono_amd64_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
+{
+       CallInfo *c1, *c2;
+       gboolean res;
+
+       c1 = get_call_info (NULL, NULL, caller_sig);
+       c2 = get_call_info (NULL, NULL, callee_sig);
+       res = c1->stack_usage >= c2->stack_usage;
+       if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret) && c2->ret.storage != ArgValuetypeInReg)
+               /* An address on the callee's stack is passed as the first argument */
+               res = FALSE;
+
+       g_free (c1);
+       g_free (c2);
+
+       return res;
+}
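
For example, the check above keeps res TRUE only when the caller's stack argument area is at least as large as the callee's (c1->stack_usage >= c2->stack_usage), and always rejects a callee that returns a struct through a hidden address argument (c2->ret.storage != ArgValuetypeInReg).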
+
 static int 
 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
 {
+#if defined(MONO_CROSS_COMPILE)
+       return 0;
+#else
 #ifndef _MSC_VER
        __asm__ __volatile__ ("cpuid"
                : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
@@ -881,6 +1250,7 @@ cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
        *p_edx = info[3];
 #endif
        return 1;
+#endif
 }
 
 /*
@@ -914,6 +1284,12 @@ mono_arch_init (void)
        int flags;
 
        InitializeCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+       nacl_instruction_depth = TlsAlloc ();
+       TlsSetValue (nacl_instruction_depth, (gpointer)0);
+       nacl_rex_tag = TlsAlloc ();
+       nacl_legacy_prefix_tag = TlsAlloc ();
+#endif
 
 #ifdef MONO_ARCH_NOMAP32BIT
        flags = MONO_MMAP_READ;
@@ -946,6 +1322,11 @@ void
 mono_arch_cleanup (void)
 {
        DeleteCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+       TlsFree (nacl_instruction_depth);
+       TlsFree (nacl_rex_tag);
+       TlsFree (nacl_legacy_prefix_tag);
+#endif
 }
 
 /*
@@ -957,8 +1338,6 @@ mono_arch_cpu_optimizazions (guint32 *exclude_mask)
        int eax, ebx, ecx, edx;
        guint32 opts = 0;
 
-       /* FIXME: AMD64 */
-
        *exclude_mask = 0;
        /* Feature Flags function, flags returned in EDX. */
        if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
@@ -1070,7 +1449,7 @@ mono_arch_compute_omit_fp (MonoCompile *cfg)
        sig = mono_method_signature (cfg->method);
 
        if (!cfg->arch.cinfo)
-               cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+               cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
        cinfo = cfg->arch.cinfo;
 
        /*
@@ -1079,6 +1458,13 @@ mono_arch_compute_omit_fp (MonoCompile *cfg)
        cfg->arch.omit_fp = TRUE;
        cfg->arch.omit_fp_computed = TRUE;
 
+#ifdef __native_client_codegen__
+       /* NaCl modules may not change the value of RBP, so it cannot be */
+       /* used as a normal register, but it can be used as a frame pointer*/
+       cfg->disable_omit_fp = TRUE;
+       cfg->arch.omit_fp = FALSE;
+#endif
+
        if (cfg->disable_omit_fp)
                cfg->arch.omit_fp = FALSE;
 
@@ -1135,7 +1521,9 @@ mono_arch_get_global_int_regs (MonoCompile *cfg)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
  
                regs = g_list_prepend (regs, (gpointer)AMD64_R10);
                regs = g_list_prepend (regs, (gpointer)AMD64_R9);
@@ -1154,7 +1542,9 @@ mono_arch_get_global_int_regs (MonoCompile *cfg)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
 #ifdef HOST_WIN32
                regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
                regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
@@ -1190,7 +1580,9 @@ mono_arch_get_iregs_clobbered_by_call (MonoCallInst *call)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
 
                regs = g_list_prepend (regs, (gpointer)AMD64_R10);
                regs = g_list_prepend (regs, (gpointer)AMD64_R9);
@@ -1391,7 +1783,7 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                /* Reserve space for caller saved registers */
                for (i = 0; i < AMD64_NREG; ++i)
                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
-                               offset += sizeof (gpointer);
+                               offset += sizeof(mgreg_t);
                        }
        }
 
@@ -1520,12 +1912,12 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                                        ins->opcode = OP_REGOFFSET;
                                        ins->inst_basereg = cfg->frame_reg;
                                        /* These arguments are saved to the stack in the prolog */
-                                       offset = ALIGN_TO (offset, sizeof (gpointer));
+                                       offset = ALIGN_TO (offset, sizeof(mgreg_t));
                                        if (cfg->arch.omit_fp) {
                                                ins->inst_offset = offset;
-                                               offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        } else {
-                                               offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                                ins->inst_offset = - offset;
                                        }
                                        break;
@@ -1597,14 +1989,14 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                                ins->opcode = OP_REGOFFSET;
                                ins->inst_basereg = cfg->frame_reg;
                                /* These arguments are saved to the stack in the prolog */
-                               offset = ALIGN_TO (offset, sizeof (gpointer));
+                               offset = ALIGN_TO (offset, sizeof(mgreg_t));
                                if (cfg->arch.omit_fp) {
                                        ins->inst_offset = offset;
-                                       offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        // Arguments are not yet supported by the stack map creation code
                                        //cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
                                } else {
-                                       offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        ins->inst_offset = - offset;
                                        //cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
                                }
@@ -1624,7 +2016,7 @@ mono_arch_create_vars (MonoCompile *cfg)
        sig = mono_method_signature (cfg->method);
 
        if (!cfg->arch.cinfo)
-               cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+               cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
        cinfo = cfg->arch.cinfo;
 
        if (cinfo->ret.storage == ArgValuetypeInReg)
@@ -1668,7 +2060,7 @@ add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int re
        switch (storage) {
        case ArgInIReg:
                MONO_INST_NEW (cfg, ins, OP_MOVE);
-               ins->dreg = mono_alloc_ireg (cfg);
+               ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
                ins->sreg1 = tree->dreg;
                MONO_ADD_INS (cfg->cbb, ins);
                mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
@@ -1700,7 +2092,11 @@ arg_storage_to_load_membase (ArgStorage storage)
 {
        switch (storage) {
        case ArgInIReg:
+#if defined(__mono_ilp32__)
+               return OP_LOADI8_MEMBASE;
+#else
                return OP_LOAD_MEMBASE;
+#endif
        case ArgInDoubleSSEReg:
                return OP_LOADR8_MEMBASE;
        case ArgInFloatSSEReg:
@@ -1733,7 +2129,7 @@ emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
         * passed on the stack after the signature. So compensate by 
         * passing a different signature.
         */
-       tmp_sig = mono_metadata_signature_dup (call->signature);
+       tmp_sig = mono_metadata_signature_dup_full (cfg->method->klass->image, call->signature);
        tmp_sig->param_count -= call->signature->sentinelpos;
        tmp_sig->sentinelpos = 0;
        memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
@@ -1779,7 +2175,7 @@ mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
 
        n = sig->param_count + sig->hasthis;
 
-       cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+       cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
 
        linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
 
@@ -1804,6 +2200,7 @@ mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
        if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
                /* Vtype returned using a hidden argument */
                linfo->ret.storage = LLVMArgVtypeRetAddr;
+               linfo->vret_arg_index = cinfo->vret_arg_index;
        }
 
        for (i = 0; i < n; ++i) {
@@ -1873,7 +2270,7 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
        sig = call->signature;
        n = sig->param_count + sig->hasthis;
 
-       cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+       cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
 
        if (COMPILE_LLVM (cfg)) {
                /* We shouldn't be called in the llvm case */
@@ -1913,6 +2310,11 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
                                } else {
                                        MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
                                }
+                               if (cfg->compute_gc_maps) {
+                                       MonoInst *def;
+
+                                       EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
+                               }
                        }
                }
        }
@@ -2103,7 +2505,7 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
 
                        MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
                        load->inst_basereg = src->dreg;
-                       load->inst_offset = part * sizeof (gpointer);
+                       load->inst_offset = part * sizeof(mgreg_t);
 
                        switch (ainfo->pair_storage [part]) {
                        case ArgInIReg:
@@ -2180,6 +2582,11 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
                                MONO_ADD_INS (cfg->cbb, arg);
                        }
                }
+
+               if (cfg->compute_gc_maps) {
+                       MonoInst *def;
+                       EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, &ins->klass->byval_arg);
+               }
        }
 }
 
@@ -2286,7 +2693,7 @@ mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
        ArchDynCallInfo *info;
        CallInfo *cinfo;
 
-       cinfo = get_call_info (NULL, NULL, sig, FALSE);
+       cinfo = get_call_info (NULL, NULL, sig);
 
        if (!dyn_call_supported (sig, cinfo)) {
                g_free (cinfo);
@@ -2315,6 +2722,15 @@ mono_arch_dyn_call_free (MonoDynCallInfo *info)
        g_free (ainfo);
 }
 
+#if !defined(__native_client__)
+#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(greg)
+#else
+/* Correctly handle casts to/from 32-bit pointers without compiler warnings */
+#define PTR_TO_GREG(ptr) (mgreg_t)(uintptr_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(guint32)(greg)
+#endif
+
 /*
  * mono_arch_get_start_dyn_call:
  *
@@ -2334,7 +2750,7 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
 {
        ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
        DynCallArgs *p = (DynCallArgs*)buf;
-       int arg_index, greg, i;
+       int arg_index, greg, i, pindex;
        MonoMethodSignature *sig = dinfo->sig;
 
        g_assert (buf_len >= sizeof (DynCallArgs));
@@ -2344,20 +2760,23 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
 
        arg_index = 0;
        greg = 0;
+       pindex = 0;
 
-       if (dinfo->cinfo->vtype_retaddr)
-               p->regs [greg ++] = (mgreg_t)ret;
-
-       if (sig->hasthis) {
-               p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
+       if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
+               p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
+               if (!sig->hasthis)
+                       pindex = 1;
        }
 
-       for (i = 0; i < sig->param_count; i++) {
+       if (dinfo->cinfo->vtype_retaddr)
+               p->regs [greg ++] = PTR_TO_GREG(ret);
+
+       for (i = pindex; i < sig->param_count; i++) {
                MonoType *t = mono_type_get_underlying_type (sig->params [i]);
                gpointer *arg = args [arg_index ++];
 
                if (t->byref) {
-                       p->regs [greg ++] = (mgreg_t)*(arg);
+                       p->regs [greg ++] = PTR_TO_GREG(*(arg));
                        continue;
                }
 
@@ -2370,11 +2789,20 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
                case MONO_TYPE_PTR:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
+#if !defined(__mono_ilp32__)
                case MONO_TYPE_I8:
                case MONO_TYPE_U8:
+#endif
                        g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
-                       p->regs [greg ++] = (mgreg_t)*(arg);
+                       p->regs [greg ++] = PTR_TO_GREG(*(arg));
                        break;
+#if defined(__mono_ilp32__)
+               case MONO_TYPE_I8:
+               case MONO_TYPE_U8:
+                       g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
+                       p->regs [greg ++] = *(guint64*)(arg);
+                       break;
+#endif
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_U1:
                        p->regs [greg ++] = *(guint8*)(arg);
@@ -2397,7 +2825,7 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
                        break;
                case MONO_TYPE_GENERICINST:
                    if (MONO_TYPE_IS_REFERENCE (t)) {
-                               p->regs [greg ++] = (mgreg_t)*(arg);
+                               p->regs [greg ++] = PTR_TO_GREG(*(arg));
                                break;
                        } else {
                                /* Fall through */
@@ -2453,7 +2881,7 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
-               *(gpointer*)ret = (gpointer)res;
+               *(gpointer*)ret = GREG_TO_PTR(res);
                break;
        case MONO_TYPE_I1:
                *(gint8*)ret = res;
@@ -2483,7 +2911,7 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
                break;
        case MONO_TYPE_GENERICINST:
                if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
-                       *(gpointer*)ret = (gpointer)res;
+                       *(gpointer*)ret = GREG_TO_PTR(res);
                        break;
                } else {
                        /* Fall through */
@@ -2636,8 +3064,10 @@ emit_call_body (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointe
                         * not span cache lines. This is required for code patching to work on SMP
                         * systems.
                         */
-                       if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0)
-                               amd64_padding (code, 4 - ((guint32)(code + 1 - cfg->native_code) % 4));
+                       if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
+                               guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
+                               amd64_padding (code, pad_size);
+                       }
                        mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
                        amd64_call_code (code, 0);
                }
@@ -2823,7 +3253,7 @@ mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
                                        if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
                                                ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
                                                ins2->sreg1 = ins->dreg;
-                                       } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START)) {
+                                       } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
                                                /* Continue */
                                        } else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
                                                NULLIFY_INS (ins2);
@@ -2894,8 +3324,13 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                ins->sreg2 = temp->dreg;
                        }
                        break;
+#ifndef __mono_ilp32__
                case OP_LOAD_MEMBASE:
+#endif
                case OP_LOADI8_MEMBASE:
+#ifndef __native_client_codegen__
+               /*  Don't generate memindex opcodes (to simplify */
+               /*  read sandboxing) */
                        if (!amd64_is_imm32 (ins->inst_offset)) {
                                NEW_INS (cfg, ins, temp, OP_I8CONST);
                                temp->inst_c0 = ins->inst_offset;
@@ -2903,8 +3338,11 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
                                ins->inst_indexreg = temp->dreg;
                        }
+#endif
                        break;
+#ifndef __mono_ilp32__
                case OP_STORE_MEMBASE_IMM:
+#endif
                case OP_STOREI8_MEMBASE_IMM:
                        if (!amd64_is_imm32 (ins->inst_imm)) {
                                NEW_INS (cfg, ins, temp, OP_I8CONST);
@@ -3056,8 +3494,20 @@ mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
                if (cfg->param_area && cfg->arch.no_pushes)
                        amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
                amd64_cld (code);
+#if defined(__default_codegen__)
                amd64_prefix (code, X86_REP_PREFIX);
                amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+               /* NaCl stos pseudo-instruction */
+               amd64_codegen_pre(code);
+               /* First, clear the upper 32 bits of RDI (mov %edi, %edi)  */
+               amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+               /* Add %r15 to %rdi using lea, condition flags unaffected. */
+               amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
+               amd64_prefix (code, X86_REP_PREFIX);
+               amd64_stosl (code);
+               amd64_codegen_post(code);
+#endif /* __native_client_codegen__ */
                
                if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
                        amd64_pop_reg (code, AMD64_RDI);
@@ -3103,18 +3553,18 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
        case OP_VCALL2:
        case OP_VCALL2_REG:
        case OP_VCALL2_MEMBASE:
-               cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
+               cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature);
                if (cinfo->ret.storage == ArgValuetypeInReg) {
                        MonoInst *loc = cfg->arch.vret_addr_loc;
 
                        /* Load the destination address */
                        g_assert (loc->opcode == OP_REGOFFSET);
-                       amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
 
                        for (quad = 0; quad < 2; quad ++) {
                                switch (cinfo->ret.pair_storage [quad]) {
                                case ArgInIReg:
-                                       amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
+                                       amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof(mgreg_t)), cinfo->ret.pair_regs [quad], sizeof(mgreg_t));
                                        break;
                                case ArgInFloatSSEReg:
                                        amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
@@ -3190,6 +3640,15 @@ amd64_pop_reg (code, AMD64_RAX);
 
 #ifndef DISABLE_JIT
 
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void mono_nacl_gc()
+{
+#ifdef __native_client_gc__
+       __nacl_suspend_thread_if_needed();
+#endif
+}
+#endif
+
 void
 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 {
@@ -3223,6 +3682,21 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
        }
 
+#if defined(__native_client_codegen__)
+       /* For Native Client, all indirect call/jump targets must be */
+       /* 32-byte aligned.  Exception handler blocks are jumped to  */
+       /* indirectly as well.                                       */
+       gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
+                                     (bb->flags & BB_EXCEPTION_HANDLER);
+
+       if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
+               int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
+               if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
+               cfg->code_len += pad;
+               bb->native_offset = cfg->code_len;
+       }
+#endif  /*__native_client_codegen__*/
+
        if (cfg->verbose_level > 2)
                g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
 
@@ -3248,9 +3722,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
 
-               if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
+#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+
+               if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
                        cfg->code_size *= 2;
-                       cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+                       cfg->native_code = mono_realloc_native_code(cfg);
                        code = cfg->native_code + offset;
                        mono_jit_stats.code_reallocs++;
                }
@@ -3283,7 +3759,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_STOREI2_MEMBASE_REG:
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
                        break;
+               /* In AMD64 NaCl, pointers are 4 bytes, */
+               /*  so STORE_* != STOREI8_*. Likewise below. */
                case OP_STORE_MEMBASE_REG:
+                       amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer));
+                       break;
                case OP_STOREI8_MEMBASE_REG:
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
                        break;
@@ -3291,15 +3771,32 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
                        break;
                case OP_STORE_MEMBASE_IMM:
+#ifndef __native_client_codegen__
+                       /* In NaCl, this could be a PCONST type, which could */
+                       /* mean a pointer type was copied directly into the  */
+                       /* lower 32-bits of inst_imm, so for InvalidPtr==-1  */
+                       /* the value would be 0x00000000FFFFFFFF which is    */
+                       /* not proper for an imm32 unless you cast it.       */
+                       g_assert (amd64_is_imm32 (ins->inst_imm));
+#endif
+                       amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
+                       break;
                case OP_STOREI8_MEMBASE_IMM:
                        g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
                        break;
                case OP_LOAD_MEM:
+#ifdef __mono_ilp32__
+                       /* In ILP32, pointers are 4 bytes, so separate these */
+                       /* cases, use literal 8 below where we really want 8 */
+                       amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+                       amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
+                       break;
+#endif
                case OP_LOADI8_MEM:
                        // FIXME: Decompose this earlier
                        if (amd64_is_imm32 (ins->inst_imm))
-                               amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, sizeof (gpointer));
+                               amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
                        else {
                                amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
                                amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
@@ -3323,13 +3820,20 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE);
                        break;
                case OP_LOADU2_MEM:
+                       /* For NaCl, pointers are 4 bytes, so separate these */
+                       /* cases, use literal 8 below where we really want 8 */
                        amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
                        amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE);
                        break;
                case OP_LOAD_MEMBASE:
+                       g_assert (amd64_is_imm32 (ins->inst_offset));
+                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer));
+                       break;
                case OP_LOADI8_MEMBASE:
+                       /* Use literal 8 instead of sizeof pointer or */
+                       /* register, we really want 8 for this opcode */
                        g_assert (amd64_is_imm32 (ins->inst_offset));
-                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
+                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8);
                        break;
                case OP_LOADI4_MEMBASE:
                        amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
@@ -4017,14 +4521,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_AOTCONST:
                        mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
-                       amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
+                       amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
                        break;
                case OP_JUMP_TABLE:
                        mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
                        amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
                        break;
                case OP_MOVE:
-                       amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
+                       amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof(mgreg_t));
                        break;
                case OP_AMD64_SET_XMMREG_R4: {
                        amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
@@ -4036,10 +4540,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                }
                case OP_TAILCALL: {
-                       /*
-                        * Note: this 'frame destruction' logic is useful for tail calls, too.
-                        * Keep in sync with the code in emit_epilog.
-                        */
+                       MonoCallInst *call = (MonoCallInst*)ins;
                        int pos = 0, i;
 
                        /* FIXME: no tracing support... */
@@ -4057,20 +4558,32 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                                save_offset += 8;
                                        }
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+
+                               // FIXME:
+                               if (call->stack_usage)
+                                       NOT_IMPLEMENTED;
                        }
                        else {
                                for (i = 0; i < AMD64_NREG; ++i)
                                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
-                                               pos -= sizeof (gpointer);
-                       
-                               if (pos)
-                                       amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
+                                               pos -= sizeof(mgreg_t);
 
-                               /* Pop registers in reverse order */
-                               for (i = AMD64_NREG - 1; i > 0; --i)
+                               /* Restore callee-saved registers */
+                               for (i = AMD64_NREG - 1; i > 0; --i) {
                                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
-                                               amd64_pop_reg (code, i);
+                                               amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
+                                               pos += sizeof(mgreg_t);
                                        }
+                               }
+
+                               /* Copy arguments on the stack to our argument area */
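+                               /* The first incoming stack argument is at [rbp + 16], above the saved rbp and the return address */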
+                               for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
+                                       amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
+                                       amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
+                               }
+                       
+                               if (pos)
+                                       amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
 
                                amd64_leave (code);
                        }
@@ -4082,6 +4595,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        else
                                amd64_set_reg_template (code, AMD64_R11);
                        amd64_jump_reg (code, AMD64_R11);
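+                       /* Record the native offset of this call site so the precise GC maps (mini-gc) can describe it */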
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        break;
                }
                case OP_CHECK_THIS:
@@ -4090,7 +4605,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_ARGLIST: {
                        amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
-                       amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
+                       amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
                        break;
                }
                case OP_CALL:
@@ -4126,6 +4641,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method, FALSE);
                        else
                                code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
@@ -4174,6 +4691,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        }
 
                        amd64_call_reg (code, ins->sreg1);
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
@@ -4186,23 +4705,9 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_CALL_MEMBASE:
                        call = (MonoCallInst*)ins;
 
-                       if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
-                               /* 
-                                * Can't use R11 because it is clobbered by the trampoline 
-                                * code, and the reg value is needed by get_vcall_slot_addr.
-                                */
-                               amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
-                               ins->sreg1 = AMD64_RAX;
-                       }
-
-                       /* 
-                        * Emit a few nops to simplify get_vcall_slot ().
-                        */
-                       amd64_nop (code);
-                       amd64_nop (code);
-                       amd64_nop (code);
-
                        amd64_call_membase (code, ins->sreg1, ins->inst_offset);
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
@@ -4223,11 +4728,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                        /* Set argument registers */
                        for (i = 0; i < PARAM_REGS; ++i)
-                               amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (gpointer), 8);
+                               amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
                        
                        /* Make the call */
                        amd64_call_reg (code, AMD64_R10);
 
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
+
                        /* Save result */
                        amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
                        amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
@@ -4323,12 +4831,16 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
                        code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
                                             (gpointer)"mono_arch_throw_exception", FALSE);
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        break;
                }
                case OP_RETHROW: {
                        amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
                        code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
                                             (gpointer)"mono_arch_rethrow_exception", FALSE);
+                       ins->flags |= MONO_INST_GC_CALLSITE;
+                       ins->backend.pc_offset = code - cfg->native_code;
                        break;
                }
                case OP_CALL_HANDLER: 
@@ -4341,8 +4853,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
                        break;
                case OP_START_HANDLER: {
+                       /* Even though we're saving RSP, use sizeof */
+                       /* gpointer because spvar is of type IntPtr */
+                       /* see: mono_create_spvar_for_region */
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, 8);
+                       amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));
 
                        if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
                                 MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY)) &&
@@ -4353,13 +4868,13 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
                case OP_ENDFINALLY: {
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
                        amd64_ret (code);
                        break;
                }
                case OP_ENDFILTER: {
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
                        /* The local allocator will put the result into RAX */
                        amd64_ret (code);
                        break;
@@ -4847,7 +5362,9 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                }
                case OP_MEMORY_BARRIER: {
-                       /* Not needed on amd64 */
+                       /* http://blogs.sun.com/dave/resource/NHM-Pipeline-Blog-V2.txt */
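+                       /* A lock-prefixed add of zero to the stack top acts as a full memory barrier and is typically cheaper than mfence */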
+                       x86_prefix (code, X86_LOCK_PREFIX);
+                       amd64_alu_membase_imm (code, X86_ADD, AMD64_RSP, 0, 0);
                        break;
                }
                case OP_ATOMIC_ADD_I4:
@@ -4972,6 +5489,62 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
                        break;
                }
+               case OP_CARD_TABLE_WBARRIER: {
+                       int ptr = ins->sreg1;
+                       int value = ins->sreg2;
+                       guchar *br;
+                       int nursery_shift, card_table_shift;
+                       gpointer card_table_mask;
+                       size_t nursery_size;
+
+                       gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask);
+                       guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size);
+
+                       /* If either points to the stack we can simply avoid the WB. This happens due to
+                        * optimizations revealing a stack store that was not visible when op_cardtable was emitted.
+                        */
+                       if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP)
+                               continue;
+
+                       /*
+                        * We need one register we can clobber: we choose RDX, and require sreg1
+                        * to be fixed to RAX to work around limitations in the local register allocator.
+                        * sreg2 might get allocated to RDX, but that is not a problem since
+                        * we use it before clobbering RDX.
+                        */
+                       g_assert (ins->sreg1 == AMD64_RAX);
+
+                       /*
+                        * This is the code we produce:
+                        *
+                        *   rdx = value
+                        *   rdx >>= nursery_shift
+                        *   cmp rdx, (nursery_start >> nursery_shift)
+                        *   jne done
+                        *   rdx = ptr
+                        *   rdx >>= card_table_shift
+                        *   rdx += cardtable
+                        *   [rdx] = 1
+                        * done:
+                        */
+
+                       if (value != AMD64_RDX)
+                               amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
+                       amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
+                       amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, nursery_start >> nursery_shift);
+                       br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
+                       amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
+                       amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
+                       if (card_table_mask)
+                               amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask);
+
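+                       /* The card table address is stored after the method code and loaded RIP-relative; */
+                       /* the displacement is patched in mono_arch_emit_exceptions ().                     */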
+                       mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table);
+                       amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
+
+                       amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
+                       x86_patch (br, code);
+                       break;
+               }
 #ifdef MONO_ARCH_SIMD_INTRINSICS
                /* TODO: Some of these IR opcodes are marked as no clobber when they indeed do. */
                case OP_ADDPS:
@@ -5557,15 +6130,32 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
                        break;
                }
+               case OP_NACL_GC_SAFE_POINT: {
+#if defined(__native_client_codegen__)
+                       code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+#endif
+                       break;
+               }
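+               /* The OP_GC_* liveness opcodes emit no code; only their native offset is */
+               /* recorded so the precise GC maps (mini-gc) know where slots are live.   */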
+               case OP_GC_LIVENESS_DEF:
+               case OP_GC_LIVENESS_USE:
+               case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+                       ins->backend.pc_offset = code - cfg->native_code;
+                       break;
+               case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+                       ins->backend.pc_offset = code - cfg->native_code;
+                       bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+                       break;
                default:
                        g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
                        g_assert_not_reached ();
                }
 
                if ((code - cfg->native_code - offset) > max_len) {
+#if !defined(__native_client_codegen__)
                        g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
                                   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
                        g_assert_not_reached ();
+#endif
                }
               
                last_ins = ins;
@@ -5695,10 +6285,27 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        gint32 lmf_offset = cfg->arch.lmf_offset;
        gboolean args_clobbered = FALSE;
        gboolean trace = FALSE;
+#ifdef __native_client_codegen__
+       guint alignment_check;
+#endif
 
        cfg->code_size =  MAX (cfg->header->code_size * 4, 10240);
 
+#if defined(__default_codegen__)
        code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
+       /* native_code_alloc is not 32-byte aligned, native_code is. */
+       cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
+
+       /* Align native_code to the next kNaClAlignment boundary. */
+       cfg->native_code = (uintptr_t)cfg->native_code_alloc + kNaClAlignment;
+       cfg->native_code = (uintptr_t)cfg->native_code & ~kNaClAlignmentMask;
+
+       code = cfg->native_code;
+
+       alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
+       g_assert (alignment_check == 0);
+#endif
 
        if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
                trace = TRUE;
@@ -5730,6 +6337,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        // IP saved at CFA - 8
        mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset);
        async_exc_point (code);
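+       /* The return address slot can never hold a managed reference */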
+       mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
 
        if (!cfg->arch.omit_fp) {
                amd64_push_reg (code, AMD64_RBP);
@@ -5740,8 +6348,10 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 #ifdef HOST_WIN32
                mono_arch_unwindinfo_add_push_nonvol (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
 #endif
+               /* These are handled automatically by the stack marking code */
+               mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
                
-               amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
+               amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
                mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
                async_exc_point (code);
 #ifdef HOST_WIN32
@@ -5756,10 +6366,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                for (i = 0; i < AMD64_NREG; ++i)
                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
                                amd64_push_reg (code, i);
-                               pos += sizeof (gpointer);
+                               pos += 8; /* AMD64 push always stores 8 bytes; there is no way to change it */
                                offset += 8;
                                mono_emit_unwind_op_offset (cfg, code, i, - offset);
                                async_exc_point (code);
+
+                               /* These are handled automatically by the stack marking code */
+                               mini_gc_set_slot_type_from_cfa (cfg, - offset, SLOT_NOREF);
                        }
        }
 
@@ -5769,21 +6382,28 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                if (cfg->arch.omit_fp)
                        // FIXME:
                        g_assert_not_reached ();
-               cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (gpointer));
+               cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof(mgreg_t));
        }
 
        if (cfg->arch.omit_fp) {
                /* 
-                * On enter, the stack is misaligned by the the pushing of the return
+                * On enter, the stack is misaligned by the pushing of the return
                 * address. It is either made aligned by the pushing of %rbp, or by
                 * this.
                 */
                alloc_size = ALIGN_TO (cfg->stack_offset, 8);
-               if ((alloc_size % 16) == 0)
+               if ((alloc_size % 16) == 0) {
                        alloc_size += 8;
+                       /* Mark the padding slot as NOREF */
+                       mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (mgreg_t), SLOT_NOREF);
+               }
        } else {
                alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
-
+               if (cfg->stack_offset != alloc_size) {
+                       /* Mark the padding slot as NOREF */
+                       mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF);
+               }
+               cfg->arch.sp_fp_offset = alloc_size;
                alloc_size -= pos;
        }
 
@@ -5800,7 +6420,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
                        while (required_code_size >= (cfg->code_size - offset))
                                cfg->code_size *= 2;
-                       cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+                       cfg->native_code = mono_realloc_native_code (cfg);
                        code = cfg->native_code + offset;
                        mono_jit_stats.code_reallocs++;
                }
@@ -5866,8 +6486,20 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
 
                amd64_cld (code);
+#if defined(__default_codegen__)
+               amd64_prefix (code, X86_REP_PREFIX);
+               amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+               /* NaCl stos pseudo-instruction */
+               amd64_codegen_pre (code);
+               /* First, clear the upper 32 bits of RDI (mov %edi, %edi)  */
+               amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+               /* Add %r15 to %rdi using lea, condition flags unaffected. */
+               amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
                amd64_prefix (code, X86_REP_PREFIX);
                amd64_stosl (code);
+               amd64_codegen_post (code);
+#endif /* __native_client_codegen__ */
 
                amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
                amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
@@ -5895,7 +6527,9 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
                        case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
                        case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
+#ifndef __native_client_codegen__
                        case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
+#endif
 #ifdef HOST_WIN32
                        case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
                        case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
@@ -5911,6 +6545,26 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                        mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
                        }
                }
+
+               /* These can't contain refs */
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
+
+               /* These are handled automatically by the stack marking code */
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
+#ifdef HOST_WIN32
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
+               mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
+#endif
+
        }
 
        /* Save callee saved registers */
@@ -5924,6 +6578,10 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
                                amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
                                mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+
+                               /* These are handled automatically by the stack marking code */
+                               mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+
                                save_area_offset += 8;
                                async_exc_point (code);
                        }
@@ -5934,7 +6592,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
                                (cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
 
-               amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 8);
+               amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
        }
 
        /* compute max_length in order to use short forward jumps */
@@ -5949,8 +6607,22 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        /* max alignment for loops */
                        if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
                                max_length += LOOP_ALIGNMENT;
+#ifdef __native_client_codegen__
+                       /* max alignment for native client */
+                       max_length += kNaClAlignment;
+#endif
 
                        MONO_BB_FOR_EACH_INS (bb, ins) {
+#ifdef __native_client_codegen__
+                               {
+                                       int space_in_block = kNaClAlignment -
+                                               ((max_length + cfg->code_len) & kNaClAlignmentMask);
+                                       int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+                                       if (space_in_block < max_len && max_len < kNaClAlignment) {
+                                               max_length += space_in_block;
+                                       }
+                               }
+#endif  /*__native_client_codegen__*/
                                max_length += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
                        }
 
@@ -6002,13 +6674,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                for (quad = 0; quad < 2; quad ++) {
                                        switch (ainfo->pair_storage [quad]) {
                                        case ArgInIReg:
-                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
                                                break;
                                        case ArgInFloatSSEReg:
-                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgInDoubleSSEReg:
-                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgNone:
                                                break;
@@ -6054,13 +6726,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                for (quad = 0; quad < 2; quad ++) {
                                        switch (ainfo->pair_storage [quad]) {
                                        case ArgInIReg:
-                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
                                                break;
                                        case ArgInFloatSSEReg:
-                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgInDoubleSSEReg:
-                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgNone:
                                                break;
@@ -6188,13 +6860,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        }
 
                        /* Save lmf_addr */
-                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
+                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
                        /* Save previous_lmf */
-                       amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
-                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
+                       amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
+                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
                        /* Set new lmf */
                        amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
-                       amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
+                       amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
                }
        }
 
@@ -6305,7 +6977,7 @@ mono_arch_emit_epilog (MonoCompile *cfg)
 
        while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
                cfg->code_size *= 2;
-               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               cfg->native_code = mono_realloc_native_code (cfg);
                mono_jit_stats.code_reallocs++;
        }
 
@@ -6341,14 +7013,14 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                         * through the mono_lmf_addr TLS variable.
                         */
                        /* reg = previous_lmf */
-                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
+                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
                        x86_prefix (code, X86_FS_PREFIX);
                        amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
                } else {
                        /* Restore previous lmf */
-                       amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
-                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
-                       amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
+                       amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+                       amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
                }
 
                /* Restore callee-saved regs */
@@ -6368,7 +7040,11 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                        amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
                }
                if (cfg->used_int_regs & (1 << AMD64_R15)) {
+#if defined(__default_codegen__)
                        amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#elif defined(__native_client_codegen__)
+                       g_assert_not_reached();
+#endif
                }
 #ifdef HOST_WIN32
                if (cfg->used_int_regs & (1 << AMD64_RDI)) {
@@ -6392,10 +7068,10 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                else {
                        for (i = 0; i < AMD64_NREG; ++i)
                                if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
-                                       pos -= sizeof (gpointer);
+                                       pos -= sizeof(mgreg_t);
 
                        if (pos) {
-                               if (pos == - sizeof (gpointer)) {
+                               if (pos == - sizeof(mgreg_t)) {
                                        /* Only one register, so avoid lea */
                                        for (i = AMD64_NREG - 1; i > 0; --i)
                                                if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
@@ -6424,13 +7100,13 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                for (quad = 0; quad < 2; quad ++) {
                        switch (ainfo->pair_storage [quad]) {
                        case ArgInIReg:
-                               amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
+                               amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)), sizeof(mgreg_t));
                                break;
                        case ArgInFloatSSEReg:
-                               amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
                                break;
                        case ArgInDoubleSSEReg:
-                               amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
                                break;
                        case ArgNone:
                                break;
@@ -6472,11 +7148,20 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                        code_size += 8 + 15; /* sizeof (double) + alignment */
                if (patch_info->type == MONO_PATCH_INFO_R4)
                        code_size += 4 + 15; /* sizeof (float) + alignment */
+               if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR)
+                       code_size += 8 + 7; /*sizeof (void*) + alignment */
        }
 
+#ifdef __native_client_codegen__
+       /* Give us extra room on Native Client.  This could be   */
+       /* more carefully calculated, but bundle alignment makes */
+       /* it much trickier, so *2 like other places is good.    */
+       code_size *= 2;
+#endif
+
        while (cfg->code_len + code_size > (cfg->code_size - 16)) {
                cfg->code_size *= 2;
-               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               cfg->native_code = mono_realloc_native_code (cfg);
                mono_jit_stats.code_reallocs++;
        }
 
@@ -6516,7 +7201,7 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                                        exc_classes [nthrows] = exc_class;
                                        exc_throw_start [nthrows] = code;
                                }
-                               amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token);
+                               amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
 
                                patch_info->type = MONO_PATCH_INFO_NONE;
 
@@ -6537,35 +7222,104 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                        /* do nothing */
                        break;
                }
+               g_assert(code < cfg->native_code + cfg->code_size);
        }
 
        /* Handle relocations with RIP relative addressing */
        for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
                gboolean remove = FALSE;
+               guint8 *orig_code = code;
 
                switch (patch_info->type) {
                case MONO_PATCH_INFO_R8:
                case MONO_PATCH_INFO_R4: {
-                       guint8 *pos;
+                       guint8 *pos, *patch_pos, *target_pos;
 
                        /* The SSE opcodes require a 16 byte alignment */
+#if defined(__default_codegen__)
                        code = (guint8*)ALIGN_TO (code, 16);
+#elif defined(__native_client_codegen__)
+                       {
+                               /* Pad this out with HLT instructions  */
+                               /* or we can get garbage bytes emitted */
+                               /* which will fail validation          */
+                               guint8 *aligned_code;
+                               /* extra align to make room for  */
+                               /* mov/push below                      */
+                               int extra_align = patch_info->type == MONO_PATCH_INFO_R8 ? 2 : 1;
+                               aligned_code = (guint8*)ALIGN_TO (code + extra_align, 16);
+                               /* The technique of hiding data in an  */
+                               /* instruction has a problem here: we  */
+                               /* need the data aligned to a 16-byte  */
+                               /* boundary but the instruction cannot */
+                               /* cross the bundle boundary, so only  */
+                               /* odd multiples of 16 can be used     */
+                               if ((intptr_t)aligned_code % kNaClAlignment == 0) {
+                                       aligned_code += 16;
+                               }
+                               while (code < aligned_code) {
+                                       *(code++) = 0xf4; /* hlt */
+                               }
+                       }       
+#endif
 
                        pos = cfg->native_code + patch_info->ip.i;
-
-                       if (IS_REX (pos [1]))
-                               *(guint32*)(pos + 5) = (guint8*)code - pos - 9;
-                       else
-                               *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
+                       if (IS_REX (pos [1])) {
+                               patch_pos = pos + 5;
+                               target_pos = code - pos - 9;
+                       }
+                       else {
+                               patch_pos = pos + 4;
+                               target_pos = code - pos - 8;
+                       }
 
                        if (patch_info->type == MONO_PATCH_INFO_R8) {
+#ifdef __native_client_codegen__
+                               /* Hide 64-bit data in a         */
+                               /* "mov imm64, r11" instruction. */
+                               /* Write it before the start of  */
+                               /* the data.                     */
+                               *(code-2) = 0x49; /* prefix      */
+                               *(code-1) = 0xbb; /* mov X, %r11 */
+#endif
                                *(double*)code = *(double*)patch_info->data.target;
                                code += sizeof (double);
                        } else {
+#ifdef __native_client_codegen__
+                               /* Hide 32-bit data in a        */
+                               /* "push imm32" instruction.    */
+                               *(code-1) = 0x68; /* push */
+#endif
                                *(float*)code = *(float*)patch_info->data.target;
                                code += sizeof (float);
                        }
 
+                       *(guint32*)(patch_pos) = target_pos;
+
+                       remove = TRUE;
+                       break;
+               }
+               case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
+                       guint8 *pos;
+
+                       if (cfg->compile_aot)
+                               continue;
+
+                       /* Loading is faster from aligned addresses. */
+                       code = (guint8*)ALIGN_TO (code, 8);
+                       memset (orig_code, 0, code - orig_code);
+
+                       pos = cfg->native_code + patch_info->ip.i;
+
+                       /*alu_op [rex] modr/m imm32 - 7 or 8 bytes */
+                       if (IS_REX (pos [1]))
+                               *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
+                       else
+                               *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
+
+                       *(gpointer*)code = (gpointer)patch_info->data.target;
+                       code += sizeof (gpointer);
+
                        remove = TRUE;
                        break;
                }
@@ -6584,6 +7338,7 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                                tmp->next = patch_info->next;
                        }
                }
+               g_assert (code < cfg->native_code + cfg->code_size);
        }
 
        cfg->code_len = code - cfg->native_code;
@@ -6609,7 +7364,7 @@ mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean ena
                /* Allocate a new area on the stack and save arguments there */
                sig = mono_method_signature (cfg->method);
 
-               cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+               cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
 
                n = sig->param_count + sig->hasthis;
 
@@ -6901,123 +7656,56 @@ mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guin
        return can_write;
 }
 
-gpointer
-mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
-{
-       guint8 buf [10];
-       guint32 reg;
-       gint32 disp;
-       guint8 rex = 0;
-       MonoJitInfo *ji = NULL;
-
-#ifdef ENABLE_LLVM
-       /* code - 9 might be before the start of the method */
-       /* FIXME: Avoid this expensive call somehow */
-       ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
-#endif
-
-       mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
-       code = buf + 9;
-
-       *displacement = 0;
+#if defined(__native_client_codegen__)
+/* For membase calls, we want the base register. For Native Client,  */
+/* all indirect calls have the following sequence with the given sizes: */
+/* mov %eXX,%eXX                               [2-3]   */
+/* mov disp(%r15,%rXX,scale),%r11d             [4-8]   */
+/* and $0xffffffffffffffe0,%r11d               [4]     */
+/* add %r15,%r11                               [3]     */
+/* callq *%r11                                 [3]     */
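+/* The and masks the call target down to a 32-byte bundle boundary and the  */
+/* add rebases it against the sandbox base kept in %r15.                    */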
 
-       code -= 7;
 
-       /* 
-        * A given byte sequence can match more than case here, so we have to be
-        * really careful about the ordering of the cases. Longer sequences
-        * come first.
-        * There are two types of calls:
-        * - direct calls: 0xff address_byte 8/32 bits displacement
-        * - indirect calls: nop nop nop <call>
-        * The nops make sure we don't confuse the instruction preceeding an indirect
-        * call with a direct call.
-        */
-       if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
-               /* call OFFSET(%rip) */
-               disp = *(guint32*)(code + 3);
-               return (gpointer*)(code + disp + 7);
-       } else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2) && (amd64_sib_index (code [2]) == 4) && (amd64_sib_scale (code [2]) == 0)) {
-               /* call *[reg+disp32] using indexed addressing */
-               /* The LLVM JIT emits this, and we emit it too for %r12 */
-               if (IS_REX (code [-1])) {
-                       rex = code [-1];
-                       g_assert (amd64_rex_x (rex) == 0);
-               }                       
-               reg = amd64_sib_base (code [2]);
-               disp = *(gint32*)(code + 3);
-       } else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
-               /* call *[reg+disp32] */
-               if (IS_REX (code [0]))
-                       rex = code [0];
-               reg = amd64_modrm_rm (code [2]);
-               disp = *(gint32*)(code + 3);
-               /* R10 is clobbered by the IMT thunk code */
-               g_assert (reg != AMD64_R10);
-       } else if (code [2] == 0xe8) {
-               /* call <ADDR> */
-               return NULL;
-       } else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1) && (amd64_sib_index (code [5]) == 4) && (amd64_sib_scale (code [5]) == 0)) {
-               /* call *[r12+disp8] using indexed addressing */
-               if (IS_REX (code [2]))
-                       rex = code [2];
-               reg = amd64_sib_base (code [5]);
-               disp = *(gint8*)(code + 6);
-       } else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
-               /* call *%reg */
-               return NULL;
-       } else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
-               /* call *[reg+disp8] */
-               if (IS_REX (code [3]))
-                       rex = code [3];
-               reg = amd64_modrm_rm (code [5]);
-               disp = *(gint8*)(code + 6);
-               //printf ("B: [%%r%d+0x%x]\n", reg, disp);
-       }
-       else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
-               /* call *%reg */
-               if (IS_REX (code [4]))
-                       rex = code [4];
-               reg = amd64_modrm_rm (code [6]);
-               disp = 0;
-       }
-       else
-               g_assert_not_reached ();
-
-       reg += amd64_rex_b (rex);
-
-       /* R11 is clobbered by the trampoline code */
-       g_assert (reg != AMD64_R11);
+/* Determine if code points to a NaCl call-through-register sequence, */
+/* (i.e., the last 3 instructions listed above) */
+int
+is_nacl_call_reg_sequence(guint8* code)
+{
+       const char *sequence = "\x41\x83\xe3\xe0" /* and */
+                              "\x4d\x03\xdf"     /* add */
+                              "\x41\xff\xd3";   /* call */
+       return memcmp(code, sequence, 10) == 0;
+}
 
-       *displacement = disp;
-       return (gpointer)regs [reg];
+/* Determine if code points to the first opcode of the mov membase component */
+/* of an indirect call sequence (i.e. the first 2 instructions listed above) */
+/* (there could be a REX prefix before the opcode but it is ignored) */
+static int
+is_nacl_indirect_call_membase_sequence(guint8* code)
+{
+              /* Check for mov opcode, reg-reg addressing mode (mod = 3), */
+       return code[0] == 0x8b && amd64_modrm_mod(code[1]) == 3 &&
+              /* and that src reg = dest reg */
+              amd64_modrm_reg(code[1]) == amd64_modrm_rm(code[1]) &&
+              /* Check that next inst is mov, uses SIB byte (rm = 4), */
+              IS_REX(code[2]) &&
+              code[3] == 0x8b && amd64_modrm_rm(code[4]) == 4 &&
+              /* and has dst of r11 and base of r15 */
+              (amd64_modrm_reg(code[4]) + amd64_rex_r(code[2])) == AMD64_R11 &&
+              (amd64_sib_base(code[5]) + amd64_rex_b(code[2])) == AMD64_R15;
 }
+#endif /* __native_client_codegen__ */
 
 int
-mono_arch_get_this_arg_reg (MonoMethodSignature *sig, MonoGenericSharingContext *gsctx, guint8 *code)
+mono_arch_get_this_arg_reg (guint8 *code)
 {
-       int this_reg = AMD64_ARG_REG1;
-
-       if (MONO_TYPE_ISSTRUCT (sig->ret)) {
-               CallInfo *cinfo;
-
-               if (!gsctx && code)
-                       gsctx = mono_get_generic_context_from_code (code);
-
-               cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-               
-               if (cinfo->ret.storage != ArgValuetypeInReg)
-                       this_reg = AMD64_ARG_REG2;
-               g_free (cinfo);
-       }
-
-       return this_reg;
+       return AMD64_ARG_REG1;
 }
 
 gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
 {
-       return (gpointer)regs [mono_arch_get_this_arg_reg (sig, gsctx, code)];
+       return (gpointer)regs [mono_arch_get_this_arg_reg (code)];
 }
 
 #define MAX_ARCH_DELEGATE_PARAMS 10
@@ -7061,11 +7749,25 @@ get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *cod
                g_assert ((code - start) < 64);
        }
 
+       nacl_global_codeman_validate(&start, 64, &code);
+
        mono_debug_add_delegate_trampoline (start, code - start);
 
        if (code_len)
                *code_len = code - start;
 
+
+       if (mono_jit_map_is_enabled ()) {
+               char *buff;
+               if (has_target)
+                       buff = (char*)"delegate_invoke_has_target";
+               else
+                       buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
+               mono_emit_jit_tramp (start, code - start, buff);
+               if (!has_target)
+                       g_free (buff);
+       }
+
        return start;
 }
 
@@ -7193,6 +7895,7 @@ mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 
 #ifdef MONO_ARCH_HAVE_IMT
 
+#if defined(__default_codegen__)
 #define CMP_SIZE (6 + 1)
 #define CMP_REG_REG_SIZE (4 + 1)
 #define BR_SMALL_SIZE 2
@@ -7200,6 +7903,20 @@ mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 #define MOV_REG_IMM_SIZE 10
 #define MOV_REG_IMM_32BIT_SIZE 6
 #define JUMP_REG_SIZE (2 + 1)
+#elif defined(__native_client_codegen__)
+/* NaCl N-byte instructions can be padded up to N-1 bytes */
+#define CMP_SIZE ((6 + 1) * 2 - 1)
+#define CMP_REG_REG_SIZE ((4 + 1) * 2 - 1)
+#define BR_SMALL_SIZE (2 * 2 - 1)
+#define BR_LARGE_SIZE (6 * 2 - 1)
+#define MOV_REG_IMM_SIZE (10 * 2 - 1)
+#define MOV_REG_IMM_32BIT_SIZE (6 * 2 - 1)
+/* Jump reg for NaCl adds a mask (+4) and add (+3) */
+#define JUMP_REG_SIZE ((2 + 1 + 4 + 3) * 2 - 1)
+/* Jump membase's size is large and unpredictable       */
+/* in Native Client; just pad it out to a whole bundle. */
+#define JUMP_MEMBASE_SIZE (kNaClAlignment)
+#endif
 
 static int
 imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
@@ -7239,6 +7956,9 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                                item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
                                        else
                                                item->chunk_size += MOV_REG_IMM_SIZE;
+#ifdef __native_client_codegen__
+                                       item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
                                }
                                item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
                        } else {
@@ -7254,6 +7974,9 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                        /* with assert below:
                                         * item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
                                         */
+#ifdef __native_client_codegen__
+                                       item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
                                }
                        }
                } else {
@@ -7266,10 +7989,16 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                }
                size += item->chunk_size;
        }
+#if defined(__native_client__) && defined(__native_client_codegen__)
+       /* In Native Client, we don't re-use thunks, allocate from the */
+       /* normal code manager paths. */
+       code = mono_domain_code_reserve (domain, size);
+#else
        if (fail_tramp)
                code = mono_method_alloc_generic_virtual_thunk (domain, size);
        else
                code = mono_domain_code_reserve (domain, size);
+#endif
        start = code;
        for (i = 0; i < count; ++i) {
                MonoIMTCheckItem *item = imt_entries [i];
@@ -7282,25 +8011,24 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                        if (amd64_is_imm32 (item->key))
                                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                        else {
-                                               amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                                        }
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
-                               /* See the comment below about R10 */
                                if (item->has_target_code) {
-                                       amd64_mov_reg_imm (code, AMD64_R10, item->value.target_code);
-                                       amd64_jump_reg (code, AMD64_R10);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code);
+                                       amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
                                } else {
-                                       amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                                       amd64_jump_membase (code, AMD64_R10, 0);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                                       amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
                                }
 
                                if (fail_case) {
                                        amd64_patch (item->jmp_code, code);
-                                       amd64_mov_reg_imm (code, AMD64_R10, fail_tramp);
-                                       amd64_jump_reg (code, AMD64_R10);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp);
+                                       amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
                                        item->jmp_code = NULL;
                                }
                        } else {
@@ -7309,33 +8037,33 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                if (amd64_is_imm32 (item->key))
                                        amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                else {
-                                       amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
                                /* See the comment below about R10 */
-                               amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R10, 0);
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
                                amd64_patch (item->jmp_code, code);
                                amd64_breakpoint (code);
                                item->jmp_code = NULL;
 #else
-                               /* We're using R10 here because R11
+                               /* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG) here because R11 (MONO_ARCH_IMT_REG)
                                   needs to be preserved.  R10 needs
                                   to be preserved for calls which
                                   require a runtime generic context,
                                   but interface calls don't. */
-                               amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R10, 0);
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
 #endif
                        }
                } else {
                        if (amd64_is_imm32 (item->key))
                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                        else {
-                               amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                        }
                        item->jmp_code = code;
                        if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
@@ -7359,6 +8087,8 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                mono_stats.imt_thunks_size += code - start;
        g_assert (code - start <= size);
 
+       nacl_domain_code_validate (domain, &start, size, &code);
+
        return start;
 }
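
Throughout this hunk the hard-coded AMD64_R10 scratch register is replaced by the MONO_ARCH_IMT_SCRATCH_REG macro so the NaCl codegen can substitute a different register. A minimal sketch of the default (non-NaCl) assignment implied by the R10/R11 comment in this hunk — the real definitions presumably live in mini-amd64.h, and the NaCl assignment may differ:

/* Hypothetical sketch, not the actual mini-amd64.h contents: the default
 * (non-NaCl) assignment implied by the comment above. */
#define MONO_ARCH_IMT_REG         AMD64_R11  /* carries the IMT key into the thunk */
#define MONO_ARCH_IMT_SCRATCH_REG AMD64_R10  /* free to clobber inside the thunk   */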
 
@@ -7500,6 +8230,59 @@ mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
        }
 }
 
+/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
+gpointer
+mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
+{
+       int offset;
+       gpointer *sp, old_value;
+       char *bp;
+       const unsigned char *handler;
+
+       /*Decode the first instruction to figure out where we stored the spvar*/
+       /*Our jit MUST generate the following:
+        mov    %rsp, ?(%rbp)
+
+        Which is encoded as: REX.W 0x89 mod_rm
+        mod_rm for (rsp, rbp, imm), where imm is never zero, can be:
+               mod (reg + imm8):  01 reg(rsp): 100 rm(rbp): 101 -> 01100101 (0x65)
+               mod (reg + imm32): 10 reg(rsp): 100 rm(rbp): 101 -> 10100101 (0xA5)
+
+       FIXME: can we generate frameless methods in this case?
+
+       */
+       handler = clause->handler_start;
+
+       /*REX.W*/
+       if (*handler != 0x48)
+               return NULL;
+       ++handler;
+
+       /*mov r, r/m */
+       if (*handler != 0x89)
+               return NULL;
+       ++handler;
+
+       if (*handler == 0x65)
+               offset = *(signed char*)(handler + 1);
+       else if (*handler == 0xA5)
+               offset = *(int*)(handler + 1);
+       else
+               return NULL;
+
+       /*Load the spvar*/
+       bp = MONO_CONTEXT_GET_BP (ctx);
+       sp = *(gpointer*)(bp + offset);
+
+       old_value = *sp;
+       if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
+               return old_value;
+
+       *sp = new_value;
+
+       return old_value;
+}
+
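As a sanity check on the 0x65/0xA5 values matched by the decoder above, a small standalone sketch assuming only the standard ModRM layout (mod << 6 | reg << 3 | rm, with reg = rsp = 100b and rm = rbp = 101b); the modrm_byte helper is purely illustrative:

/* Standalone sketch (not part of the patch): reproduce the ModRM bytes
 * matched above.  ModRM = mod << 6 | reg << 3 | rm, here with
 * reg = rsp (100b) and rm = rbp (101b). */
#include <assert.h>

static unsigned char
modrm_byte (unsigned mod, unsigned reg, unsigned rm)
{
	return (unsigned char)((mod << 6) | (reg << 3) | rm);
}

int
main (void)
{
	assert (modrm_byte (0x1, 0x4, 0x5) == 0x65);	/* mov %rsp, imm8(%rbp)  */
	assert (modrm_byte (0x2, 0x4, 0x5) == 0xA5);	/* mov %rsp, imm32(%rbp) */
	return 0;
}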
 /*
  * mono_arch_emit_load_aotconst:
  *
@@ -7516,6 +8299,18 @@ mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, in
        return code;
 }
 
+/*
+ * mono_arch_get_trampolines:
+ *
+ *   Return a list of MonoTrampInfo structures describing arch-specific trampolines
+ * for AOT.
+ */
+GSList *
+mono_arch_get_trampolines (gboolean aot)
+{
+       return mono_amd64_get_exception_trampolines (aot);
+}
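
For context, a hedged sketch of how an AOT writer might walk the returned list; the GSList traversal is plain glib, while emit_trampoline_info () is a hypothetical placeholder for whatever is done with each MonoTrampInfo:

/* Illustrative only: consume the trampoline list returned above.
 * emit_trampoline_info () is a hypothetical AOT-writer hook. */
static void
emit_all_trampolines (void)
{
	GSList *l;

	for (l = mono_arch_get_trampolines (TRUE); l; l = l->next) {
		MonoTrampInfo *info = l->data;

		emit_trampoline_info (info);
	}
}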
+
 /* Soft Debug support */
 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED