Merge remote branch 'upstream/master'
[mono.git] / mono / mini / mini-amd64.c
index cb1edb8cdc0c53697b6c3021851f401c69fac76f..49c3a53c715324b74493490b6898225eaf745142 100644 (file)
@@ -205,11 +205,278 @@ amd64_is_near_call (guint8 *code)
        return code [0] == 0xe8;
 }
 
+#ifdef __native_client_codegen__
+
+/* Keep track of instruction "depth", that is, the level of sub-instruction */
+/* for any given instruction.  For instance, amd64_call_reg resolves to     */
+/* amd64_call_reg_internal, which uses amd64_alu_* macros, etc.             */
+/* We only want to force bundle alignment for the top level instruction,    */
+/* so NaCl pseudo-instructions can be implemented with sub instructions.    */
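+/* For illustration only (nesting sketch, not traced from the macros):      */
+/*   amd64_call_reg            -> instruction_pre   (depth 0 -> 1)          */
+/*     amd64_call_reg_internal -> instruction_pre   (depth 1 -> 2)          */
+/*     ...sub-instructions...  -> instruction_post  (depth 2 -> 1)          */
+/*   amd64_call_reg            -> instruction_post  (depth 1 -> 0, align)   */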
+static guint32 nacl_instruction_depth;
+
+static guint32 nacl_rex_tag;
+static guint32 nacl_legacy_prefix_tag;
+
+void
+amd64_nacl_clear_legacy_prefix_tag ()
+{
+       TlsSetValue (nacl_legacy_prefix_tag, NULL);
+}
+
+void
+amd64_nacl_tag_legacy_prefix (guint8* code)
+{
+       if (TlsGetValue (nacl_legacy_prefix_tag) == NULL)
+               TlsSetValue (nacl_legacy_prefix_tag, code);
+}
+
+void
+amd64_nacl_tag_rex (guint8* code)
+{
+       TlsSetValue (nacl_rex_tag, code);
+}
+
+guint8*
+amd64_nacl_get_legacy_prefix_tag ()
+{
+       return (guint8*)TlsGetValue (nacl_legacy_prefix_tag);
+}
+
+guint8*
+amd64_nacl_get_rex_tag ()
+{
+       return (guint8*)TlsGetValue (nacl_rex_tag);
+}
+
+/* Increment the instruction "depth" described above */
+void
+amd64_nacl_instruction_pre ()
+{
+       intptr_t depth = (intptr_t) TlsGetValue (nacl_instruction_depth);
+       depth++;
+       TlsSetValue (nacl_instruction_depth, (gpointer)depth);
+}
+
+/* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */
+/* alignment if depth == 0 (top level instruction)                          */
+/* IN: start, end    pointers to instruction beginning and end              */
+/* OUT: start, end   pointers to beginning and end after possible alignment */
+/* GLOBALS: nacl_instruction_depth     defined above                        */
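+/* Worked example (illustrative, 32-byte bundles): an instruction with      */
+/* *start at bundle offset 30 and *end at 35 has space_in_block == 2 and    */
+/* instlen == 5, so its bytes are copied aside, padding fills up to the     */
+/* bundle boundary, and the instruction is re-emitted starting at 32.       */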
+void
+amd64_nacl_instruction_post (guint8 **start, guint8 **end)
+{
+       intptr_t depth = (intptr_t) TlsGetValue (nacl_instruction_depth);
+       depth--;
+       TlsSetValue (nacl_instruction_depth, (gpointer)depth);
+
+       g_assert (depth >= 0);
+       if (depth == 0) {
+               uintptr_t space_in_block;
+               uintptr_t instlen;
+               guint8 *prefix = amd64_nacl_get_legacy_prefix_tag ();
+               /* if legacy prefix is present, and if it was emitted before */
+               /* the start of the instruction sequence, adjust the start   */
+               if (prefix != NULL && prefix < *start) {
+                       g_assert (*start - prefix <= 3);/* only 3 are allowed */
+                       *start = prefix;
+               }
+               space_in_block = kNaClAlignment - ((uintptr_t)(*start) & kNaClAlignmentMask);
+               instlen = (uintptr_t)(*end - *start);
+               /* Only check for instructions which are less than        */
+               /* kNaClAlignment. The only instructions that should ever */
+               /* be that long are call sequences, which are already     */
+               /* padded out to align the return to the next bundle.     */
+               if (instlen > space_in_block && instlen < kNaClAlignment) {
+                       const size_t MAX_NACL_INST_LENGTH = kNaClAlignment;
+                       guint8 copy_of_instruction[MAX_NACL_INST_LENGTH];
+                       const size_t length = (size_t)((*end)-(*start));
+                       g_assert (length < MAX_NACL_INST_LENGTH);
+                       
+                       memcpy (copy_of_instruction, *start, length);
+                       *start = mono_arch_nacl_pad (*start, space_in_block);
+                       memcpy (*start, copy_of_instruction, length);
+                       *end = *start + length;
+               }
+               amd64_nacl_clear_legacy_prefix_tag ();
+               amd64_nacl_tag_rex (NULL);
+       }
+}
+
+/* amd64_nacl_membase_handler: ensure all access to memory of the form      */
+/*   OFFSET(%rXX) is sandboxed.  For allowable base registers %rip, %rbp,   */
+/*   %rsp, and %r15, emit the membase as usual.  For all other registers,   */
+/*   make sure the upper 32-bits are cleared, and use that register in the  */
+/*   index field of a new address of this form: OFFSET(%r15,%eXX,1)         */
+/* IN:      code                                                            */
+/*             pointer to current instruction stream (in the                */
+/*             middle of an instruction, after opcode is emitted)           */
+/*          basereg/offset/dreg                                             */
+/*             operands of normal membase address                           */
+/* OUT:     code                                                            */
+/*             pointer to the end of the membase/memindex emit              */
+/* GLOBALS: nacl_rex_tag                                                    */
+/*             position in instruction stream that rex prefix was emitted   */
+/*          nacl_legacy_prefix_tag                                          */
+/*             (possibly NULL) position in instruction of legacy x86 prefix */
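+/* Illustrative rewrite (example bytes; %rdx base, %rax destination):       */
+/*   before: 48 8b 42 10      mov 0x10(%rdx),%rax                           */
+/*   after:  89 d2            mov %edx,%edx    (clear upper 32 bits)        */
+/*           49 8b 44 17 10   mov 0x10(%r15,%rdx,1),%rax                    */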
+void
+amd64_nacl_membase_handler (guint8** code, gint8 basereg, gint32 offset, gint8 dreg)
+{
+       gint8 true_basereg = basereg;
+
+       /* Cache these values, they might change  */
+       /* as new instructions are emitted below. */
+       guint8* rex_tag = amd64_nacl_get_rex_tag ();
+       guint8* legacy_prefix_tag = amd64_nacl_get_legacy_prefix_tag ();
+
+       /* 'basereg' is given masked to 0x7 at this point, so check */
+       /* the rex prefix to see if this is an extended register.   */
+       if ((rex_tag != NULL) && IS_REX(*rex_tag) && (*rex_tag & AMD64_REX_B)) {
+               true_basereg |= 0x8;
+       }
+
+#define X86_LEA_OPCODE (0x8D)
+
+       if (!amd64_is_valid_nacl_base (true_basereg) && (*(*code-1) != X86_LEA_OPCODE)) {
+               guint8* old_instruction_start;
+               
+               /* This will hold the 'mov %eXX, %eXX' that clears the upper */
+               /* 32-bits of the old base register (new index register)     */
+               guint8 buf[32];
+               guint8* buf_ptr = buf;
+               size_t insert_len;
+
+               g_assert (rex_tag != NULL);
+
+               if (IS_REX(*rex_tag)) {
+                       /* The old rex.B should be the new rex.X */
+                       if (*rex_tag & AMD64_REX_B) {
+                               *rex_tag |= AMD64_REX_X;
+                       }
+                       /* Since our new base is %r15 set rex.B */
+                       *rex_tag |= AMD64_REX_B;
+               } else {
+                       /* Shift the instruction by one byte  */
+                       /* so we can insert a rex prefix      */
+                       memmove (rex_tag + 1, rex_tag, (size_t)(*code - rex_tag));
+                       *code += 1;
+                       /* New rex prefix only needs rex.B for %r15 base */
+                       *rex_tag = AMD64_REX(AMD64_REX_B);
+               }
+
+               if (legacy_prefix_tag) {
+                       old_instruction_start = legacy_prefix_tag;
+               } else {
+                       old_instruction_start = rex_tag;
+               }
+               
+               /* Clears the upper 32-bits of the previous base register */
+               amd64_mov_reg_reg_size (buf_ptr, true_basereg, true_basereg, 4);
+               insert_len = buf_ptr - buf;
+               
+               /* Move the old instruction forward to make */
+               /* room for 'mov' stored in 'buf_ptr'       */
+               memmove (old_instruction_start + insert_len, old_instruction_start, (size_t)(*code - old_instruction_start));
+               *code += insert_len;
+               memcpy (old_instruction_start, buf, insert_len);
+
+               /* Sandboxed replacement for the normal membase_emit */
+               x86_memindex_emit (*code, dreg, AMD64_R15, offset, basereg, 0);
+               
+       } else {
+               /* Normal default behavior, emit membase memory location */
+               x86_membase_emit_body (*code, dreg, basereg, offset);
+       }
+}
+
+
+static inline unsigned char*
+amd64_skip_nops (unsigned char* code)
+{
+       guint8 in_nop;
+       do {
+               in_nop = 0;
+               if (code[0] == 0x90) {
+                       in_nop = 1;
+                       code += 1;
+               }
+               if (code[0] == 0x66 && code[1] == 0x90) {
+                       in_nop = 1;
+                       code += 2;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x00) {
+                       in_nop = 1;
+                       code += 3;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x40 && code[3] == 0x00) {
+                       in_nop = 1;
+                       code += 4;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x44 && code[3] == 0x00
+                && code[4] == 0x00) {
+                       in_nop = 1;
+                       code += 5;
+               }
+               if (code[0] == 0x66 && code[1] == 0x0f
+                && code[2] == 0x1f && code[3] == 0x44
+                && code[4] == 0x00 && code[5] == 0x00) {
+                       in_nop = 1;
+                       code += 6;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x80 && code[3] == 0x00
+                && code[4] == 0x00 && code[5] == 0x00
+                && code[6] == 0x00) {
+                       in_nop = 1;
+                       code += 7;
+               }
+               if (code[0] == 0x0f && code[1] == 0x1f
+                && code[2] == 0x84 && code[3] == 0x00
+                && code[4] == 0x00 && code[5] == 0x00
+                && code[6] == 0x00 && code[7] == 0x00) {
+                       in_nop = 1;
+                       code += 8;
+               }
+       } while ( in_nop );
+       return code;
+}
+
+guint8*
+mono_arch_nacl_skip_nops (guint8* code)
+{
+       return amd64_skip_nops (code);
+}
+
+#endif /*__native_client_codegen__*/
+
 static inline void 
 amd64_patch (unsigned char* code, gpointer target)
 {
        guint8 rex = 0;
 
+#ifdef __native_client_codegen__
+       code = amd64_skip_nops (code);
+#endif
+#if defined(__native_client_codegen__) && defined(__native_client__)
+       if (nacl_is_code_address (code)) {
+               /* For tail calls, code is patched after being installed */
+               /* but not through the normal "patch callsite" method.   */
+               unsigned char buf[kNaClAlignment];
+               unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~(uintptr_t)kNaClAlignmentMask);
+               int ret;
+               memcpy (buf, aligned_code, kNaClAlignment);
+               /* Patch a temp buffer of bundle size, */
+               /* then install to actual location.    */
+               amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
+               ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+               g_assert (ret == 0);
+               return;
+       }
+       target = nacl_modify_patch_target (target);
+#endif
+
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
                rex = code [0];
@@ -302,7 +569,9 @@ add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
 
     if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
-               (*stack_size) += sizeof (gpointer);
+               /* Since the same stack slot size is used for all arg */
+               /*  types, it needs to be big enough to hold them all */
+               (*stack_size) += sizeof(mgreg_t);
     }
     else {
                ainfo->storage = ArgInIReg;
@@ -324,7 +593,9 @@ add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
 
     if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
-               (*stack_size) += sizeof (gpointer);
+               /* Since the same stack slot size is used for both float */
+               /*  types, it needs to be big enough to hold them both */
+               (*stack_size) += sizeof(mgreg_t);
     }
     else {
                /* A double register */
@@ -419,6 +690,32 @@ merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
 
        return class1;
 }
+#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentAMD64;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskAMD64;
+
+/* Default alignment for Native Client is 32-byte. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
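+/* This byte is used as the immediate of the 'and' that sandboxes indirect  */
+/* branches, e.g. (sketch): and $-32,%edx; add %r15,%rdx; jmp *%rdx.        */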
+
+/* mono_arch_nacl_pad: Emit 'pad' bytes of padding instructions at code. */
+/* The padding must not cross a bundle (alignment) boundary.             */
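+/* e.g. (sketch) pad == 20 emits padding instructions of 8, 8 and 4      */
+/* bytes via the loop below.                                             */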
+guint8*
+mono_arch_nacl_pad (guint8 *code, int pad)
+{
+       const int kMaxPadding = 8; /* see amd64-codegen.h:amd64_padding_size() */
+
+       if (pad == 0) return code;
+       /* assertion: alignment cannot cross a block boundary */
+       g_assert (((uintptr_t)code & (~kNaClAlignmentMask)) ==
+                (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
+       while (pad >= kMaxPadding) {
+               amd64_padding (code, kMaxPadding);
+               pad -= kMaxPadding;
+       }
+       if (pad != 0) amd64_padding (code, pad);
+       return code;
+}
+#endif
 
 static void
 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
@@ -426,6 +723,9 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                           guint32 *gr, guint32 *fr, guint32 *stack_size)
 {
        guint32 size, quad, nquads, i;
+       /* Keep track of the size used in each quad so we can */
+       /* use the right size when copying args/return vars.  */
+       guint32 quadsize [2] = {8, 8};
        ArgumentClass args [2];
        MonoMarshalType *info = NULL;
        MonoClass *klass;
@@ -454,6 +754,24 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
        }
 #endif
 
+       /* If this struct can't be split up naturally into 8-byte */
+       /* chunks (registers), pass it on the stack.              */
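+       /* e.g. (illustrative) a packed struct { char a[4]; double d; }   */
+       /* places 'd' at offset 4 with size 8; it straddles the first     */
+       /* 8-byte boundary, so the struct would be passed on the stack.   */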
+       if (sig->pinvoke && !pass_on_stack) {
+               guint32 align;
+               guint32 field_size;
+
+               info = mono_marshal_load_type_info (klass);
+               g_assert (info);
+               for (i = 0; i < info->num_fields; ++i) {
+                       field_size = mono_marshal_type_size (info->fields [i].field->type, 
+                                                          info->fields [i].mspec, 
+                                                          &align, TRUE, klass->unicode);
+                       if ((info->fields [i].offset < 8) && (info->fields [i].offset + field_size) > 8) {
+                               pass_on_stack = TRUE;
+                               break;
+                       }
+               }
+       }
+
        if (pass_on_stack) {
                /* Always pass in memory */
                ainfo->offset = *stack_size;
@@ -553,6 +871,10 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                                if ((quad == 1) && (info->fields [i].offset < 8))
                                        continue;
 
+                               /* How far into this quad this data extends.*/
+                               /* (8 is size of quad) */
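+                               /* e.g. a 4-byte field at offset 12: quadsize [1] = 12 + 4 - 8 = 8 (illustrative) */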
+                               quadsize [quad] = info->fields [i].offset + size - (quad * 8);
+
                                class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
                        }
                        g_assert (class1 != ARG_CLASS_NO_CLASS);
@@ -590,7 +912,9 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                                if (*fr >= FLOAT_PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
-                                       ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
+                                       if (quadsize [quad] <= 4)
+                                               ainfo->pair_storage [quad] = ArgInFloatSSEReg;
+                                       else
+                                               ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
                                        ainfo->pair_regs [quad] = *fr;
                                        (*fr) ++;
                                }
@@ -611,7 +935,7 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
                        if (sig->pinvoke)
                                *stack_size += ALIGN_TO (info->native_size, 8);
                        else
-                               *stack_size += nquads * sizeof (gpointer);
+                               *stack_size += nquads * sizeof(mgreg_t);
                        ainfo->storage = ArgOnStack;
                }
        }
@@ -910,6 +1234,9 @@ mono_amd64_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSigna
 static int 
 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
 {
+#if defined(MONO_CROSS_COMPILE)
+       return 0;
+#else
 #ifndef _MSC_VER
        __asm__ __volatile__ ("cpuid"
                : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
@@ -923,6 +1250,7 @@ cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
        *p_edx = info[3];
 #endif
        return 1;
+#endif
 }
 
 /*
@@ -956,6 +1284,12 @@ mono_arch_init (void)
        int flags;
 
        InitializeCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+       nacl_instruction_depth = TlsAlloc ();
+       TlsSetValue (nacl_instruction_depth, (gpointer)0);
+       nacl_rex_tag = TlsAlloc ();
+       nacl_legacy_prefix_tag = TlsAlloc ();
+#endif
 
 #ifdef MONO_ARCH_NOMAP32BIT
        flags = MONO_MMAP_READ;
@@ -988,6 +1322,11 @@ void
 mono_arch_cleanup (void)
 {
        DeleteCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+       TlsFree (nacl_instruction_depth);
+       TlsFree (nacl_rex_tag);
+       TlsFree (nacl_legacy_prefix_tag);
+#endif
 }
 
 /*
@@ -1119,6 +1458,13 @@ mono_arch_compute_omit_fp (MonoCompile *cfg)
        cfg->arch.omit_fp = TRUE;
        cfg->arch.omit_fp_computed = TRUE;
 
+#ifdef __native_client_codegen__
+       /* NaCl modules may not change the value of RBP, so it cannot be */
+       /* used as a normal register, but it can be used as a frame pointer*/
+       cfg->disable_omit_fp = TRUE;
+       cfg->arch.omit_fp = FALSE;
+#endif
+
        if (cfg->disable_omit_fp)
                cfg->arch.omit_fp = FALSE;
 
@@ -1175,7 +1521,9 @@ mono_arch_get_global_int_regs (MonoCompile *cfg)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
  
                regs = g_list_prepend (regs, (gpointer)AMD64_R10);
                regs = g_list_prepend (regs, (gpointer)AMD64_R9);
@@ -1194,7 +1542,9 @@ mono_arch_get_global_int_regs (MonoCompile *cfg)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
 #ifdef HOST_WIN32
                regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
                regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
@@ -1230,7 +1580,9 @@ mono_arch_get_iregs_clobbered_by_call (MonoCallInst *call)
                regs = g_list_prepend (regs, (gpointer)AMD64_R12);
                regs = g_list_prepend (regs, (gpointer)AMD64_R13);
                regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
                regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
 
                regs = g_list_prepend (regs, (gpointer)AMD64_R10);
                regs = g_list_prepend (regs, (gpointer)AMD64_R9);
@@ -1431,7 +1783,7 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                /* Reserve space for caller saved registers */
                for (i = 0; i < AMD64_NREG; ++i)
                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
-                               offset += sizeof (gpointer);
+                               offset += sizeof(mgreg_t);
                        }
        }
 
@@ -1560,12 +1912,12 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                                        ins->opcode = OP_REGOFFSET;
                                        ins->inst_basereg = cfg->frame_reg;
                                        /* These arguments are saved to the stack in the prolog */
-                                       offset = ALIGN_TO (offset, sizeof (gpointer));
+                                       offset = ALIGN_TO (offset, sizeof(mgreg_t));
                                        if (cfg->arch.omit_fp) {
                                                ins->inst_offset = offset;
-                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        } else {
-                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+                                               offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                                ins->inst_offset = - offset;
                                        }
                                        break;
@@ -1637,14 +1989,14 @@ mono_arch_allocate_vars (MonoCompile *cfg)
                                ins->opcode = OP_REGOFFSET;
                                ins->inst_basereg = cfg->frame_reg;
                                /* These arguments are saved to the stack in the prolog */
-                               offset = ALIGN_TO (offset, sizeof (gpointer));
+                               offset = ALIGN_TO (offset, sizeof(mgreg_t));
                                if (cfg->arch.omit_fp) {
                                        ins->inst_offset = offset;
-                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        // Arguments are not yet supported by the stack map creation code
                                        //cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
                                } else {
-                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+                                       offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
                                        ins->inst_offset = - offset;
                                        //cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
                                }
@@ -1740,7 +2092,11 @@ arg_storage_to_load_membase (ArgStorage storage)
 {
        switch (storage) {
        case ArgInIReg:
+#if defined(__mono_ilp32__)
+               return OP_LOADI8_MEMBASE;
+#else
                return OP_LOAD_MEMBASE;
+#endif
        case ArgInDoubleSSEReg:
                return OP_LOADR8_MEMBASE;
        case ArgInFloatSSEReg:
@@ -2149,7 +2505,7 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
 
                        MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
                        load->inst_basereg = src->dreg;
-                       load->inst_offset = part * sizeof (gpointer);
+                       load->inst_offset = part * sizeof(mgreg_t);
 
                        switch (ainfo->pair_storage [part]) {
                        case ArgInIReg:
@@ -2366,6 +2722,15 @@ mono_arch_dyn_call_free (MonoDynCallInfo *info)
        g_free (ainfo);
 }
 
+#if !defined(__native_client__)
+#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(greg)
+#else
+/* Correctly handle casts to/from 32-bit pointers without compiler warnings */
+#define PTR_TO_GREG(ptr) (mgreg_t)(uintptr_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(guint32)(greg)
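+/* e.g. PTR_TO_GREG(p) widens a 32-bit NaCl pointer to the 64-bit mgreg_t */
+/* via uintptr_t, avoiding a pointer-size mismatch warning.               */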
+#endif
+
 /*
  * mono_arch_get_start_dyn_call:
  *
@@ -2398,20 +2763,20 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
        pindex = 0;
 
        if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
-               p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
+               p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
                if (!sig->hasthis)
                        pindex = 1;
        }
 
        if (dinfo->cinfo->vtype_retaddr)
-               p->regs [greg ++] = (mgreg_t)ret;
+               p->regs [greg ++] = PTR_TO_GREG(ret);
 
        for (i = pindex; i < sig->param_count; i++) {
                MonoType *t = mono_type_get_underlying_type (sig->params [i]);
                gpointer *arg = args [arg_index ++];
 
                if (t->byref) {
-                       p->regs [greg ++] = (mgreg_t)*(arg);
+                       p->regs [greg ++] = PTR_TO_GREG(*(arg));
                        continue;
                }
 
@@ -2424,11 +2789,20 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
                case MONO_TYPE_PTR:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
+#if !defined(__mono_ilp32__)
                case MONO_TYPE_I8:
                case MONO_TYPE_U8:
+#endif
                        g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
-                       p->regs [greg ++] = (mgreg_t)*(arg);
+                       p->regs [greg ++] = PTR_TO_GREG(*(arg));
                        break;
+#if defined(__mono_ilp32__)
+               case MONO_TYPE_I8:
+               case MONO_TYPE_U8:
+                       g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
+                       p->regs [greg ++] = *(guint64*)(arg);
+                       break;
+#endif
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_U1:
                        p->regs [greg ++] = *(guint8*)(arg);
@@ -2451,7 +2825,7 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
                        break;
                case MONO_TYPE_GENERICINST:
                    if (MONO_TYPE_IS_REFERENCE (t)) {
-                               p->regs [greg ++] = (mgreg_t)*(arg);
+                               p->regs [greg ++] = PTR_TO_GREG(*(arg));
                                break;
                        } else {
                                /* Fall through */
@@ -2507,7 +2881,7 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
-               *(gpointer*)ret = (gpointer)res;
+               *(gpointer*)ret = GREG_TO_PTR(res);
                break;
        case MONO_TYPE_I1:
                *(gint8*)ret = res;
@@ -2537,7 +2911,7 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
                break;
        case MONO_TYPE_GENERICINST:
                if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
-                       *(gpointer*)ret = (gpointer)res;
+                       *(gpointer*)ret = GREG_TO_PTR(res);
                        break;
                } else {
                        /* Fall through */
@@ -2690,8 +3064,10 @@ emit_call_body (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointe
                         * not span cache lines. This is required for code patching to work on SMP
                         * systems.
                         */
-                       if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0)
-                               amd64_padding (code, 4 - ((guint32)(code + 1 - cfg->native_code) % 4));
+                       if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
+                               guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
+                               amd64_padding (code, pad_size);
+                       }
                        mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
                        amd64_call_code (code, 0);
                }
@@ -2948,8 +3324,13 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                ins->sreg2 = temp->dreg;
                        }
                        break;
+#ifndef __mono_ilp32__
                case OP_LOAD_MEMBASE:
+#endif
                case OP_LOADI8_MEMBASE:
+#ifndef __native_client_codegen__
+               /* Under NaCl, skip this lowering so no memindex      */
+               /* opcodes are generated (simplifies read sandboxing) */
                        if (!amd64_is_imm32 (ins->inst_offset)) {
                                NEW_INS (cfg, ins, temp, OP_I8CONST);
                                temp->inst_c0 = ins->inst_offset;
@@ -2957,8 +3338,11 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
                                ins->inst_indexreg = temp->dreg;
                        }
+#endif
                        break;
+#ifndef __mono_ilp32__
                case OP_STORE_MEMBASE_IMM:
+#endif
                case OP_STOREI8_MEMBASE_IMM:
                        if (!amd64_is_imm32 (ins->inst_imm)) {
                                NEW_INS (cfg, ins, temp, OP_I8CONST);
@@ -3110,8 +3494,20 @@ mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
                if (cfg->param_area && cfg->arch.no_pushes)
                        amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
                amd64_cld (code);
+#if defined(__default_codegen__)
+               amd64_prefix (code, X86_REP_PREFIX);
+               amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+               /* NaCl stos pseudo-instruction */
+               amd64_codegen_pre (code);
+               /* First, clear the upper 32 bits of RDI (mov %edi, %edi)  */
+               amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+               /* Add %r15 to %rdi using lea, condition flags unaffected. */
+               amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
                amd64_prefix (code, X86_REP_PREFIX);
                amd64_stosl (code);
+               amd64_codegen_post (code);
+#endif /* __native_client_codegen__ */
                
                if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
                        amd64_pop_reg (code, AMD64_RDI);
@@ -3163,12 +3559,12 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
 
                        /* Load the destination address */
                        g_assert (loc->opcode == OP_REGOFFSET);
-                       amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
 
                        for (quad = 0; quad < 2; quad ++) {
                                switch (cinfo->ret.pair_storage [quad]) {
                                case ArgInIReg:
-                                       amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
+                                       amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof(mgreg_t)), cinfo->ret.pair_regs [quad], sizeof(mgreg_t));
                                        break;
                                case ArgInFloatSSEReg:
                                        amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
@@ -3244,6 +3640,15 @@ amd64_pop_reg (code, AMD64_RAX);
 
 #ifndef DISABLE_JIT
 
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void
+mono_nacl_gc (void)
+{
+#ifdef __native_client_gc__
+       __nacl_suspend_thread_if_needed();
+#endif
+}
+#endif
+
 void
 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 {
@@ -3277,6 +3682,21 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
        }
 
+#if defined(__native_client_codegen__)
+       /* For Native Client, all indirect call/jump targets must be */
+       /* 32-byte aligned.  Exception handler blocks are jumped to  */
+       /* indirectly as well.                                       */
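+       /* e.g. (illustrative) code_len == 0x46: pad = 32 - 6 = 26, so    */
+       /* the block begins at the next bundle boundary, offset 0x60.     */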
+       gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
+                                     (bb->flags & BB_EXCEPTION_HANDLER);
+
+       if (bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
+               int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
+               if (pad != kNaClAlignment)
+                       code = mono_arch_nacl_pad (code, pad);
+               cfg->code_len += pad;
+               bb->native_offset = cfg->code_len;
+       }
+#endif  /*__native_client_codegen__*/
+
        if (cfg->verbose_level > 2)
                g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
 
@@ -3302,9 +3722,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
 
-               if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
+#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+
+               if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
                        cfg->code_size *= 2;
-                       cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+                       cfg->native_code = mono_realloc_native_code (cfg);
                        code = cfg->native_code + offset;
                        mono_jit_stats.code_reallocs++;
                }
@@ -3337,7 +3759,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_STOREI2_MEMBASE_REG:
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
                        break;
+               /* In AMD64 NaCl, pointers are 4 bytes, */
+               /*  so STORE_* != STOREI8_*. Likewise below. */
                case OP_STORE_MEMBASE_REG:
+                       amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer));
+                       break;
                case OP_STOREI8_MEMBASE_REG:
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
                        break;
@@ -3345,15 +3771,32 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
                        break;
                case OP_STORE_MEMBASE_IMM:
+#ifndef __native_client_codegen__
+                       /* This check is skipped under NaCl: inst_imm may    */
+                       /* hold a PCONST whose pointer value was copied      */
+                       /* directly into its lower 32 bits, so InvalidPtr    */
+                       /* == -1 appears as 0x00000000FFFFFFFF, which is     */
+                       /* not a proper imm32 unless it is cast first.       */
+                       g_assert (amd64_is_imm32 (ins->inst_imm));
+#endif
+                       amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
+                       break;
                case OP_STOREI8_MEMBASE_IMM:
                        g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
                        break;
                case OP_LOAD_MEM:
+#ifdef __mono_ilp32__
+                       /* In ILP32, pointers are 4 bytes, so separate these */
+                       /* cases, use literal 8 below where we really want 8 */
+                       amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+                       amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
+                       break;
+#endif
                case OP_LOADI8_MEM:
                        // FIXME: Decompose this earlier
                        if (amd64_is_imm32 (ins->inst_imm))
-                               amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, sizeof (gpointer));
+                               amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
                        else {
                                amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
                                amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
@@ -3377,13 +3820,20 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE);
                        break;
                case OP_LOADU2_MEM:
                        amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
                        amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE);
                        break;
                case OP_LOAD_MEMBASE:
+                       g_assert (amd64_is_imm32 (ins->inst_offset));
+                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer));
+                       break;
                case OP_LOADI8_MEMBASE:
+                       /* Use a literal 8 instead of sizeof(gpointer) or     */
+                       /* sizeof(mgreg_t); this opcode always loads 8 bytes. */
                        g_assert (amd64_is_imm32 (ins->inst_offset));
-                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
+                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8);
                        break;
                case OP_LOADI4_MEMBASE:
                        amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
@@ -4071,14 +4521,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_AOTCONST:
                        mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
-                       amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
+                       amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
                        break;
                case OP_JUMP_TABLE:
                        mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
                        amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
                        break;
                case OP_MOVE:
-                       amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
+                       amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof(mgreg_t));
                        break;
                case OP_AMD64_SET_XMMREG_R4: {
                        amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
@@ -4116,20 +4566,20 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        else {
                                for (i = 0; i < AMD64_NREG; ++i)
                                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
-                                               pos -= sizeof (gpointer);
+                                               pos -= sizeof(mgreg_t);
 
                                /* Restore callee-saved registers */
                                for (i = AMD64_NREG - 1; i > 0; --i) {
                                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
-                                               amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
-                                               pos += 8;
+                                               amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
+                                               pos += sizeof(mgreg_t);
                                        }
                                }
 
                                /* Copy arguments on the stack to our argument area */
-                               for (i = 0; i < call->stack_usage; i += 8) {
-                                       amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, 8);
-                                       amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, 8);
+                               for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
+                                       amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
+                                       amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
                                }
                        
                                if (pos)
@@ -4155,7 +4605,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_ARGLIST: {
                        amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
-                       amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
+                       amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
                        break;
                }
                case OP_CALL:
@@ -4278,7 +4728,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                        /* Set argument registers */
                        for (i = 0; i < PARAM_REGS; ++i)
-                               amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (gpointer), 8);
+                               amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
                        
                        /* Make the call */
                        amd64_call_reg (code, AMD64_R10);
@@ -4403,8 +4853,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
                        break;
                case OP_START_HANDLER: {
+                       /* Even though we're saving RSP, use sizeof */
+                       /* gpointer because spvar is of type IntPtr */
+                       /* see: mono_create_spvar_for_region */
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, 8);
+                       amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));
 
                        if (MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) &&
@@ -4415,13 +4868,13 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
                case OP_ENDFINALLY: {
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
                        amd64_ret (code);
                        break;
                }
                case OP_ENDFILTER: {
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
-                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+                       amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
                        /* The local allocator will put the result into RAX */
                        amd64_ret (code);
                        break;
@@ -5677,6 +6130,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
                        break;
                }
+               case OP_NACL_GC_SAFE_POINT: {
+#if defined(__native_client_codegen__)
+                       code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+#endif
+                       break;
+               }
                case OP_GC_LIVENESS_DEF:
                case OP_GC_LIVENESS_USE:
                case OP_GC_PARAM_SLOT_LIVENESS_DEF:
@@ -5692,9 +6151,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
 
                if ((code - cfg->native_code - offset) > max_len) {
+#if !defined(__native_client_codegen__)
                        g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
                                   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
                        g_assert_not_reached ();
+#endif
                }
               
                last_ins = ins;
@@ -5824,10 +6285,27 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        gint32 lmf_offset = cfg->arch.lmf_offset;
        gboolean args_clobbered = FALSE;
        gboolean trace = FALSE;
+#ifdef __native_client_codegen__
+       guint alignment_check;
+#endif
 
        cfg->code_size =  MAX (cfg->header->code_size * 4, 10240);
 
+#if defined(__default_codegen__)
        code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
+       /* native_code_alloc is not 32-byte aligned, native_code is. */
+       cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
+
+       /* Round native_code up to the next kNaClAlignment boundary. */
+       cfg->native_code = (gpointer)((uintptr_t)cfg->native_code_alloc + kNaClAlignment);
+       cfg->native_code = (gpointer)((uintptr_t)cfg->native_code & ~(uintptr_t)kNaClAlignmentMask);
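+       /* e.g. (illustrative) an allocation at 0x1007 becomes            */
+       /* (0x1007 + 32) & ~31 == 0x1020, a bundle-aligned start.         */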
+
+       code = cfg->native_code;
+
+       alignment_check = (guint)((uintptr_t)cfg->native_code & kNaClAlignmentMask);
+       g_assert (alignment_check == 0);
+#endif
 
        if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
                trace = TRUE;
@@ -5873,7 +6351,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                /* These are handled automatically by the stack marking code */
                mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
                
-               amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
+               amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
                mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
                async_exc_point (code);
 #ifdef HOST_WIN32
@@ -5888,7 +6366,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                for (i = 0; i < AMD64_NREG; ++i)
                        if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
                                amd64_push_reg (code, i);
-                               pos += sizeof (gpointer);
+                               pos += 8; /* AMD64 push inst is always 8 bytes, no way to change it */
                                offset += 8;
                                mono_emit_unwind_op_offset (cfg, code, i, - offset);
                                async_exc_point (code);
@@ -5904,7 +6382,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                if (cfg->arch.omit_fp)
                        // FIXME:
                        g_assert_not_reached ();
-               cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (gpointer));
+               cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof(mgreg_t));
        }
 
        if (cfg->arch.omit_fp) {
@@ -5942,7 +6420,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
                        while (required_code_size >= (cfg->code_size - offset))
                                cfg->code_size *= 2;
-                       cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+                       cfg->native_code = mono_realloc_native_code (cfg);
                        code = cfg->native_code + offset;
                        mono_jit_stats.code_reallocs++;
                }
@@ -6008,8 +6486,20 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
 
                amd64_cld (code);
+#if defined(__default_codegen__)
+               amd64_prefix (code, X86_REP_PREFIX);
+               amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+               /* NaCl stos pseudo-instruction */
+               amd64_codegen_pre (code);
+               /* First, clear the upper 32 bits of RDI (mov %edi, %edi)  */
+               amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+               /* Add %r15 to %rdi using lea, condition flags unaffected. */
+               amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
                amd64_prefix (code, X86_REP_PREFIX);
                amd64_stosl (code);
+               amd64_codegen_post (code);
+#endif /* __native_client_codegen__ */
 
                amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
                amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
@@ -6037,7 +6527,9 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
                        case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
                        case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
+#ifndef __native_client_codegen__
                        case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
+#endif
 #ifdef HOST_WIN32
                        case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
                        case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
@@ -6100,7 +6592,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
                                (cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
 
-               amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 8);
+               amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
        }
 
        /* compute max_length in order to use short forward jumps */
@@ -6115,8 +6607,22 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        /* max alignment for loops */
                        if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
                                max_length += LOOP_ALIGNMENT;
+#ifdef __native_client_codegen__
+                       /* max alignment for native client */
+                       max_length += kNaClAlignment;
+#endif
 
                        MONO_BB_FOR_EACH_INS (bb, ins) {
+#ifdef __native_client_codegen__
+                               {
+                                       int space_in_block = kNaClAlignment -
+                                               ((max_length + cfg->code_len) & kNaClAlignmentMask);
+                                       int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+                                       if (space_in_block < max_len && max_len < kNaClAlignment) {
+                                               max_length += space_in_block;
+                                       }
+                               }
+#endif  /*__native_client_codegen__*/
                                max_length += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
                        }
 
@@ -6168,13 +6674,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                for (quad = 0; quad < 2; quad ++) {
                                        switch (ainfo->pair_storage [quad]) {
                                        case ArgInIReg:
-                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
                                                break;
                                        case ArgInFloatSSEReg:
-                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgInDoubleSSEReg:
-                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgNone:
                                                break;
@@ -6220,13 +6726,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                for (quad = 0; quad < 2; quad ++) {
                                        switch (ainfo->pair_storage [quad]) {
                                        case ArgInIReg:
-                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+                                               amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
                                                break;
                                        case ArgInFloatSSEReg:
-                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgInDoubleSSEReg:
-                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
                                                break;
                                        case ArgNone:
                                                break;
@@ -6354,13 +6860,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        }
 
                        /* Save lmf_addr */
-                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
+                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
                        /* Save previous_lmf */
-                       amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
-                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
+                       amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
+                       amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
                        /* Set new lmf */
                        amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
-                       amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
+                       amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
                }
        }
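
The three stores above are an intrusive linked-list push: the frame records where the list head lives (lmf_addr), saves the old head (previous_lmf), and installs itself as the new head. A minimal C sketch of the same protocol, with illustrative names:

    typedef struct MyLMF {
            struct MyLMF  *previous_lmf;  /* old list head                */
            struct MyLMF **lmf_addr;      /* where the head pointer lives */
            /* ... saved registers ... */
    } MyLMF;

    /* Equivalent of the emitted prolog sequence. */
    static void
    lmf_push (MyLMF *lmf, MyLMF **lmf_addr)
    {
            lmf->lmf_addr     = lmf_addr;   /* "Save lmf_addr"     */
            lmf->previous_lmf = *lmf_addr;  /* "Save previous_lmf" */
            *lmf_addr         = lmf;        /* "Set new lmf"       */
    }
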
 
@@ -6471,7 +6977,7 @@ mono_arch_emit_epilog (MonoCompile *cfg)
 
        while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
                cfg->code_size *= 2;
-               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               cfg->native_code = mono_realloc_native_code (cfg);
                mono_jit_stats.code_reallocs++;
        }
 
@@ -6507,14 +7013,14 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                         * through the mono_lmf_addr TLS variable.
                         */
                        /* reg = previous_lmf */
-                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
+                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
                        x86_prefix (code, X86_FS_PREFIX);
                        amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
                } else {
                        /* Restore previous lmf */
-                       amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
-                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
-                       amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
+                       amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+                       amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+                       amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
                }
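
Both epilog paths above undo that push; they differ only in how the head pointer is reached, either through a known TLS slot (the fs-prefixed store) or through the lmf_addr saved in the frame. A sketch of the generic, non-TLS unlink (illustrative names, matching the prolog sketch):

    typedef struct MyLMF {
            struct MyLMF  *previous_lmf;
            struct MyLMF **lmf_addr;
    } MyLMF;

    /* Equivalent of the non-TLS epilog path above. */
    static void
    lmf_pop (MyLMF *lmf)
    {
            *lmf->lmf_addr = lmf->previous_lmf;  /* "Restore previous lmf" */
    }
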
 
                /* Restore caller saved regs */
@@ -6534,7 +7040,11 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                        amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
                }
                if (cfg->used_int_regs & (1 << AMD64_R15)) {
+#if defined(__default_codegen__)
                        amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#elif defined(__native_client_codegen__)
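+                       /* R15 is the NaCl sandbox base register; it is never */
+                       /* allocated, so it cannot appear in used_int_regs.   */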
+                       g_assert_not_reached();
+#endif
                }
 #ifdef HOST_WIN32
                if (cfg->used_int_regs & (1 << AMD64_RDI)) {
@@ -6558,10 +7068,10 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                else {
                        for (i = 0; i < AMD64_NREG; ++i)
                                if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
-                                       pos -= sizeof (gpointer);
+                                       pos -= sizeof(mgreg_t);
 
                        if (pos) {
-                               if (pos == - sizeof (gpointer)) {
+                               if (pos == - sizeof(mgreg_t)) {
                                        /* Only one register, so avoid lea */
                                        for (i = AMD64_NREG - 1; i > 0; --i)
                                                if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
@@ -6590,13 +7100,13 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                for (quad = 0; quad < 2; quad ++) {
                        switch (ainfo->pair_storage [quad]) {
                        case ArgInIReg:
-                               amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
+                               amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)), sizeof(mgreg_t));
                                break;
                        case ArgInFloatSSEReg:
-                               amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
                                break;
                        case ArgInDoubleSSEReg:
-                               amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
                                break;
                        case ArgNone:
                                break;
@@ -6642,9 +7152,16 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                        code_size += 8 + 7; /*sizeof (void*) + alignment */
        }
 
+#ifdef __native_client_codegen__
+       /* Give us extra room on Native Client.  This could be     */
+       /* calculated more carefully, but bundle alignment makes   */
+       /* that much trickier, so doubling, as elsewhere, is fine. */
+       code_size *= 2;
+#endif
+
        while (cfg->code_len + code_size > (cfg->code_size - 16)) {
                cfg->code_size *= 2;
-               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               cfg->native_code = mono_realloc_native_code (cfg);
                mono_jit_stats.code_reallocs++;
        }
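
This is the usual amortized-doubling growth loop: double code_size until the projected emission plus a 16-byte margin fits, then reallocate once. mono_realloc_native_code replaces the plain g_realloc presumably because NaCl keeps code in a separately managed buffer that cannot simply be realloc'd in place. A sketch of the loop's shape (illustrative helper, not the runtime's API):

    #include <stdlib.h>

    /* Sketch: grow a code buffer of capacity '*cap' holding 'used'
     * bytes until 'needed' more fit, keeping a 16-byte margin. */
    static unsigned char *
    grow_code_buffer (unsigned char *buf, size_t *cap, size_t used, size_t needed)
    {
            while (used + needed > *cap - 16)
                    *cap *= 2;
            /* NaCl would instead allocate a fresh buffer and copy,
             * since validated code cannot be realloc'd in place. */
            return realloc (buf, *cap);
    }
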
 
@@ -6705,6 +7222,7 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                        /* do nothing */
                        break;
                }
+               g_assert (code < cfg->native_code + cfg->code_size);
        }
 
        /* Handle relocations with RIP relative addressing */
@@ -6715,27 +7233,69 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                switch (patch_info->type) {
                case MONO_PATCH_INFO_R8:
                case MONO_PATCH_INFO_R4: {
-                       guint8 *pos;
+                       guint8 *pos, *patch_pos;
+                       guint32 target_pos;
 
                        /* The SSE opcodes require a 16 byte alignment */
+#if defined(__default_codegen__)
                        code = (guint8*)ALIGN_TO (code, 16);
-                       memset (orig_code, 0, code - orig_code);
+#elif defined(__native_client_codegen__)
+                       {
+                               /* Pad this out with HLT instructions, */
+                               /* or we can get garbage bytes emitted */
+                               /* which will fail validation.         */
+                               guint8 *aligned_code;
+                               /* extra align to make room for the    */
+                               /* mov/push opcodes emitted below      */
+                               int extra_align = patch_info->type == MONO_PATCH_INFO_R8 ? 2 : 1;
+                               aligned_code = (guint8*)ALIGN_TO (code + extra_align, 16);
+                               /* The technique of hiding data in an  */
+                               /* instruction has a problem here: we  */
+                               /* need the data aligned to a 16-byte  */
+                               /* boundary, but the instruction must  */
+                               /* not cross the bundle boundary. So   */
+                               /* only odd multiples of 16 will work. */
+                               if ((intptr_t)aligned_code % kNaClAlignment == 0) {
+                                       aligned_code += 16;
+                               }
+                               while (code < aligned_code) {
+                                       *(code++) = 0xf4; /* hlt */
+                               }
+                       }
+#endif
 
                        pos = cfg->native_code + patch_info->ip.i;
-
-                       if (IS_REX (pos [1]))
-                               *(guint32*)(pos + 5) = (guint8*)code - pos - 9;
-                       else
-                               *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
+                       if (IS_REX (pos [1])) {
+                               patch_pos = pos + 5;
+                               target_pos = code - pos - 9;
+                       }
+                       else {
+                               patch_pos = pos + 4;
+                               target_pos = code - pos - 8;
+                       }
 
                        if (patch_info->type == MONO_PATCH_INFO_R8) {
+#ifdef __native_client_codegen__
+                               /* Hide 64-bit data in a          */
+                               /* "mov imm64, %r11" instruction; */
+                               /* write it just before the start */
+                               /* of the data.                   */
+                               *(code-2) = 0x49; /* prefix      */
+                               *(code-1) = 0xbb; /* mov X, %r11 */
+#endif
                                *(double*)code = *(double*)patch_info->data.target;
                                code += sizeof (double);
                        } else {
+#ifdef __native_client_codegen__
+                               /* Hide 32-bit data in a        */
+                               /* "push imm32" instruction.    */
+                               *(code-1) = 0x68; /* push */
+#endif
                                *(float*)code = *(float*)patch_info->data.target;
                                code += sizeof (float);
                        }
 
+                       *(guint32*)(patch_pos) = target_pos;
+
                        remove = TRUE;
                        break;
                }
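
The NaCl path above cannot leave loose constant bytes in the code stream, since the validator rejects anything that does not decode as instructions. The padding is therefore made of hlt bytes, and the constant itself is disguised as the immediate of a mov $imm64, %r11 (0x49 0xbb) or push $imm32 (0x68), whose opcode bytes land just before the 16-byte-aligned data. A sketch of the R8 case (standalone; caller-supplied buffer assumed):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: place a double at a 16-byte-aligned address so that the
     * 8 data bytes form the immediate of "mov $imm64, %r11" and every
     * byte in the stream decodes as a valid instruction. */
    static uint8_t *
    embed_double (uint8_t *code, double value)
    {
            /* align code + 2 up to 16, leaving room for the mov opcode */
            uint8_t *aligned = (uint8_t *)(((uintptr_t)(code + 2) + 15) & ~(uintptr_t)15);

            /* the mov starts 2 bytes earlier and must not cross a
             * 32-byte bundle boundary, so only odd multiples of 16 work */
            if ((uintptr_t)aligned % 32 == 0)
                    aligned += 16;
            while (code < aligned - 2)
                    *code++ = 0xf4;                  /* hlt padding         */
            *code++ = 0x49;                          /* REX.WB prefix       */
            *code++ = 0xbb;                          /* mov imm64, %r11     */
            memcpy (code, &value, sizeof value);     /* hidden 8-byte datum */
            return code + sizeof value;
    }
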
@@ -6778,6 +7338,7 @@ mono_arch_emit_exceptions (MonoCompile *cfg)
                                tmp->next = patch_info->next;
                        }
                }
+               g_assert (code < cfg->native_code + cfg->code_size);
        }
 
        cfg->code_len = code - cfg->native_code;
@@ -7095,6 +7656,46 @@ mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guin
        return can_write;
 }
 
+#if defined(__native_client_codegen__)
+/* For membase calls, we want the base register. For Native Client,     */
+/* all indirect calls have the following sequence with the given sizes: */
+/* mov %eXX,%eXX                               [2-3]   */
+/* mov disp(%r15,%rXX,scale),%r11d             [4-8]   */
+/* and $0xffffffffffffffe0,%r11d               [4]     */
+/* add %r15,%r11                               [3]     */
+/* callq *%r11                                 [3]     */
+
+/* Determine if code points to a NaCl call-through-register sequence, */
+/* (i.e., the last 3 instructions listed above) */
+int
+is_nacl_call_reg_sequence(guint8* code)
+{
+       const char *sequence = "\x41\x83\xe3\xe0" /* and  */
+                              "\x4d\x03\xdf"     /* add  */
+                              "\x41\xff\xd3";    /* call */
+       return memcmp (code, sequence, 10) == 0;
+}
+
+/* Determine if code points to the first opcode of the mov membase component */
+/* of an indirect call sequence (i.e. the first 2 instructions listed above) */
+/* (there could be a REX prefix before the opcode but it is ignored) */
+static int
+is_nacl_indirect_call_membase_sequence(guint8* code)
+{
+              /* Check for mov opcode, reg-reg addressing mode (mod = 3), */
+       return code[0] == 0x8b && amd64_modrm_mod(code[1]) == 3 &&
+              /* and that src reg = dest reg */
+              amd64_modrm_reg(code[1]) == amd64_modrm_rm(code[1]) &&
+              /* Check that next inst is mov, uses SIB byte (rm = 4), */
+              IS_REX(code[2]) &&
+              code[3] == 0x8b && amd64_modrm_rm(code[4]) == 4 &&
+              /* and has dst of r11 and base of r15 */
+              (amd64_modrm_reg(code[4]) + amd64_rex_r(code[2])) == AMD64_R11 &&
+              (amd64_sib_base(code[5]) + amd64_rex_b(code[2])) == AMD64_R15;
+}
+#endif /* __native_client_codegen__ */
+
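
A hedged sketch of how such a predicate might be used by a caller holding a return address: the and/add/call tail is exactly 10 bytes, so it ends at the point the call returns to (the helper below is illustrative, not part of the patch):

    /* Illustrative helper: did the call that produced 'ret_addr'
     * go through a NaCl call-through-register sequence? */
    static gboolean
    returns_from_nacl_reg_call (guint8 *ret_addr)
    {
            return is_nacl_call_reg_sequence (ret_addr - 10);
    }
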
 int
 mono_arch_get_this_arg_reg (guint8 *code)
 {
@@ -7148,6 +7749,8 @@ get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *cod
                g_assert ((code - start) < 64);
        }
 
+       nacl_global_codeman_validate (&start, 64, &code);
+
        mono_debug_add_delegate_trampoline (start, code - start);
 
        if (code_len)
@@ -7292,6 +7895,7 @@ mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 
 #ifdef MONO_ARCH_HAVE_IMT
 
+#if defined(__default_codegen__)
 #define CMP_SIZE (6 + 1)
 #define CMP_REG_REG_SIZE (4 + 1)
 #define BR_SMALL_SIZE 2
@@ -7299,6 +7903,20 @@ mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 #define MOV_REG_IMM_SIZE 10
 #define MOV_REG_IMM_32BIT_SIZE 6
 #define JUMP_REG_SIZE (2 + 1)
+#elif defined(__native_client_codegen__)
+/* NaCl N-byte instructions can be padded up to N-1 bytes */
+#define CMP_SIZE ((6 + 1) * 2 - 1)
+#define CMP_REG_REG_SIZE ((4 + 1) * 2 - 1)
+#define BR_SMALL_SIZE (2 * 2 - 1)
+#define BR_LARGE_SIZE (6 * 2 - 1)
+#define MOV_REG_IMM_SIZE (10 * 2 - 1)
+#define MOV_REG_IMM_32BIT_SIZE (6 * 2 - 1)
+/* Jump reg for NaCl adds a mask (+4) and add (+3) */
+#define JUMP_REG_SIZE ((2 + 1 + 4 + 3) * 2 - 1)
+/* Jump membase's size is large and unpredictable in  */
+/* Native Client; just pad it out to a whole bundle.  */
+#define JUMP_MEMBASE_SIZE (kNaClAlignment)
+#endif
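
The *2 - 1 rule is the worst case for NaCl padding: an N-byte instruction that would straddle a bundle boundary can be preceded by at most N - 1 bytes of nop padding, so N + (N - 1) = 2N - 1 bytes must be reserved. A worked instance for two of the macros above:

    /* Worked instances of the 2N - 1 worst case used above. */
    enum {
            CMP_BYTES          = 6 + 1,                  /* = 7  */
            CMP_SIZE_NACL      = CMP_BYTES * 2 - 1,      /* = 13 */
            JUMP_REG_BYTES     = 2 + 1 + 4 + 3,          /* = 10 */
            JUMP_REG_SIZE_NACL = JUMP_REG_BYTES * 2 - 1  /* = 19 */
    };
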
 
 static int
 imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
@@ -7338,6 +7956,9 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                                item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
                                        else
                                                item->chunk_size += MOV_REG_IMM_SIZE;
+#ifdef __native_client_codegen__
+                                       item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
                                }
                                item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
                        } else {
@@ -7353,6 +7974,9 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                        /* with assert below:
                                         * item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
                                         */
+#ifdef __native_client_codegen__
+                                       item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
                                }
                        }
                } else {
@@ -7365,10 +7989,16 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                }
                size += item->chunk_size;
        }
+#if defined(__native_client__) && defined(__native_client_codegen__)
+       /* In Native Client, we don't re-use thunks; allocate from the */
+       /* normal code manager paths instead.                          */
+       code = mono_domain_code_reserve (domain, size);
+#else
        if (fail_tramp)
                code = mono_method_alloc_generic_virtual_thunk (domain, size);
        else
                code = mono_domain_code_reserve (domain, size);
+#endif
        start = code;
        for (i = 0; i < count; ++i) {
                MonoIMTCheckItem *item = imt_entries [i];
@@ -7381,24 +8011,24 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                        if (amd64_is_imm32 (item->key))
                                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                        else {
-                                               amd64_mov_reg_imm (code, AMD64_R11, item->key);
-                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+                                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                                        }
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
                                if (item->has_target_code) {
-                                       amd64_mov_reg_imm (code, AMD64_R11, item->value.target_code);
-                                       amd64_jump_reg (code, AMD64_R11);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code);
+                                       amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
                                } else {
-                                       amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
-                                       amd64_jump_membase (code, AMD64_R11, 0);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                                       amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
                                }
 
                                if (fail_case) {
                                        amd64_patch (item->jmp_code, code);
-                                       amd64_mov_reg_imm (code, AMD64_R11, fail_tramp);
-                                       amd64_jump_reg (code, AMD64_R11);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp);
+                                       amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
                                        item->jmp_code = NULL;
                                }
                        } else {
@@ -7407,27 +8037,33 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                if (amd64_is_imm32 (item->key))
                                        amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                else {
-                                       amd64_mov_reg_imm (code, AMD64_R11, item->key);
-                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+                                       amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
-                               amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R11, 0);
+                               /* See the comment below about R10 */
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
                                amd64_patch (item->jmp_code, code);
                                amd64_breakpoint (code);
                                item->jmp_code = NULL;
 #else
-                               amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R11, 0);
+                               /* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG)
+                                  here because R11 (MONO_ARCH_IMT_REG) needs
+                                  to be preserved.  R10 needs to be preserved
+                                  for calls which require a runtime generic
+                                  context, but interface calls don't. */
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
 #endif
                        }
                } else {
                        if (amd64_is_imm32 (item->key))
                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                        else {
-                               amd64_mov_reg_imm (code, AMD64_R11, item->key);
-                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+                               amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
                        }
                        item->jmp_code = code;
                        if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
@@ -7451,6 +8087,8 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                mono_stats.imt_thunks_size += code - start;
        g_assert (code - start <= size);
 
+       nacl_domain_code_validate (domain, &start, size, &code);
+
        return start;
 }