Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client
author     Elijah Taylor <elijahtaylor@google.com>
           Wed, 15 Dec 2010 00:03:45 +0000 (16:03 -0800)
committer  Elijah Taylor <elijahtaylor@google.com>
           Wed, 15 Dec 2010 00:03:45 +0000 (16:03 -0800)
configure.in
mono/arch/x86/x86-codegen.h
mono/io-layer/atomic.h
mono/io-layer/posix.c
mono/metadata/assembly.c
mono/metadata/boehm-gc.c
mono/metadata/domain-internals.h
mono/metadata/domain.c
mono/metadata/object.c

diff --git a/configure.in b/configure.in
index 0f0cd253308b2474ba9b011da8d6fd1d1e4fd294..4a9809bce9d14524ded4b46500d0a16d2dca8bce 100644
@@ -1980,6 +1980,7 @@ dnl ***  NaCl  ***
 dnl **************
 
 AC_ARG_ENABLE(nacl_codegen, [  --enable-nacl-codegen      Enable Native Client code generation], enable_nacl_codegen=$enableval, enable_nacl_codegen=no)
+AC_ARG_ENABLE(nacl_gc, [  --enable-nacl-gc           Enable Native Client garbage collection], enable_nacl_gc=$enableval, enable_nacl_gc=no)
 
 AM_CONDITIONAL(NACL_CODEGEN, test x$enable_nacl_codegen != xno)
 if test "x$enable_nacl_codegen" = "xyes"; then
@@ -1988,6 +1989,10 @@ if test "x$enable_nacl_codegen" = "xyes"; then
    AC_DEFINE(TARGET_NACL, 1, [...])
 else
    MONO_NACL_ALIGN_MASK_OFF=0
+   CPPFLAGS="$CPPFLAGS -D__default_codegen__"
+fi
+if test "x$enable_nacl_gc" = "xyes"; then
+   CPPFLAGS="$CPPFLAGS -finstrument-for-thread-suspension -D__native_client_gc__"
 fi
 AC_SUBST(MONO_NACL_ALIGN_MASK_OFF)
 
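For orientation, a minimal C sketch of how source files can branch on the preprocessor macros these configure options define; illustrative only, with just the macro names taken from this patch:

    /* Illustrative: compile-time dispatch on the NaCl configure macros. */
    #include <stdio.h>

    int
    main (void)
    {
    #if defined(__native_client_codegen__)
            puts ("NaCl codegen: emit bundle-aligned, verifier-safe code");
    #elif defined(__default_codegen__)
            puts ("default codegen: emit ordinary x86/amd64 code");
    #endif
    #ifdef __native_client_gc__
            puts ("GC build: -finstrument-for-thread-suspension is active");
    #endif
            return 0;
    }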
@@ -2291,6 +2296,13 @@ if test "x$host" != "x$target"; then
                sizeof_register=8
                target_byte_order=G_BIG_ENDIAN
                ;;
+   x86_64-*-nacl)
+               TARGET=AMD64
+               arch_target=amd64
+               AC_DEFINE(TARGET_AMD64, 1, [...])
+               AC_DEFINE(MONO_CROSS_COMPILE,1,[The runtime is compiled for cross-compiling mode])
+               sizeof_register=8
+               ;;
        *)
                AC_MSG_WARN([Cross compiling is only supported for targets matching 'powerpc64-{ps3,xbox360}-linux-gnu'])
        esac
@@ -2548,6 +2560,10 @@ case "x$gc" in
                if test x$TARGET = xSPARC -o x$TARGET = xSPARC64; then
                        LIBGC_CPPFLAGS=`echo $LIBGC_CPPFLAGS | sed -e 's/-D_FILE_OFFSET_BITS=64//g'`
                fi
+               # Don't pass -finstrument-for-thread-suspension through to libgc:
+               # if the collector's own routines are instrumented, the result is
+               # infinite recursion, undefined thread-parking behavior, and worse.
+               LIBGC_CPPFLAGS=`echo $LIBGC_CPPFLAGS | sed -e 's/-finstrument-for-thread-suspension//g'`
                ac_configure_args="$ac_configure_args --disable-embed-check --with-libgc-threads=$libgc_threads $libgc_configure_args \"CPPFLAGS_FOR_LIBGC=$LIBGC_CPPFLAGS\" \"CFLAGS_FOR_LIBGC=$CFLAGS_FOR_LIBGC\""
                AC_CONFIG_SUBDIRS(libgc)
                ;;
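To make that comment concrete: -finstrument-for-thread-suspension makes the compiler insert a suspension check at function entry, so compiling the collector itself with it would make the check re-enter the collector's own machinery. A sketch of the failure mode; the hook and function names are assumptions, not taken from this patch:

    /* Hypothetical: an instrumented libgc routine. */
    void
    GC_stop_world_inner (void)
    {
            __nacl_suspend_thread_if_needed ();   /* compiler-inserted check;    */
                                                  /* parking a thread here would */
                                                  /* recurse into the GC itself  */
            /* ... actual stop-the-world work ... */
    }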
diff --git a/mono/arch/x86/x86-codegen.h b/mono/arch/x86/x86-codegen.h
index af3e3c6f5580f05afa3d1f1528a870daf0f69de7..6ca3695c7e1c8063ca35b9d3741ee5a14fe4e025 100644
@@ -17,9 +17,7 @@
 #include <assert.h>
 
 #ifdef __native_client_codegen__
-#define kNaClAlignment 32
-#define kNaClAlignmentMask (kNaClAlignment - 1)
-extern guint8 nacl_align_byte;
+extern gint8 nacl_align_byte;
 #endif /* __native_client_codegen__ */
 
 
@@ -28,15 +26,10 @@ extern guint8 nacl_align_byte;
 #define x86_call_sequence_pre(inst) guint8* _code_start = (inst);
 #define x86_call_sequence_post(inst) \
   (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
-#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
-#define x86_call_sequence_post_val(inst) \
-  (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
 #else
 #define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0)
-#define x86_call_sequence_pre(inst)
-#define x86_call_sequence_post(inst)
-#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
-#define x86_call_sequence_post_val(inst) _code_start
+#define x86_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post(inst) _code_start
 #endif  /* __native_client_codegen__ */
 
 
@@ -305,7 +298,7 @@ typedef union {
 
 #define kMaxMembaseEmitPadding 6
 
-#define x86_membase_emit(inst,r,basereg,disp)  do {\
+#define x86_membase_emit_body(inst,r,basereg,disp)     do {\
        if ((basereg) == X86_ESP) {     \
                if ((disp) == 0) {      \
                        x86_address_byte ((inst), 0, (r), X86_ESP);     \
@@ -334,6 +327,18 @@ typedef union {
        }       \
        } while (0)
 
+#if defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_membase_emit(inst,r,basereg,disp) \
+       do { \
+               amd64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \
+       } while (0)
+#else /* __default_codegen__ || 32-bit NaCl codegen */
+#define x86_membase_emit(inst,r,basereg,disp) \
+       do { \
+               x86_membase_emit_body((inst),(r),(basereg),(disp)); \
+       } while (0)
+#endif
+
 #define kMaxMemindexEmitPadding 6
 
 #define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift)  \
@@ -351,7 +356,7 @@ typedef union {
                        x86_imm_emit8 ((inst), (disp)); \
                } else {        \
                        x86_address_byte ((inst), 2, (r), 4);   \
-                       x86_address_byte ((inst), (shift), (indexreg), 5);      \
+                       x86_address_byte ((inst), (shift), (indexreg), (basereg));      \
                        x86_imm_emit32 ((inst), (disp));        \
                }       \
        } while (0)
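The one-byte change above fixes a real encoding bug: in this disp32 branch the SIB byte's low three bits select the base register, so the hard-coded 5 silently encoded EBP as the base no matter which basereg was requested (5 only means "no base, disp32 follows" when the mod field is 0). For reference, the bit packing x86_address_byte performs, reproduced from memory as a sketch:

    /* modrm byte: (mod << 6)   | (reg << 3)   | rm   */
    /* sib byte:   (scale << 6) | (index << 3) | base */
    #define x86_address_byte(inst,m,o,r) \
            do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0)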
@@ -438,12 +443,23 @@ typedef union {
     } while ( in_nop );  \
   } while (0)
 
+#if defined(__native_client__)
 #define x86_patch(ins,target) \
   do { \
     unsigned char* inst = (ins); \
+    guint8* new_target = nacl_modify_patch_target((target)); \
     x86_skip_nops((inst)); \
-    x86_do_patch((inst), (target)); \
+    x86_do_patch((inst), new_target); \
   } while (0)
+#else /* __native_client__ */
+#define x86_patch(ins,target) \
+  do { \
+    unsigned char* inst = (ins); \
+    guint8* new_target = (target); \
+    x86_skip_nops((inst)); \
+    x86_do_patch((inst), new_target); \
+  } while (0)
+#endif /* __native_client__ */
 
 #else
 #define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0)
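Both variants keep the usual back-patching workflow: emit a jump with a dummy displacement, generate the target block, then rewrite the displacement. A sketch, with the emitter state illustrative:

    /* Illustrative: forward jump patched once the target is known. */
    unsigned char *jump_ins = code;
    x86_jump32 (code, 0);          /* displacement not yet known         */
    /* ... emit the code the jump should land on ... */
    x86_patch (jump_ins, code);    /* rewrite displacement to reach here */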
@@ -472,6 +488,13 @@ typedef union {
 #define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0)
 #define x86_movsd(inst) x86_movsl((inst))
 
+#if defined(__default_codegen__)
+#define x86_prefix(inst,p) \
+       do { \
+               *(inst)++ =(unsigned char) (p); \
+       } while (0)
+#elif defined(__native_client_codegen__)
+#if defined(TARGET_X86)
 /* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */
 /* This keeps us from having to call x86_codegen_pre with specific       */
 /* knowledge of the size of the instruction that follows it, and         */
@@ -481,6 +504,18 @@ typedef union {
                x86_codegen_pre(&(inst), kNaClAlignment - 1); \
                *(inst)++ =(unsigned char) (p); \
        } while (0)
+#elif defined(TARGET_AMD64)
+/* We need to tag any prefixes so we can perform proper membase sandboxing */
+/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details        */
+#define x86_prefix(inst,p) \
+       do { \
+               amd64_nacl_tag_legacy_prefix((inst)); \
+               *(inst)++ =(unsigned char) (p); \
+       } while (0)
+
+#endif /* TARGET_AMD64 */
+
+#endif /* __native_client_codegen__ */
 
 #define x86_rdtsc(inst) \
        do {    \
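This is why the literal 0x66 bytes in the mov emitters below become x86_prefix calls: routing the operand-size prefix through the macro lets the x86 NaCl variant pad before it and the amd64 variant tag it for membase sandboxing. A sketch of an emitter using it, where X86_OPERAND_PREFIX is assumed to be the 0x66 override that the replacements below substitute for:

    /* Illustrative: emit the 16-bit "mov cx, ax". */
    unsigned char buf [16];
    unsigned char *code = buf;
    x86_prefix (code, X86_OPERAND_PREFIX);   /* operand-size override (0x66) */
    *code++ = 0x89;                          /* mov r/m16, r16               */
    x86_reg_emit (code, X86_EAX, X86_ECX);   /* modrm: src=ax, dst=cx        */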
@@ -1041,7 +1076,7 @@ typedef union {
                x86_codegen_pre(&(inst), 7); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x88; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x89; break; \
                default: assert (0);    \
                }       \
@@ -1053,7 +1088,7 @@ typedef union {
                x86_codegen_pre(&(inst), 3); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x88; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x89; break; \
                default: assert (0);    \
                }       \
@@ -1065,7 +1100,7 @@ typedef union {
                x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x88; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x89; break; \
                default: assert (0);    \
                }       \
@@ -1077,7 +1112,7 @@ typedef union {
                x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x88; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x89; break; \
                default: assert (0);    \
                }       \
@@ -1089,7 +1124,7 @@ typedef union {
                x86_codegen_pre(&(inst), 3); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x8a; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x8b; break; \
                default: assert (0);    \
                }       \
@@ -1101,7 +1136,7 @@ typedef union {
                x86_codegen_pre(&(inst), 7); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x8a; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x8b; break; \
                default: assert (0);    \
                }       \
@@ -1115,7 +1150,7 @@ typedef union {
                x86_codegen_pre(&(inst), kMovRegMembasePadding); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x8a; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x8b; break; \
                default: assert (0);    \
                }       \
@@ -1127,7 +1162,7 @@ typedef union {
                x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
                switch ((size)) {       \
                case 1: *(inst)++ = (unsigned char)0x8a; break; \
-               case 2: *(inst)++ = (unsigned char)0x66; /* fall through */     \
+               case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */      \
                case 4: *(inst)++ = (unsigned char)0x8b; break; \
                default: assert (0);    \
                }       \
@@ -1155,7 +1190,7 @@ typedef union {
                        x86_imm_emit8 ((inst), (imm));  \
                } else if ((size) == 2) {       \
                        x86_codegen_pre(&(inst), 9); \
-                       *(inst)++ = (unsigned char)0x66;        \
+                       x86_prefix((inst), X86_OPERAND_PREFIX); \
                        *(inst)++ = (unsigned char)0xc7;        \
                        x86_mem_emit ((inst), 0, (mem));        \
                        x86_imm_emit16 ((inst), (imm)); \
@@ -1176,7 +1211,7 @@ typedef union {
                        x86_imm_emit8 ((inst), (imm));  \
                } else if ((size) == 2) {       \
                        x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
-                       *(inst)++ = (unsigned char)0x66;        \
+                       x86_prefix((inst), X86_OPERAND_PREFIX); \
                        *(inst)++ = (unsigned char)0xc7;        \
                        x86_membase_emit ((inst), 0, (basereg), (disp));        \
                        x86_imm_emit16 ((inst), (imm)); \
@@ -1197,7 +1232,7 @@ typedef union {
                        x86_imm_emit8 ((inst), (imm));  \
                } else if ((size) == 2) {       \
                        x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \
-                       *(inst)++ = (unsigned char)0x66;        \
+                       x86_prefix((inst), X86_OPERAND_PREFIX); \
                        *(inst)++ = (unsigned char)0xc7;        \
                        x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift));  \
                        x86_imm_emit16 ((inst), (imm)); \
@@ -1681,6 +1716,7 @@ typedef union {
                x86_imm_emit8 ((inst), (imm));  \
        } while (0)
 
+#if defined(TARGET_X86)
 #define x86_jump32(inst,imm)   \
        do {    \
                x86_codegen_pre(&(inst), 5); \
@@ -1694,9 +1730,27 @@ typedef union {
                *(inst)++ = (unsigned char)0xeb;        \
                x86_imm_emit8 ((inst), (imm));  \
        } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other      */
+/* amd64 specific files, so they need to be instrumented directly. */
+#define x86_jump32(inst,imm)   \
+       do {    \
+               amd64_codegen_pre(inst); \
+               *(inst)++ = (unsigned char)0xe9;        \
+               x86_imm_emit32 ((inst), (imm)); \
+               amd64_codegen_post(inst); \
+       } while (0)
 
+#define x86_jump8(inst,imm)    \
+       do {    \
+               amd64_codegen_pre(inst); \
+               *(inst)++ = (unsigned char)0xeb;        \
+               x86_imm_emit8 ((inst), (imm));  \
+               amd64_codegen_post(inst); \
+       } while (0)
+#endif
 
-#ifdef __native_client_codegen__
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
 #define x86_jump_reg(inst,reg) do {    \
     x86_codegen_pre(&(inst), 5);                       \
     *(inst)++ = (unsigned char)0x83;  /* and */                \
@@ -1747,7 +1801,7 @@ typedef union {
 /*
  * target is a pointer in our buffer.
  */
-#define x86_jump_code(inst,target)     \
+#define x86_jump_code_body(inst,target)        \
        do {    \
                int t; \
                x86_codegen_pre(&(inst), 2); \
@@ -1761,6 +1815,31 @@ typedef union {
                }       \
        } while (0)
 
+#if defined(__default_codegen__) 
+#define x86_jump_code(inst,target) \
+       do { \
+               x86_jump_code_body((inst),(target)); \
+       } while (0)
+#elif defined(__native_client_codegen__) && defined(TARGET_X86)
+#define x86_jump_code(inst,target) \
+       do { \
+               guint8* jump_start = (inst); \
+               x86_jump_code_body((inst),(target)); \
+               x86_patch(jump_start, (target)); \
+       } while (0)
+#elif defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_jump_code(inst,target) \
+       do { \
+               /* jump_code_body is used twice because there are offsets */ \
+               /* calculated based on the IP, which can change after the */ \
+               /* call to amd64_codegen_post                             */ \
+               amd64_codegen_pre(inst); \
+               x86_jump_code_body((inst),(target)); \
+               inst = amd64_codegen_post(inst); \
+               x86_jump_code_body((inst),(target)); \
+       } while (0)
+#endif /* __native_client_codegen__ */
+
 #define x86_jump_disp(inst,disp)       \
        do {    \
                int t = (disp) - 2;     \
@@ -1772,6 +1851,7 @@ typedef union {
                }       \
        } while (0)
 
+#if defined(TARGET_X86)
 #define x86_branch8(inst,cond,imm,is_signed)   \
        do {    \
                x86_codegen_pre(&(inst), 2); \
@@ -1792,12 +1872,40 @@ typedef union {
                        *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10;        \
                x86_imm_emit32 ((inst), (imm)); \
        } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other      */
+/* amd64 specific files, so they need to be instrumented directly. */
+#define x86_branch8(inst,cond,imm,is_signed)   \
+       do {    \
+               amd64_codegen_pre(inst); \
+               if ((is_signed))        \
+                       *(inst)++ = x86_cc_signed_map [(cond)]; \
+               else    \
+                       *(inst)++ = x86_cc_unsigned_map [(cond)];       \
+               x86_imm_emit8 ((inst), (imm));  \
+               amd64_codegen_post(inst); \
+       } while (0)
+#define x86_branch32(inst,cond,imm,is_signed)  \
+       do {    \
+               amd64_codegen_pre(inst); \
+               *(inst)++ = (unsigned char)0x0f;        \
+               if ((is_signed))        \
+                       *(inst)++ = x86_cc_signed_map [(cond)] + 0x10;  \
+               else    \
+                       *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10;        \
+               x86_imm_emit32 ((inst), (imm)); \
+               amd64_codegen_post(inst); \
+       } while (0)
+#endif
 
+#if defined(TARGET_X86)
 #define x86_branch(inst,cond,target,is_signed) \
        do {    \
                int offset;                                      \
+               guint8* branch_start; \
                x86_codegen_pre(&(inst), 2); \
                offset = (target) - (inst) - 2; \
+               branch_start = (inst); \
                if (x86_is_imm8 ((offset)))     \
                        x86_branch8 ((inst), (cond), offset, (is_signed));      \
                else {  \
@@ -1805,7 +1913,42 @@ typedef union {
                        offset = (target) - (inst) - 6; \
                        x86_branch32 ((inst), (cond), offset, (is_signed));     \
                }       \
+               x86_patch(branch_start, (target)); \
        } while (0)
+#elif defined(TARGET_AMD64)
+/* This macro is used directly from mini-amd64.c and other        */
+/* amd64 specific files, so it needs to be instrumented directly. */
+
+#define x86_branch_body(inst,cond,target,is_signed)    \
+       do {    \
+               int offset = (target) - (inst) - 2;     \
+               if (x86_is_imm8 ((offset)))     \
+                       x86_branch8 ((inst), (cond), offset, (is_signed));      \
+               else {  \
+                       offset = (target) - (inst) - 6; \
+                       x86_branch32 ((inst), (cond), offset, (is_signed));     \
+               }       \
+       } while (0)
+
+#if defined(__default_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+       do { \
+               x86_branch_body((inst),(cond),(target),(is_signed)); \
+       } while (0)
+#elif defined(__native_client_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+       do {    \
+               /* branch_body is used twice because there are offsets */ \
+               /* calculated based on the IP, which can change after  */ \
+               /* the call to amd64_codegen_post                      */ \
+               amd64_codegen_pre(inst); \
+               x86_branch_body((inst),(cond),(target),(is_signed)); \
+               inst = amd64_codegen_post(inst); \
+               x86_branch_body((inst),(cond),(target),(is_signed)); \
+       } while (0)
+#endif /* __native_client_codegen__ */
+
+#endif /* TARGET_AMD64 */
 
 #define x86_branch_disp(inst,cond,disp,is_signed)      \
        do {    \
@@ -1865,10 +2008,10 @@ typedef union {
                x86_call_sequence_post((inst)); \
        } while (0)
 
-#ifdef __native_client_codegen__
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
 #define x86_call_reg_internal(inst,reg)        \
   do {                                                 \
-    x86_codegen_pre(&(inst), 5);                       \
     *(inst)++ = (unsigned char)0x83;  /* and */                \
     x86_reg_emit ((inst), 4, (reg));  /* reg */                \
     *(inst)++ = (unsigned char)nacl_align_byte;                \
@@ -1914,20 +2057,23 @@ typedef union {
 #endif  /* __native_client_codegen__ */
 
 
-#ifdef __native_client_codegen__
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
 
 #define x86_call_code(inst,target)     \
        do {    \
                int _x86_offset; \
+               guint8* call_start; \
                guint8* _aligned_start; \
-               x86_call_sequence_pre_val ((inst)); \
+               x86_call_sequence_pre((inst)); \
                _x86_offset = (unsigned char*)(target) - (inst);        \
                _x86_offset -= 5;       \
                x86_call_imm_body ((inst), _x86_offset);        \
-               _aligned_start = x86_call_sequence_post_val ((inst)); \
+               _aligned_start = x86_call_sequence_post((inst)); \
+               call_start = _aligned_start; \
                _x86_offset = (unsigned char*)(target) - (_aligned_start);      \
                _x86_offset -= 5;       \
                x86_call_imm_body ((_aligned_start), _x86_offset);      \
+               x86_patch(call_start, (target)); \
        } while (0)
 
 #define SIZE_OF_RET 6
@@ -2062,9 +2208,9 @@ typedef union {
 
 #ifdef __native_client_codegen__
 
-#define kNaClLengthOfCallReg 5
-#define kNaClLengthOfCallImm 5
-#define kNaClLengthOfCallMembase (kNaClLengthOfCallReg + 6)
+#define kx86NaClLengthOfCallReg 5
+#define kx86NaClLengthOfCallImm 5
+#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6)
 
 #endif  /* __native_client_codegen__ */
 
diff --git a/mono/io-layer/atomic.h b/mono/io-layer/atomic.h
index 258aa1851297e1a21a1e6873f085fb9d7220cfd1..e45cfcf99b023116e656004b1454caa2fbc5f791 100644
@@ -92,7 +92,7 @@ static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest
        gpointer old;
 
        __asm__ __volatile__ ("lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__native_client__)
                              "cmpxchgq"
 #else
                              "cmpxchgl"
@@ -154,7 +154,7 @@ static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
        gpointer ret;
        
        __asm__ __volatile__ ("1:; lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__native_client__)
                              "cmpxchgq"
 #else
                              "cmpxchgl"
diff --git a/mono/io-layer/posix.c b/mono/io-layer/posix.c
index 732529039ce58ab588e8d7d554e368074fdaf7f1..a7781bdd7bd544a80d58dad401d0f57a37eb5d11 100644
@@ -60,7 +60,8 @@ gpointer _wapi_stdhandle_create (int fd, const gchar *name)
        g_message("%s: creating standard handle type %s, fd %d", __func__,
                  name, fd);
 #endif
-       
+
+#if !defined(__native_client__)        
        /* Check if fd is valid */
        do {
                flags=fcntl(fd, F_GETFL);
@@ -78,11 +79,18 @@ gpointer _wapi_stdhandle_create (int fd, const gchar *name)
                SetLastError (_wapi_get_win32_file_error (errno));
                return(INVALID_HANDLE_VALUE);
        }
+       file_handle.fileaccess=convert_from_flags(flags);
+#else
+       /*
+        * fcntl returns -1 under NaCl, since there is no real file system
+        * API, but the standard streams are still available.
+        */
+       file_handle.fileaccess = (fd == STDIN_FILENO) ? GENERIC_READ : GENERIC_WRITE;
+#endif
 
        file_handle.filename = g_strdup(name);
        /* some default security attributes might be needed */
        file_handle.security_attributes=0;
-       file_handle.fileaccess=convert_from_flags(flags);
 
        /* Apparently input handles can't be written to.  (I don't
         * know if output or error handles can't be read from.)
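For context, this function is invoked once per standard stream during io-layer startup; a minimal usage sketch, with the error handling illustrative:

    /* Illustrative: wrap stdin in a wapi handle at startup. */
    gpointer handle = _wapi_stdhandle_create (STDIN_FILENO, "<stdin>");
    if (handle == INVALID_HANDLE_VALUE)
            g_warning ("%s: could not wrap fd %d", __func__, STDIN_FILENO);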
diff --git a/mono/metadata/assembly.c b/mono/metadata/assembly.c
index 9474f0d0f1b2115ba4d80b826a7be837105819aa..335adc83f1b7ab49248d184dbffbd4b031f4c28c 100644
@@ -196,13 +196,23 @@ mono_public_tokens_are_equal (const unsigned char *pubt1, const unsigned char *p
        return memcmp (pubt1, pubt2, 16) == 0;
 }
 
+/* Native Client can't get this info from an environment variable so */
+/* it's passed in to the runtime, or set manually by embedding code. */
+#ifdef __native_client__
+char* nacl_mono_path = NULL;
+#endif
+
 static void
 check_path_env (void)
 {
        const char *path;
        char **splitted, **dest;
        
+#ifdef __native_client__
+       path = nacl_mono_path;
+#else
        path = g_getenv ("MONO_PATH");
+#endif
        if (!path)
                return;
 
diff --git a/mono/metadata/boehm-gc.c b/mono/metadata/boehm-gc.c
index fa49e6a2aa9b6166d46f5b61220e907c3f42d0d1..050cb328d606e66fed2375aada5769d205e7dedc 100644
@@ -105,6 +105,8 @@ mono_gc_base_init (void)
 
                GC_stackbottom = (char*)ss.ss_sp;
        }
+#elif defined(__native_client__)
+       /* Do nothing, GC_stackbottom is set correctly in libgc */
 #else
        {
                int dummy;
diff --git a/mono/metadata/domain-internals.h b/mono/metadata/domain-internals.h
index bca936805be25bace581924bcd5ff7c355f94a80..ac252d96d3cdf1d44eae705a5e69db71027546cc 100644
@@ -421,6 +421,14 @@ mono_domain_code_reserve_align (MonoDomain *domain, int size, int alignment) MON
 void
 mono_domain_code_commit (MonoDomain *domain, void *data, int size, int newsize) MONO_INTERNAL;
 
+#if defined(__native_client_codegen__) && defined(__native_client__)
+void *
+nacl_domain_get_code_dest (MonoDomain *domain, void *data) MONO_INTERNAL;
+
+void 
+nacl_domain_code_validate (MonoDomain *domain, guint8 **buf_base, int buf_size, guint8 **code_end) MONO_INTERNAL;
+#endif
+
 void
 mono_domain_code_foreach (MonoDomain *domain, MonoCodeManagerFunc func, void *user_data) MONO_INTERNAL;
 
diff --git a/mono/metadata/domain.c b/mono/metadata/domain.c
index 07fe67e659eaa73f08eed0d3a7e13a5357949685..4c1a29c29ac9f01c85efecae58053a2895aac681 100644
@@ -2196,6 +2196,42 @@ mono_domain_code_commit (MonoDomain *domain, void *data, int size, int newsize)
        mono_domain_unlock (domain);
 }
 
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/*
+ * Given the temporary buffer (allocated by mono_domain_code_reserve) that
+ * code is being generated into, return a pointer to the destination in the
+ * dynamic code segment to which the code will be copied when
+ * mono_domain_code_commit is called.
+ * LOCKING: Acquires the domain lock.
+ */
+void *
+nacl_domain_get_code_dest (MonoDomain *domain, void *data)
+{
+       void *dest;
+       mono_domain_lock (domain);
+       dest = nacl_code_manager_get_code_dest (domain->code_mp, data);
+       mono_domain_unlock (domain);
+       return dest;
+}
+
+/* 
+ * Convenience function which calls mono_domain_code_commit to validate and copy
+ * the code. The caller sets *buf_base and *buf_size to the start and size of
+ * the buffer (allocated by mono_domain_code_reserve), and *code_end to the byte
+ * after the last instruction byte. On return, *buf_base will point to the start
+ * of the copied code in the code segment, and *code_end will point after the end of
+ * the copied code.
+ */
+void
+nacl_domain_code_validate (MonoDomain *domain, guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+       guint8 *tmp = nacl_domain_get_code_dest (domain, *buf_base);
+       mono_domain_code_commit (domain, *buf_base, buf_size, *code_end - *buf_base);
+       *code_end = tmp + (*code_end - *buf_base);
+       *buf_base = tmp;
+}
+#endif
+
 /*
  * mono_domain_code_foreach:
  * Iterate over the code thunks of the code manager of @domain.
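Putting the two helpers together, a JIT caller would drive them roughly as follows; the buffer size is illustrative and the emission step is elided:

    /* Illustrative: reserve, emit, then validate/copy under NaCl. */
    guint8 *buf = mono_domain_code_reserve (domain, 64);
    guint8 *code = buf;
    /* ... emit at most 64 bytes of machine code at 'code' ... */
    nacl_domain_code_validate (domain, &buf, 64, &code);
    /* buf now points into the validated code segment, code just past its end */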
diff --git a/mono/metadata/object.c b/mono/metadata/object.c
index 39534444502d623f7c2bb550854e30f2ec2b51fc..cc550bc6a87759a3ba9d00a222bb49a42e9db271 100644
@@ -1709,8 +1709,12 @@ mono_method_add_generic_virtual_invocation (MonoDomain *domain, MonoVTable *vtab
                        g_ptr_array_free (sorted, TRUE);
                }
 
+#ifndef __native_client__
+               /* We don't re-use any thunks as there is a lot of overhead */
+               /* to deleting and re-using code in Native Client.          */
                if (old_thunk != vtable_trampoline && old_thunk != imt_trampoline)
                        invalidate_generic_virtual_thunk (domain, old_thunk);
+#endif
        }
 
        mono_domain_unlock (domain);