use_sigposix=yes
ikvm_native=no
AC_DEFINE(DISABLE_SOCKETS,1,[Disable sockets support])
+ AC_DEFINE(DISABLE_ATTACH, 1, [Disable agent attach support])
;;
*-*-hpux*)
host_win32=no
with_shared_mono=no
fi
+case $host in
+*nacl* ) with_shared_mono=yes;;
+esac
+
if test "x$host_win32" = "xyes"; then
# Boehm GC requires the runtime to be in its own dll
with_static_mono=no
dnl **************
AC_ARG_ENABLE(nacl_codegen, [ --enable-nacl-codegen Enable Native Client code generation], enable_nacl_codegen=$enableval, enable_nacl_codegen=no)
+AC_ARG_ENABLE(nacl_gc, [ --enable-nacl-gc Enable Native Client garbage collection], enable_nacl_gc=$enableval, enable_nacl_gc=no)
AM_CONDITIONAL(NACL_CODEGEN, test x$enable_nacl_codegen != xno)
if test "x$enable_nacl_codegen" = "xyes"; then
AC_DEFINE(TARGET_NACL, 1, [...])
else
MONO_NACL_ALIGN_MASK_OFF=0
+ CPPFLAGS="$CPPFLAGS -D__default_codegen__"
+fi
+if test "x$enable_nacl_gc" = "xyes"; then
+ CPPFLAGS="$CPPFLAGS -finstrument-for-thread-suspension -D__native_client_gc__"
fi
AC_SUBST(MONO_NACL_ALIGN_MASK_OFF)
sgen_supported=true
;;
esac
+ case "$host" in
+ x86_64-*-nacl*)
+ AC_DEFINE(__mono_ilp32__, 1, [64 bit mode with 4 byte longs and pointers])
+ sizeof_register=8
+ ;;
+ esac
;;
ia64-*-*)
TARGET=IA64
sizeof_register=8
target_byte_order=G_BIG_ENDIAN
;;
+ x86_64-*-nacl)
+ TARGET=AMD64
+ arch_target=amd64
+ AC_DEFINE(TARGET_AMD64, 1, [...])
+ AC_DEFINE(MONO_CROSS_COMPILE,1,[The runtime is compiled for cross-compiling mode])
+ AC_DEFINE(__mono_ilp32__, 1, [64 bit mode with 4 byte longs and pointers])
+ sizeof_register=8
+ ;;
*)
AC_MSG_WARN([Cross compiling is only supported for targets matching 'powerpc64-{ps3,xbox360}-linux-gnu'])
esac
if test x$TARGET = xSPARC -o x$TARGET = xSPARC64; then
LIBGC_CPPFLAGS=`echo $LIBGC_CPPFLAGS | sed -e 's/-D_FILE_OFFSET_BITS=64//g'`
fi
+ # Don't pass -finstrument-for-thread-suspension down to libgc:
+ # if the collector's own functions are instrumented the result is
+ # very bad news (infinite recursion, undefined parking behavior, etc.)
+ LIBGC_CPPFLAGS=`echo $LIBGC_CPPFLAGS | sed -e 's/-finstrument-for-thread-suspension//g'`
ac_configure_args="$ac_configure_args --disable-embed-check --with-libgc-threads=$libgc_threads $libgc_configure_args \"CPPFLAGS_FOR_LIBGC=$LIBGC_CPPFLAGS\" \"CFLAGS_FOR_LIBGC=$CFLAGS_FOR_LIBGC\""
AC_CONFIG_SUBDIRS(libgc)
;;
MOONLIGHT_DEFINES=
AC_ARG_WITH(moonlight, [ --with-moonlight=yes|no|only If you want to build Mono for Moonlight (defaults to no)],[
- MOONLIGHT_DEFINES="-DMOONLIGHT -DDISABLE_ASSEMBLY_REMAPPING"
+ if test x$with_moonlight != xno; then
+ MOONLIGHT_DEFINES="-DMOONLIGHT -DDISABLE_ASSEMBLY_REMAPPING "
+ fi
], [with_moonlight=no])
AC_SUBST(MOONLIGHT_DEFINES)
-
+ AC_ARG_WITH(moon_gc, [ --with-moon-gc=boehm,sgen Select the gc to use with Moonlight (defaults to boehm)],[
+ if test "x$with_moon_gc" != "xsgen"; then
+ with_moon_gc=boehm
+ fi
+ ], [with_moon_gc=boehm])
AC_CHECK_HEADER([malloc.h],
[AC_DEFINE([HAVE_USR_INCLUDE_MALLOC_H], [1],
AM_CONDITIONAL(MOONLIGHT, [test "x$with_moonlight" != "xno"])
AM_CONDITIONAL(ONLY_MOONLIGHT, [test "x$with_moonlight" = "xonly"])
+ AM_CONDITIONAL(MOONLIGHT_BOEHM, [test "x$with_moon_gc" = "xboehm"])
+ AM_CONDITIONAL(MOONLIGHT_SGEN, [test "x$with_moon_gc" = "xsgen"])
+
AM_CONDITIONAL(INSTALL_4_0, [test "x$with_profile4" = xyes])
AM_CONDITIONAL(INSTALL_MONODROID, [test "x$with_monodroid" = xyes])
AM_CONDITIONAL(INSTALL_MONOTOUCH, [test "x$with_monotouch" = xyes])
LLVM Back End: $enable_llvm (dynamically loaded: $enable_loadedllvm)
Libraries:
- Moon Profile: $with_moonlight
+ Moon Profile: $with_moonlight ($with_moon_gc)
MonoDroid: $with_monodroid
MonoTouch: $with_monotouch
JNI support: $jdk_headers_found
* (C) 2002 Ximian, Inc.
*/
+#include <config.h>
+
#ifndef DISABLE_SOCKETS
-#include <config.h>
#include <glib.h>
#include <pthread.h>
#include <errno.h>
g_message ("%s: send error: %s", __func__, strerror (errno));
#endif
+ #ifdef O_NONBLOCK
/* At least linux returns EAGAIN/EWOULDBLOCK when the timeout has been set on
* a blocking socket. See bug #599488 */
if (errnum == EAGAIN) {
- gboolean nonblock;
-
- ret = ioctlsocket (fd, FIONBIO, (gulong *) &nonblock);
- if (ret != SOCKET_ERROR && !nonblock)
+ ret = fcntl (fd, F_GETFL, 0);
+ if (ret != -1 && (ret & O_NONBLOCK) == 0)
errnum = ETIMEDOUT;
}
+ #endif /* O_NONBLOCK */
errnum = errno_to_WSA (errnum, __func__);
WSASetLastError (errnum);
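/*
 * A minimal standalone sketch of the idea above (not part of the patch;
 * the helper name is hypothetical): EAGAIN coming back from a *blocking*
 * socket can only mean that a SO_RCVTIMEO/SO_SNDTIMEO timeout fired, so
 * it should be surfaced as ETIMEDOUT instead.
 */
#include <fcntl.h>
#include <errno.h>

static int
map_timeout_errno (int fd, int errnum)
{
	int flags = fcntl (fd, F_GETFL, 0);

	/* O_NONBLOCK clear => blocking socket => EAGAIN means the timeout expired */
	if (errnum == EAGAIN && flags != -1 && (flags & O_NONBLOCK) == 0)
		return ETIMEDOUT;
	return errnum;
}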
{"System.Runtime.Serialization.Formatters.Soap", 0},
{"System.Security", 0},
{"System.ServiceProcess", 0},
+ {"System.Transactions", 2},
{"System.Web", 0},
{"System.Web.Abstractions", 2},
{"System.Web.Mobile", 0},
return memcmp (pubt1, pubt2, 16) == 0;
}
+/* Native Client can't get this info from an environment variable so */
+/* it's passed in to the runtime, or set manually by embedding code. */
+#ifdef __native_client__
+char* nacl_mono_path = NULL;
+#endif
+
static void
check_path_env (void)
{
const char *path;
char **splitted, **dest;
+#ifdef __native_client__
+ path = nacl_mono_path;
+#else
path = g_getenv ("MONO_PATH");
+#endif
if (!path)
return;
keylen = strlen (key) >> 1;
if (keylen < 1)
return FALSE;
-
+
+ /* Allow the ECMA standard pseudo key; its corresponding public key
+ * token is the well-known b77a5c561934e089. */
+ if (strcmp (key, "00000000000000000400000000000000") == 0) {
+ if (pubkey) {
+ arr = g_strdup ("b77a5c561934e089");
+ *pubkey = arr;
+ }
+ return TRUE;
+ }
val = g_ascii_xdigit_value (key [0]) << 4;
val |= g_ascii_xdigit_value (key [1]);
switch (val) {
g_ptr_array_free (sorted, TRUE);
}
+#ifndef __native_client__
+ /* We don't re-use any thunks as there is a lot of overhead */
+ /* to deleting and re-using code in Native Client. */
if (old_thunk != vtable_trampoline && old_thunk != imt_trampoline)
invalidate_generic_virtual_thunk (domain, old_thunk);
+#endif
}
mono_domain_unlock (domain);
return FALSE;
}
+ /**
+ * mono_class_field_get_special_static_type:
+ * @field: The MonoClassField describing the field.
+ *
+ * Returns: SPECIAL_STATIC_THREAD if the field is thread static, SPECIAL_STATIC_CONTEXT if it is context static,
+ * SPECIAL_STATIC_NONE otherwise.
+ */
+ guint32
+ mono_class_field_get_special_static_type (MonoClassField *field)
+ {
+ if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC))
+ return SPECIAL_STATIC_NONE;
+ if (mono_field_is_deleted (field))
+ return SPECIAL_STATIC_NONE;
+ if (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL))
+ return field_is_special_static (field->parent, field);
+ return SPECIAL_STATIC_NONE;
+ }
+
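/*
 * Hypothetical caller (illustrative, not part of the patch) showing how the
 * new accessor classifies a field before deciding where its storage lives.
 */
static const char*
describe_static_storage (MonoClassField *field)
{
	switch (mono_class_field_get_special_static_type (field)) {
	case SPECIAL_STATIC_THREAD:
		return "thread-static";          /* one slot per thread */
	case SPECIAL_STATIC_CONTEXT:
		return "context-static";         /* one slot per remoting context */
	default:
		return "not special-static";     /* regular static or instance field */
	}
}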
/**
* mono_class_has_special_static_fields:
*
if (field->offset == -1) {
/* Special static */
- gpointer addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
+ gpointer addr;
+
+ mono_domain_lock (vt->domain);
+ addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
+ mono_domain_unlock (vt->domain);
dest = mono_get_special_static_data (GPOINTER_TO_UINT (addr));
} else {
dest = (char*)vt->data + field->offset;
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
if (field->offset == -1) {
/* Special static */
- gpointer addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
+ gpointer addr;
+
+ mono_domain_lock (vt->domain);
+ addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
+ mono_domain_unlock (vt->domain);
src = mono_get_special_static_data (GPOINTER_TO_UINT (addr));
} else {
src = (guint8*)vt->data + field->offset;
mono_get_constant_value_from_blob (domain, def_type, data, value);
}
+ void
+ mono_field_static_get_value_for_thread (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field, void *value)
+ {
+ void *src;
+
+ g_return_if_fail (field->type->attrs & FIELD_ATTRIBUTE_STATIC);
+
+ if (field->type->attrs & FIELD_ATTRIBUTE_LITERAL) {
+ get_default_field_value (vt->domain, field, value);
+ return;
+ }
+
+ if (field->offset == -1) {
+ /* Special static */
+ gpointer addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
+ src = mono_get_special_static_data_for_thread (thread, GPOINTER_TO_UINT (addr));
+ } else {
+ src = (char*)vt->data + field->offset;
+ }
+ set_value (field->type, value, src, TRUE);
+ }
+
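/*
 * Illustrative use (assumed caller, not in the patch): a debugger agent
 * reading a [ThreadStatic] int field as seen by a specific thread.
 */
static gint32
read_thread_static_int (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field)
{
	gint32 val = 0;

	/* Resolves special-static storage against 'thread' instead of the caller */
	mono_field_static_get_value_for_thread (thread, vt, field, &val);
	return val;
}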
/**
* mono_field_static_get_value:
* @vt: vtable to the object
void
mono_field_static_get_value (MonoVTable *vt, MonoClassField *field, void *value)
{
- void *src;
-
- g_return_if_fail (field->type->attrs & FIELD_ATTRIBUTE_STATIC);
-
- if (field->type->attrs & FIELD_ATTRIBUTE_LITERAL) {
- get_default_field_value (vt->domain, field, value);
- return;
- }
-
- if (field->offset == -1) {
- /* Special static */
- gpointer addr = g_hash_table_lookup (vt->domain->special_static_fields, field);
- src = mono_get_special_static_data (GPOINTER_TO_UINT (addr));
- } else {
- src = (char*)vt->data + field->offset;
- }
- set_value (field->type, value, src, TRUE);
+ return mono_field_static_get_value_for_thread (mono_thread_internal_current (), vt, field, value);
}
/**
$(monodir)/mono/utils/libmonoutils.la \
$(GLIB_LIBS)
+ if MOONLIGHT
moon_libs = \
$(monodir)/mono/metadata/libmonoruntimemoon.la \
$(monodir)/mono/io-layer/libwapi.la \
$(monodir)/mono/utils/libmonoutils.la \
- $(GLIB_LIBS) \
- $(libgc_libs)
+ $(GLIB_LIBS)
+ endif
static_libs= \
$(monodir)/mono/metadata/libmonoruntime-static.la \
basic-simd.cs
regtests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe iltests.exe devirtualization.exe generics.exe basic-simd.exe
-fsatests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe devirtualization.exe basic-simd.exe
if X86
if MONO_DEBUGGER_SUPPORTED
endif
libmono_2_0_la_SOURCES = $(common_sources) $(llvm_sources) $(arch_sources) $(os_sources)
- libmono_2_0_la_CFLAGS = $(AM_CFLAGS) $(BOEHM_DEFINES) $(LIBGC_CFLAGS)
+ libmono_2_0_la_CFLAGS = $(mono_CFLAGS)
+ libmono_2_0_la_LIBADD = $(libs) $(LIBMONO_DTRACE_OBJECT)
libmonosgen_2_0_la_SOURCES = $(libmono_2_0_la_SOURCES)
- libmonosgen_2_0_la_CFLAGS = $(AM_CFLAGS) $(SGEN_DEFINES)
+ libmonosgen_2_0_la_CFLAGS = $(mono_sgen_CFLAGS)
+ libmonosgen_2_0_la_LIBADD = $(sgen_libs) $(LIBMONO_DTRACE_OBJECT)
if MOONLIGHT
libmono_moon_la_SOURCES = $(libmono_2_0_la_SOURCES)
- libmono_moon_la_CFLAGS = $(AM_CFLAGS) $(MOONLIGHT_DEFINES)
+ if MOONLIGHT_BOEHM
+ libmono_moon_la_CFLAGS = $(mono_CFLAGS) $(MOONLIGHT_DEFINES)
+ libmono_moon_la_LIBADD = $(moon_libs) $(libgc_libs) $(LIBMONO_DTRACE_OBJECT)
+ else
+ libmono_moon_la_CFLAGS = $(mono_sgen_CFLAGS) $(MOONLIGHT_DEFINES)
libmono_moon_la_LIBADD = $(moon_libs) $(LIBMONO_DTRACE_OBJECT)
endif
+ endif
libmono_static_la_SOURCES = $(libmono_2_0_la_SOURCES)
- libmono_static_la_CFLAGS = $(AM_CFLAGS) $(BOEHM_DEFINES) $(LIBGC_CFLAGS)
+ libmono_static_la_CFLAGS = $(mono_CFLAGS)
libmono_static_la_LDFLAGS = -static
libmono_static_la_LIBADD = $(static_libs) $(MONO_DTRACE_OBJECT)
libmonosgen_static_la_SOURCES = $(libmono_2_0_la_SOURCES)
- libmonosgen_static_la_CFLAGS = $(AM_CFLAGS) $(SGEN_DEFINES)
+ libmonosgen_static_la_CFLAGS = $(mono_sgen_CFLAGS)
libmonosgen_static_la_LDFLAGS = -static
libmonosgen_static_la_LIBADD = $(sgenstatic_libs) $(MONO_DTRACE_OBJECT)
nodist_libmono_static_la_SOURCES = $(nodist_libmono_la_SOURCES)
- nodist_libmono_static_la_CFLAGS = $(AM_CFLAGS) $(BOEHM_DEFINES) $(LIBGC_CFLAGS)
+ nodist_libmono_static_la_CFLAGS = $(mono_CFLAGS)
BURGSRC= $(common_BURGSRC) $(arch_BURGSRC)
libmonoinclude_HEADERS = jit.h
- libmono_2_0_la_LIBADD = \
- $(libs) $(LIBMONO_DTRACE_OBJECT)
-
- libmonosgen_2_0_la_LIBADD = \
- $(sgen_libs) $(LIBMONO_DTRACE_OBJECT)
-
basic-simd.exe: basic-simd.cs
$(MCS) -out:$@ $< -r:TestDriver.dll -r:Mono.Simd.dll
+nacl.exe: nacl.cs
+ $(MCS) -out:$@ $< -r:TestDriver.dll -r:Mono.Simd.dll
+
generics.exe: generics.cs TestDriver.dll generics-variant-types.dll
$(MCS) -out:$@ $< -r:TestDriver.dll -r:generics-variant-types.dll
rcheck: mono $(regtests)
$(RUNTIME) --regression $(regtests)
- LLVM_AOT_RUNTIME_OPTS=$(if ($LLVM),--llvm,)
+ LLVM_AOT_RUNTIME_OPTS=$(if $(LLVM),--llvm,)
aotcheck: mono $(regtests)
rm -f *.exe.so
llvmfullaotcheck:
$(MAKE) fullaotcheck LLVM=1
-fsacheck: mono $(fsatests) fsacheck.c generics.exe
- rm -rf fsa-tmp
- mkdir fsa-tmp
- cp $(CLASS)/mscorlib.dll $(CLASS)/System.Core.dll $(CLASS)/System.dll $(CLASS)/Mono.Posix.dll $(CLASS)/System.Configuration.dll $(CLASS)/System.Security.dll $(CLASS)/System.Xml.dll $(CLASS)/Mono.Security.dll $(CLASS)/Mono.Simd.dll \
- $(fsatests) generics-variant-types.dll TestDriver.dll fsa-tmp/
- cp $(fsatests) fsa-tmp/
- MONO_PATH=fsa-tmp $(top_builddir)/runtime/mono-wrapper --aot=full,static fsa-tmp/*.dll || exit 1
- MONO_PATH=fsa-tmp $(top_builddir)/runtime/mono-wrapper --aot=full,static fsa-tmp/*.exe || exit 1
- $(CC) -o $@.out -g -static $(VPATH)/fsacheck.c fsa-tmp/*.o \
- -lmono-2.0 -lpthread -lm -ldl -lrt \
- -DTARGET_X86 -L.libs -I${prefix}/include/mono-2.0 \
- -I${prefix} -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include
- for i in $(fsatests); do echo $$i; MONO_PATH=fsa-tmp ./$@.out $$i || exit 1; done
-
bench: mono test.exe
time env $(RUNTIME) --ncompile $(count) --compile Test:$(mtest) test.exe
MonoClass **typespec_classes;
GString *llc_args;
GString *as_args;
+ gboolean thumb_mixed;
} MonoAotCompile;
typedef struct {
int plt_offset;
- char *symbol;
+ char *symbol, *llvm_symbol, *debug_sym;
MonoJumpInfo *ji;
+ gboolean jit_used, llvm_used;
} MonoPltEntry;
#define mono_acfg_lock(acfg) EnterCriticalSection (&((acfg)->mutex))
img_writer_emit_string (acfg->w, value);
}
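+ /* Like emit_string_symbol (), but the label stays local to the image; the
+ * data is instead reached through a pointer in the AOT file info structure. */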
+ static void
+ emit_local_string_symbol (MonoAotCompile *acfg, const char *name, const char *value)
+ {
+ img_writer_emit_section_change (acfg->w, RODATA_SECT, 1);
+ img_writer_emit_label (acfg->w, name);
+ img_writer_emit_string (acfg->w, value);
+ }
+
static G_GNUC_UNUSED void
emit_uleb128 (MonoAotCompile *acfg, guint32 value)
{
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
-#if defined(TARGET_X86) && defined(__native_client_codegen__)
+#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && defined(__native_client_codegen__)
#undef AOT_FUNC_ALIGNMENT
#define AOT_FUNC_ALIGNMENT 32
#endif
acfg->llc_args = g_string_new ("");
acfg->as_args = g_string_new ("");
+ /*
+ * The prefix LLVM likes to put in front of symbol names on darwin.
+ * The Mach-O spec requires this for globals, but LLVM puts it in front of all
+ * symbols. We need to handle this, since we need to refer to LLVM generated
+ * symbols.
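+ * For example, on Darwin the C symbol "foo" appears in the object file as "_foo".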
+ */
+ acfg->llvm_label_prefix = "";
+
#ifdef TARGET_ARM
if (acfg->aot_opts.mtriple && strstr (acfg->aot_opts.mtriple, "darwin")) {
g_string_append (acfg->llc_args, "-mattr=+v6");
g_string_append (acfg->llc_args, " -soft-float");
#endif
}
+ if (acfg->aot_opts.mtriple && strstr (acfg->aot_opts.mtriple, "thumb"))
+ acfg->thumb_mixed = TRUE;
if (acfg->aot_opts.mtriple)
mono_arch_set_target (acfg->aot_opts.mtriple);
{
#if defined(TARGET_X86)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
-
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ /* jmp *<offset>(%ebx) */
+ emit_byte (acfg, 0xff);
+ emit_byte (acfg, 0xa3);
+ emit_int32 (acfg, offset);
+ /* Used by mono_aot_get_plt_info_offset */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#elif defined(__native_client_codegen__)
const guint8 kSizeOfNaClJmp = 11;
guint8 bytes[kSizeOfNaClJmp];
guint8 *pbytes = &bytes[0];
emit_byte (acfg, 0x68); /* hide data in a push */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#else
- /* jmp *<offset>(%ebx) */
- emit_byte (acfg, 0xff);
- emit_byte (acfg, 0xa3);
- emit_int32 (acfg, offset);
- /* Used by mono_aot_get_plt_info_offset */
- emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
-#endif /* __native_client_codegen__ */
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/*
* We can't emit jumps because they are 32 bits only so they can't be patched.
* So we make indirect calls through GOT entries which are patched by the AOT
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) -4);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#elif defined(__native_client_codegen__)
+ guint8 buf [256];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) -4);
+
+ amd64_jump_reg (code, AMD64_R11);
+ /* This should be constant for the plt patch */
+ g_assert ((size_t)(code-buf_aligned) == 10);
+ emit_bytes (acfg, buf_aligned, code - buf_aligned);
+
+ /* Hide data in a push imm32 so it passes validation */
+ emit_byte (acfg, 0x68); /* push */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [256];
guint8 *code;
- /* FIXME:
- * - optimize OP_AOTCONST implementation
- * - optimize the PLT entries
- * - optimize SWITCH AOT implementation
- */
code = buf;
- if (acfg->use_bin_writer && FALSE) {
- /* FIXME: mono_arch_patch_plt_entry () needs to decode this */
- /* We only emit 1 relocation since we implement it ourselves anyway */
- img_writer_emit_reloc (acfg->w, R_ARM_ALU_PC_G0_NC, acfg->got_symbol, ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 8);
- /* FIXME: A 2 instruction encoding is sufficient in most cases */
- ARM_ADD_REG_IMM (code, ARMREG_IP, ARMREG_PC, 0, 0);
- ARM_ADD_REG_IMM (code, ARMREG_IP, ARMREG_IP, 0, 0);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
- emit_bytes (acfg, buf, code - buf);
- /* Used by mono_aot_get_plt_info_offset */
- emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
- } else {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
- emit_bytes (acfg, buf, code - buf);
- emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 4);
- /* Used by mono_aot_get_plt_info_offset */
- emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
- }
- /*
- * The plt_got_info_offset is computed automatically by
- * mono_aot_get_plt_info_offset (), so no need to save it here.
- */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
+ emit_bytes (acfg, buf, code - buf);
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 4);
+ /* Used by mono_aot_get_plt_info_offset */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
#elif defined(TARGET_POWERPC)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
#endif
}
+ static void
+ arch_emit_llvm_plt_entry (MonoAotCompile *acfg, int index)
+ {
+ #if defined(TARGET_ARM)
+ #if 0
+ /* LLVM calls the PLT entries using bl, so emit a stub */
+ /* FIXME: Too much overhead on every call */
+ fprintf (acfg->fp, ".thumb_func\n");
+ fprintf (acfg->fp, "bx pc\n");
+ fprintf (acfg->fp, "nop\n");
+ fprintf (acfg->fp, ".arm\n");
+ #endif
+ /* LLVM calls the PLT entries using bl, so these have to be thumb2 */
+ fprintf (acfg->fp, ".thumb_func\n");
+ /* The code below should be 12 bytes long */
+ fprintf (acfg->fp, "ldr ip, [pc, #8]\n");
+ fprintf (acfg->fp, "add ip, pc, ip\n"); /* thumb can't encode ldr pc, [pc, ip] */
+ fprintf (acfg->fp, "add ip, pc, ip\n");
+ fprintf (acfg->fp, "ldr ip, [ip, #0]\n");
+ fprintf (acfg->fp, "bx ip\n");
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) + 4);
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ #else
+ g_assert_not_reached ();
+ #endif
+ }
+
/*
* arch_emit_specific_trampoline:
*
* - all the trampolines should be of the same length.
*/
#if defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/* This should be exactly 16 bytes long */
*tramp_size = 16;
/* call *<offset>(%rip) */
emit_byte (acfg, '\x15');
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
/* This should be relative to the start of the trampoline */
- emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4 + 19);
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset+1) * sizeof (gpointer)) + 7);
emit_zero_bytes (acfg, 5);
+#elif defined(__native_client_codegen__)
+ guint8 buf [256];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+ guint8 *call_start;
+ size_t call_len;
+ int got_offset;
+
+ /* Emit this call in 'code' so we can find out how long it is. */
+ amd64_call_reg (code, AMD64_R11);
+ call_start = mono_arch_nacl_skip_nops (buf_aligned);
+ call_len = code - call_start;
+
+ /* The tramp_size is twice the NaCl alignment because it starts with */
+ /* a call, which must end exactly on a bundle (alignment) boundary. */
+ *tramp_size = kNaClAlignment*2;
+ {
+ /* Emit nops to align call site below which is 7 bytes plus */
+ /* the length of the call sequence emitted above. */
+ /* Note: this requires that the specific trampoline start on a */
+ /* kNaClAlignment-aligned address, which it does because it is */
+ /* emitted as its own, aligned function. */
+ guint8 nop_buf[256];
+ guint8 *nopbuf_aligned = ALIGN_TO (nop_buf, kNaClAlignment);
+ guint8 *nopbuf_end = mono_arch_nacl_pad (nopbuf_aligned, kNaClAlignment - 7 - (call_len));
+ emit_bytes (acfg, nopbuf_aligned, nopbuf_end - nopbuf_aligned);
+ }
+ /* The trampoline is stored at the offset'th pointer, the -4 is */
+ /* present because RIP relative addressing starts at the end of */
+ /* the current instruction, while the label "." is relative to */
+ /* the beginning of the current asm location, which in this case */
+ /* is not the mov instruction, but the offset itself, due to the */
+ /* way the bytes and ints are emitted here. */
+ got_offset = (offset * sizeof(gpointer)) - 4;
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
+
+ /* naclcall %r11 */
+ emit_bytes (acfg, call_start, call_len);
+
+ /* The arg is stored at the offset+1 pointer, relative to beginning */
+ /* of trampoline: 7 for mov, plus the call length, and 1 for push. */
+ got_offset = ((offset + 1) * sizeof(gpointer)) + 7 + call_len + 1;
+
+ /* We can't emit this data directly, hide in a "push imm32" */
+ emit_byte (acfg, '\x68'); /* push */
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
+ emit_alignment (acfg, kNaClAlignment);
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
arch_emit_static_rgctx_trampoline (MonoAotCompile *acfg, int offset, int *tramp_size)
{
#if defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/* This should be exactly 13 bytes long */
*tramp_size = 13;
emit_byte (acfg, '\xff');
emit_byte (acfg, '\x25');
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
+#elif defined(__native_client_codegen__)
+ guint8 buf [128];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+
+ /* mov <OFFSET>(%rip), %r10d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x15');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
+
+ /* nacljmp *%r11 */
+ amd64_jump_reg (code, AMD64_R11);
+ emit_bytes (acfg, buf_aligned, code - buf_aligned);
+
+ emit_alignment (acfg, kNaClAlignment);
+ *tramp_size = kNaClAlignment;
+#endif /*__native_client_codegen__*/
+
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
{
#if defined(TARGET_AMD64)
guint8 *buf, *code;
+#if defined(__native_client_codegen__)
+ guint8 *buf_alloc;
+#endif
guint8 *labels [3];
+ guint8 mov_buf[3];
+ guint8 *mov_buf_ptr = mov_buf;
+ const int kSizeOfMove = 7;
+#if defined(__default_codegen__)
code = buf = g_malloc (256);
+#elif defined(__native_client_codegen__)
+ buf_alloc = g_malloc (256 + kNaClAlignment + kSizeOfMove);
+ buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
+ /* The RIP relative move below is emitted first */
+ buf += kSizeOfMove;
+ code = buf;
+#endif
/* FIXME: Optimize this, i.e. use binary search etc. */
/* Maybe move the body into a separate function (slower, but much smaller) */
- /* R11 is a free register */
+ /* MONO_ARCH_IMT_SCRATCH_REG is a free register */
labels [0] = code;
- amd64_alu_membase_imm (code, X86_CMP, AMD64_R11, 0, 0);
+ amd64_alu_membase_imm (code, X86_CMP, MONO_ARCH_IMT_SCRATCH_REG, 0, 0);
labels [1] = code;
- amd64_branch8 (code, X86_CC_Z, FALSE, 0);
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Check key */
- amd64_alu_membase_reg (code, X86_CMP, AMD64_R11, 0, MONO_ARCH_IMT_REG);
+ amd64_alu_membase_reg_size (code, X86_CMP, MONO_ARCH_IMT_SCRATCH_REG, 0, MONO_ARCH_IMT_REG, sizeof (gpointer));
labels [2] = code;
- amd64_branch8 (code, X86_CC_Z, FALSE, 0);
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Loop footer */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, 2 * sizeof (gpointer));
+ amd64_alu_reg_imm (code, X86_ADD, MONO_ARCH_IMT_SCRATCH_REG, 2 * sizeof (gpointer));
amd64_jump_code (code, labels [0]);
/* Match */
mono_amd64_patch (labels [2], code);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, sizeof (gpointer), 8);
- amd64_jump_membase (code, AMD64_R11, 0);
+ amd64_mov_reg_membase (code, MONO_ARCH_IMT_SCRATCH_REG, MONO_ARCH_IMT_SCRATCH_REG, sizeof (gpointer), sizeof (gpointer));
+ amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
/* No match */
/* FIXME: */
mono_amd64_patch (labels [1], code);
x86_breakpoint (code);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 12345678, 8);
-
- /* mov <OFFSET>(%rip), %r11 */
- emit_byte (acfg, '\x4d');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
+ /* mov <OFFSET>(%rip), MONO_ARCH_IMT_SCRATCH_REG */
+ amd64_emit_rex (mov_buf_ptr, sizeof(gpointer), MONO_ARCH_IMT_SCRATCH_REG, 0, AMD64_RIP);
+ *(mov_buf_ptr)++ = (unsigned char)0x8b; /* mov opcode */
+ x86_address_byte (mov_buf_ptr, 0, MONO_ARCH_IMT_SCRATCH_REG & 0x7, 5);
+ emit_bytes (acfg, mov_buf, mov_buf_ptr - mov_buf);
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
emit_bytes (acfg, buf, code - buf);
- *tramp_size = code - buf + 7;
+ *tramp_size = code - buf + kSizeOfMove;
+#if defined(__native_client_codegen__)
+ /* The tramp will be padded to the next kNaClAlignment bundle. */
+ *tramp_size = ALIGN_TO ((*tramp_size), kNaClAlignment);
+#endif
+
+#if defined(__default_codegen__)
+ g_free (buf);
+#elif defined(__native_client_codegen__)
+ g_free (buf_alloc);
+#endif
+
#elif defined(TARGET_X86)
guint8 *buf, *code;
#ifdef __native_client_codegen__
#endif
guint8 *labels [3];
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ code = buf = g_malloc (256);
+#elif defined(__native_client_codegen__)
buf_alloc = g_malloc (256 + kNaClAlignment);
code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
-#else
- code = buf = g_malloc (256);
#endif
/* Allocate a temporary stack slot */
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
+
+#if defined(__default_codegen__)
+ g_free (buf);
+#elif defined(__native_client_codegen__)
+ g_free (buf_alloc);
+#endif
+
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code, *code2, *labels [16];
return add_stream_data (&acfg->blob, (char*)data, data_len);
}
+ static guint32
+ add_to_blob_aligned (MonoAotCompile *acfg, const guint8 *data, guint32 data_len, guint32 align)
+ {
+ char buf [4] = {0};
+ guint32 count;
+
+ if (acfg->blob.alloc_size == 0)
+ stream_init (&acfg->blob);
+
+ count = acfg->blob.index % align;
+
+ /* pad with zeros up to the next 'align' boundary (align is assumed to be <= 4) */
+ if (count)
+ add_stream_data (&acfg->blob, buf, align - count);
+
+ return add_stream_data (&acfg->blob, (char*)data, data_len);
+ }
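+
+ /* Worked example (illustrative): with align == 4 and blob.index == 6,
+ * count == 2, so two zero bytes of padding are emitted and the data
+ * starts at offset 8, the next 4-byte boundary. */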
+
/*
* emit_offset_table:
*
break;
}
case MONO_WRAPPER_UNKNOWN:
- if (strcmp (method->name, "FastMonitorEnter") == 0)
+ if (strcmp (method->name, "FastMonitorEnter") == 0) {
encode_value (MONO_AOT_WRAPPER_MONO_ENTER, p, &p);
- else if (strcmp (method->name, "FastMonitorExit") == 0)
+ } else if (strcmp (method->name, "FastMonitorExit") == 0) {
encode_value (MONO_AOT_WRAPPER_MONO_EXIT, p, &p);
- else
+ } else if (strcmp (method->name, "PtrToStructure") == 0) {
+ encode_value (MONO_AOT_WRAPPER_PTR_TO_STRUCTURE, p, &p);
+ encode_klass_ref (acfg, method->klass, p, &p);
+ } else if (strcmp (method->name, "StructureToPtr") == 0) {
+ encode_value (MONO_AOT_WRAPPER_STRUCTURE_TO_PTR, p, &p);
+ encode_klass_ref (acfg, method->klass, p, &p);
+ } else {
g_assert_not_reached ();
+ }
break;
case MONO_WRAPPER_SYNCHRONIZED:
case MONO_WRAPPER_MANAGED_TO_NATIVE:
res->plt_offset = acfg->plt_offset;
res->ji = new_ji;
res->symbol = get_plt_symbol (acfg, res->plt_offset, patch_info);
+ res->llvm_symbol = g_strdup_printf ("%s_llvm", res->symbol);
g_hash_table_insert (acfg->patch_to_plt_entry, new_ji, res);
MonoMethodHeader *header;
gboolean skip, direct_call;
guint32 got_slot;
- char direct_call_target [128];
+ char direct_call_target [1024];
if (method) {
header = mono_method_get_header (method);
MonoCompile *callee_cfg = g_hash_table_lookup (acfg->method_to_cfg, patch_info->data.method);
//printf ("DIRECT: %s %s\n", method ? mono_method_full_name (method, TRUE) : "", mono_method_full_name (callee_cfg->method, TRUE));
direct_call = TRUE;
+ g_assert (strlen (callee_cfg->asm_symbol) < 1000);
sprintf (direct_call_target, "%s", callee_cfg->asm_symbol);
patch_info->type = MONO_PATCH_INFO_NONE;
acfg->stats.direct_calls ++;
/* Nullify the patch */
patch_info->type = MONO_PATCH_INFO_NONE;
+ plt_entry->jit_used = TRUE;
}
}
{
char *name1, *name2, *cached;
int i, j, len, count;
-
+
name1 = mono_method_full_name (method, TRUE);
len = strlen (name1);
name2 = malloc (strlen (prefix) + len + 16);
seq_points = cfg->seq_point_info;
- buf_size = header->num_clauses * 256 + debug_info_size + 1024 + (seq_points ? (seq_points->len * 64) : 0);
+ buf_size = header->num_clauses * 256 + debug_info_size + 1024 + (seq_points ? (seq_points->len * 64) : 0) + cfg->gc_map_size;
p = buf = g_malloc (buf_size);
#ifdef MONO_ARCH_HAVE_XP_UNWIND
use_unwind_ops = cfg->unwind_ops != NULL;
#endif
- flags = (jinfo->has_generic_jit_info ? 1 : 0) | (use_unwind_ops ? 2 : 0) | (header->num_clauses ? 4 : 0) | (seq_points ? 8 : 0) | (cfg->compile_llvm ? 16 : 0) | (jinfo->has_try_block_holes ? 32 : 0);
+ flags = (jinfo->has_generic_jit_info ? 1 : 0) | (use_unwind_ops ? 2 : 0) | (header->num_clauses ? 4 : 0) | (seq_points ? 8 : 0) | (cfg->compile_llvm ? 16 : 0) | (jinfo->has_try_block_holes ? 32 : 0) | (cfg->gc_map ? 64 : 0);
encode_value (flags, p, &p);
}
}
-
g_assert (debug_info_size < buf_size);
encode_value (debug_info_size, p, &p);
g_free (debug_info);
}
+ /* GC Map */
+ if (cfg->gc_map) {
+ encode_value (cfg->gc_map_size, p, &p);
+ /* The GC map requires 4 bytes of alignment */
+ while ((gsize)p % 4)
+ p ++;
+ memcpy (p, cfg->gc_map, cfg->gc_map_size);
+ p += cfg->gc_map_size;
+ }
+
acfg->stats.ex_info_size += p - buf;
g_assert (p - buf < buf_size);
/* Emit info */
- cfg->ex_info_offset = add_to_blob (acfg, buf, p - buf);
+ /* The GC map requires 4-byte alignment */
+ cfg->ex_info_offset = add_to_blob_aligned (acfg, buf, p - buf, cfg->gc_map ? 4 : 1);
g_free (buf);
}
return res;
}
+ static char*
+ get_plt_entry_debug_sym (MonoAotCompile *acfg, MonoJumpInfo *ji, GHashTable *cache)
+ {
+ char *debug_sym;
+
+ switch (ji->type) {
+ case MONO_PATCH_INFO_METHOD:
+ debug_sym = get_debug_sym (ji->data.method, "plt_", cache);
+ break;
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ debug_sym = g_strdup_printf ("plt__jit_icall_%s", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_CLASS_INIT:
+ debug_sym = g_strdup_printf ("plt__class_init_%s", mono_type_get_name (&ji->data.klass->byval_arg));
+ sanitize_symbol (debug_sym);
+ break;
+ case MONO_PATCH_INFO_RGCTX_FETCH:
+ debug_sym = g_strdup_printf ("plt__rgctx_fetch_%d", acfg->label_generator ++);
+ break;
+ case MONO_PATCH_INFO_ICALL_ADDR: {
+ char *s = get_debug_sym (ji->data.method, "", cache);
+
+ debug_sym = g_strdup_printf ("plt__icall_native_%s", s);
+ g_free (s);
+ break;
+ }
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR:
+ debug_sym = g_strdup_printf ("plt__jit_icall_native_%s", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
+ debug_sym = g_strdup_printf ("plt__generic_class_init");
+ break;
+ default:
+ break;
+ }
+
+ return debug_sym;
+ }
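+
+ /* For instance (illustrative): a MONO_PATCH_INFO_INTERNAL_METHOD entry for
+ * the "mono_object_new" jit icall gets the debug symbol
+ * "plt__jit_icall_mono_object_new". */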
+
/*
* Calls made from AOTed code are routed through a table of jumps similar to the
- * ELF PLT (Program Linkage Table). The differences are the following:
- * - the ELF PLT entries make an indirect jump though the GOT so they expect the
- * GOT pointer to be in EBX. We want to avoid this, so our table contains direct
- * jumps. This means the jumps need to be patched when the address of the callee is
- * known. Initially the PLT entries jump to code which transfers control to the
- * AOT runtime through the first PLT entry.
+ * ELF PLT (Procedure Linkage Table). Initially the PLT entries jump to code which transfers
+ * control to the AOT runtime through a trampoline.
*/
static void
emit_plt (MonoAotCompile *acfg)
sprintf (symbol, "plt");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, NACL_SIZE(16, kNaClAlignment));
emit_label (acfg, symbol);
emit_label (acfg, acfg->plt_symbol);
for (i = 0; i < acfg->plt_offset; ++i) {
- char label [128];
char *debug_sym = NULL;
MonoPltEntry *plt_entry = NULL;
MonoJumpInfo *ji;
- if (i == 0) {
+ if (i == 0)
/*
- * The first plt entry is used to transfer code to the AOT loader.
+ * The first plt entry is unused.
*/
- arch_emit_plt_entry (acfg, i);
continue;
- }
plt_entry = g_hash_table_lookup (acfg->plt_offset_to_entry, GUINT_TO_POINTER (i));
ji = plt_entry->ji;
- sprintf (label, "%s", plt_entry->symbol);
if (acfg->llvm) {
/*
*/
if (ji && is_direct_callable (acfg, NULL, ji) && !acfg->use_bin_writer) {
MonoCompile *callee_cfg = g_hash_table_lookup (acfg->method_to_cfg, ji->data.method);
- fprintf (acfg->fp, "\n.set %s, %s\n", label, callee_cfg->asm_symbol);
+
+ if (acfg->thumb_mixed && !callee_cfg->compile_llvm) {
+ /* LLVM calls the PLT entries using bl, so emit a stub */
+ emit_label (acfg, plt_entry->llvm_symbol);
+ fprintf (acfg->fp, ".thumb_func\n");
+ fprintf (acfg->fp, "bx pc\n");
+ fprintf (acfg->fp, "nop\n");
+ fprintf (acfg->fp, ".arm\n");
+ fprintf (acfg->fp, "b %s\n", callee_cfg->asm_symbol);
+ } else {
+ fprintf (acfg->fp, "\n.set %s, %s\n", plt_entry->llvm_symbol, callee_cfg->asm_symbol);
+ }
continue;
}
}
- emit_label (acfg, label);
+ if (acfg->aot_opts.write_symbols)
+ plt_entry->debug_sym = get_plt_entry_debug_sym (acfg, ji, cache);
+ debug_sym = plt_entry->debug_sym;
- if (acfg->aot_opts.write_symbols) {
- switch (ji->type) {
- case MONO_PATCH_INFO_METHOD:
- debug_sym = get_debug_sym (ji->data.method, "plt_", cache);
- break;
- case MONO_PATCH_INFO_INTERNAL_METHOD:
- debug_sym = g_strdup_printf ("plt__jit_icall_%s", ji->data.name);
- break;
- case MONO_PATCH_INFO_CLASS_INIT:
- debug_sym = g_strdup_printf ("plt__class_init_%s", mono_type_get_name (&ji->data.klass->byval_arg));
- sanitize_symbol (debug_sym);
- break;
- case MONO_PATCH_INFO_RGCTX_FETCH:
- debug_sym = g_strdup_printf ("plt__rgctx_fetch_%d", acfg->label_generator ++);
- break;
- case MONO_PATCH_INFO_ICALL_ADDR: {
- char *s = get_debug_sym (ji->data.method, "", cache);
-
- debug_sym = g_strdup_printf ("plt__icall_native_%s", s);
- g_free (s);
- break;
- }
- case MONO_PATCH_INFO_JIT_ICALL_ADDR:
- debug_sym = g_strdup_printf ("plt__jit_icall_native_%s", ji->data.name);
- break;
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
- debug_sym = g_strdup_printf ("plt__generic_class_init");
- break;
- default:
- break;
+ if (acfg->thumb_mixed && !plt_entry->jit_used)
+ /* Emit only a thumb version */
+ continue;
+
+ if (!acfg->thumb_mixed)
+ emit_label (acfg, plt_entry->llvm_symbol);
+
+ if (debug_sym) {
+ emit_local_symbol (acfg, debug_sym, NULL, TRUE);
+ emit_label (acfg, debug_sym);
+ }
+
+ emit_label (acfg, plt_entry->symbol);
+
+ arch_emit_plt_entry (acfg, i);
+
+ if (debug_sym)
+ emit_symbol_size (acfg, debug_sym, ".");
+ }
+
+ if (acfg->thumb_mixed) {
+ /*
+ * Emit a separate set of PLT entries using thumb2 which is called by LLVM generated
+ * code.
+ */
+ for (i = 0; i < acfg->plt_offset; ++i) {
+ char *debug_sym = NULL;
+ MonoPltEntry *plt_entry = NULL;
+ MonoJumpInfo *ji;
+
+ if (i == 0)
+ continue;
+
+ plt_entry = g_hash_table_lookup (acfg->plt_offset_to_entry, GUINT_TO_POINTER (i));
+ ji = plt_entry->ji;
+
+ if (ji && is_direct_callable (acfg, NULL, ji) && !acfg->use_bin_writer)
+ continue;
+
+ /* Skip plt entries not actually called by LLVM code */
+ if (!plt_entry->llvm_used)
+ continue;
+
+ if (acfg->aot_opts.write_symbols) {
+ if (plt_entry->debug_sym)
+ debug_sym = g_strdup_printf ("%s_thumb", plt_entry->debug_sym);
}
if (debug_sym) {
emit_local_symbol (acfg, debug_sym, NULL, TRUE);
emit_label (acfg, debug_sym);
}
- }
- arch_emit_plt_entry (acfg, i);
+ emit_label (acfg, plt_entry->llvm_symbol);
- if (debug_sym) {
- emit_symbol_size (acfg, debug_sym, ".");
- g_free (debug_sym);
+ arch_emit_llvm_plt_entry (acfg, i);
+
+ if (debug_sym) {
+ emit_symbol_size (acfg, debug_sym, ".");
+ g_free (debug_sym);
+ }
}
}
emit_symbol_size (acfg, acfg->plt_symbol, ".");
sprintf (symbol, "plt_end");
- emit_global (acfg, symbol, TRUE);
emit_label (acfg, symbol);
g_hash_table_destroy (cache);
g_assert_not_reached ();
}
- emit_global (acfg, symbol, TRUE);
emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, symbol);
char*
mono_aot_get_method_name (MonoCompile *cfg)
- {
- guint32 method_index = get_method_index (llvm_acfg, cfg->orig_method);
-
- return g_strdup_printf ("m_%x", method_index);
- }
-
- char*
- mono_aot_get_method_debug_name (MonoCompile *cfg)
{
return get_debug_sym (cfg->orig_method, "", llvm_acfg->method_label_hash);
}
return NULL;
plt_entry = get_plt_entry (llvm_acfg, ji);
+ plt_entry->llvm_used = TRUE;
- return g_strdup_printf (plt_entry->symbol);
+ return g_strdup (plt_entry->llvm_symbol);
}
MonoJumpInfo*
if (!acfg->llc_args)
acfg->llc_args = g_string_new ("");
- #if !LLVM_CHECK_VERSION(2, 8)
- /* LLVM 2.8 removed the -f flag ??? */
- g_string_append (acfg->llc_args, " -f");
- #endif
+ /* Verbose asm slows down llc greatly */
+ g_string_append (acfg->llc_args, " -asm-verbose=false");
if (acfg->aot_opts.mtriple)
g_string_append_printf (acfg->llc_args, " -mtriple=%s", acfg->aot_opts.mtriple);
+ unlink (acfg->tmpfname);
+
command = g_strdup_printf ("llc %s -relocation-model=pic -unwind-tables -disable-gnu-eh-frame -enable-mono-eh-frame -o %s temp.opt.bc", acfg->llc_args->str, acfg->tmpfname);
printf ("Executing llc: %s\n", command);
*/
sprintf (symbol, "methods");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, TRUE);
emit_alignment (acfg, 8);
if (acfg->llvm) {
for (i = 0; i < acfg->nmethods; ++i) {
* Emit some padding so the local symbol for the first method doesn't have the
* same address as 'methods'.
*/
+#if defined(__default_codegen__)
emit_zero_bytes (acfg, 16);
+#elif defined(__native_client_codegen__)
+ {
+ const int kPaddingSize = 16;
+ guint8 pad_buffer[kPaddingSize];
+ mono_arch_nacl_pad (pad_buffer, kPaddingSize);
+ emit_bytes (acfg, pad_buffer, kPaddingSize);
+ }
+#endif
+
for (l = acfg->method_order; l != NULL; l = l->next) {
MonoCompile *cfg;
sprintf (symbol, "methods_end");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "code_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "method_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
name = NULL;
if (method->wrapper_type) {
+ gboolean encode_ref = FALSE;
+
/*
* We encode some wrappers using their name, since encoding them
- * directly would be difficult. This also avoids creating the wrapper
- * methods at runtime, since they are not needed anyway.
+ * directly would be difficult. This works because at runtime, we only need to
+ * check whether a method ref matches an existing MonoMethod. The downside is
+ * that the method names are large, so we use the binary encoding if possible.
*/
switch (method->wrapper_type) {
case MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK:
case MONO_WRAPPER_SYNCHRONIZED:
- /* encode_method_ref () can handle these */
+ encode_ref = TRUE;
break;
+ case MONO_WRAPPER_MANAGED_TO_NATIVE:
+ /* Skip JIT icall wrappers */
+ if (!strstr (method->name, "__icall_wrapper"))
+ encode_ref = TRUE;
+ break;
+ case MONO_WRAPPER_UNKNOWN:
+ if (!strcmp (method->name, "PtrToStructure") || !strcmp (method->name, "StructureToPtr"))
+ encode_ref = TRUE;
+ break;
case MONO_WRAPPER_RUNTIME_INVOKE:
if (mono_marshal_method_from_wrapper (method) != method && !strstr (method->name, "virtual"))
/* Direct wrapper, encode normally */
- break;
- /* Fall through */
+ encode_ref = TRUE;
+ break;
default:
- name = mono_aot_wrapper_name (method);
break;
}
+
+ if (!encode_ref)
+ name = mono_aot_wrapper_name (method);
}
if (name) {
/* Emit the table */
sprintf (symbol, "extra_method_table");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
*/
sprintf (symbol, "extra_method_info_offsets");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "ex_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
emit_section_change (acfg, RODATA_SECT, 1);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
- emit_global (acfg, symbol, FALSE);
for (i = 0; i < acfg->unwind_ops->len; ++i) {
guint32 index = GPOINTER_TO_UINT (g_ptr_array_index (acfg->unwind_ops, i));
acfg->stats.unwind_info_size += (p - buf) + unwind_info_len;
}
-
- /*
- * Emit a reference to the mono_eh_frame table created by our modified LLVM compiler.
- */
- if (acfg->llvm) {
- sprintf (symbol, "mono_eh_frame_addr");
- emit_section_change (acfg, ".data", 0);
- emit_global (acfg, symbol, FALSE);
- emit_alignment (acfg, 8);
- emit_label (acfg, symbol);
- emit_pointer (acfg, "mono_eh_frame");
- }
}
static void
sprintf (symbol, "class_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
/* Emit the table */
sprintf (symbol, "class_name_table");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
* So we emit it at once, and reference its elements by an index.
*/
- sprintf (symbol, "mono_image_table");
+ sprintf (symbol, "image_table");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
/* Emit got_info_offsets table */
sprintf (symbol, "got_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "got_end");
emit_label (acfg, symbol);
}
-
- sprintf (symbol, "mono_aot_got_addr");
- emit_section_change (acfg, ".data", 0);
- emit_global (acfg, symbol, FALSE);
- emit_alignment (acfg, 8);
- emit_label (acfg, symbol);
- emit_pointer (acfg, acfg->got_symbol);
}
typedef struct GlobalsTableEntry {
{
char *build_info;
- emit_string_symbol (acfg, "mono_assembly_guid" , acfg->image->guid);
-
- emit_string_symbol (acfg, "mono_aot_version", MONO_AOT_FILE_VERSION);
+ emit_local_string_symbol (acfg, "assembly_guid" , acfg->image->guid);
if (acfg->aot_opts.bind_to_runtime_version) {
build_info = mono_get_runtime_build_info ();
- emit_string_symbol (acfg, "mono_runtime_version", build_info);
+ emit_local_string_symbol (acfg, "runtime_version", build_info);
g_free (build_info);
} else {
- emit_string_symbol (acfg, "mono_runtime_version", "");
+ emit_local_string_symbol (acfg, "runtime_version", "");
}
/*
sprintf (symbol, "mem_end");
emit_section_change (acfg, ".text", 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
}
emit_global (acfg, symbol, FALSE);
/* The data emitted here must match MonoAotFileInfo. */
+
+ emit_int32 (acfg, MONO_AOT_FILE_VERSION);
+ emit_int32 (acfg, 0);
+
+ /*
+ * We emit pointers to our data structures instead of emitting global symbols which
+ * point to them, to reduce the number of globals, and because using globals leads to
+ * various problems (e.g. on arm/thumb).
+ */
+ emit_pointer (acfg, acfg->got_symbol);
+ emit_pointer (acfg, "methods");
+ if (acfg->llvm) {
+ /*
+ * Emit a reference to the mono_eh_frame table created by our modified LLVM compiler.
+ */
+ emit_pointer (acfg, "mono_eh_frame");
+ } else {
+ emit_pointer (acfg, NULL);
+ }
+ emit_pointer (acfg, "blob");
+ emit_pointer (acfg, "class_name_table");
+ emit_pointer (acfg, "class_info_offsets");
+ emit_pointer (acfg, "method_info_offsets");
+ emit_pointer (acfg, "ex_info_offsets");
+ emit_pointer (acfg, "code_offsets");
+ emit_pointer (acfg, "extra_method_info_offsets");
+ emit_pointer (acfg, "extra_method_table");
+ emit_pointer (acfg, "got_info_offsets");
+ emit_pointer (acfg, "methods_end");
+ emit_pointer (acfg, "unwind_info");
+ emit_pointer (acfg, "mem_end");
+ emit_pointer (acfg, "image_table");
+ emit_pointer (acfg, "plt");
+ emit_pointer (acfg, "plt_end");
+ emit_pointer (acfg, "assembly_guid");
+ emit_pointer (acfg, "runtime_version");
+ if (acfg->num_trampoline_got_entries) {
+ emit_pointer (acfg, "specific_trampolines");
+ emit_pointer (acfg, "static_rgctx_trampolines");
+ emit_pointer (acfg, "imt_thunks");
+ } else {
+ emit_pointer (acfg, NULL);
+ emit_pointer (acfg, NULL);
+ emit_pointer (acfg, NULL);
+ }
+ if (acfg->thumb_mixed) {
+ emit_pointer (acfg, "thumb_end");
+ } else {
+ emit_pointer (acfg, NULL);
+ }
+
emit_int32 (acfg, acfg->plt_got_offset_base);
emit_int32 (acfg, (int)(acfg->got_offset * sizeof (gpointer)));
emit_int32 (acfg, acfg->plt_offset);
sprintf (symbol, "blob");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
#endif
#ifdef __native_client_codegen__
+#if defined(TARGET_AMD64)
+#define AS_NAME "nacl64-as"
+#else
#define AS_NAME "nacl-as"
+#endif
#else
#define AS_NAME "as"
#endif
#endif
acfg->num_trampolines [MONO_AOT_TRAMP_IMT_THUNK] = acfg->aot_opts.full_aot ? acfg->aot_opts.nimt_trampolines : 0;
+ acfg->temp_prefix = img_writer_get_temp_label_prefix (NULL);
+
+ arch_init (acfg);
+
acfg->got_symbol_base = g_strdup_printf ("%smono_aot_%s_got", acfg->llvm_label_prefix, acfg->image->assembly->aname.name);
acfg->plt_symbol = g_strdup_printf ("%smono_aot_%s_plt", acfg->llvm_label_prefix, acfg->image->assembly->aname.name);
*p = '_';
}
- acfg->temp_prefix = img_writer_get_temp_label_prefix (NULL);
-
- /*
- * The prefix LLVM likes to put in front of symbol names on darwin.
- * The mach-os specs require this for globals, but LLVM puts them in front of all
- * symbols. We need to handle this, since we need to refer to LLVM generated
- * symbols.
- */
- acfg->llvm_label_prefix = "";
-
- arch_init (acfg);
-
acfg->method_index = 1;
collect_methods (acfg);
MonoCompile *cfg = acfg->cfgs [i];
int method_index = get_method_index (acfg, cfg->orig_method);
- cfg->asm_symbol = g_strdup_printf ("%s%sm_%x", acfg->temp_prefix, acfg->llvm_label_prefix, method_index);
+ if (COMPILE_LLVM (cfg))
+ cfg->asm_symbol = g_strdup_printf ("%s%s", acfg->llvm_label_prefix, cfg->llvm_method_name);
+ else
+ cfg->asm_symbol = g_strdup_printf ("%s%sm_%x", acfg->temp_prefix, acfg->llvm_label_prefix, method_index);
}
}
if (acfg->dwarf)
mono_dwarf_writer_emit_base_info (acfg->dwarf, mono_unwind_get_cie_program ());
+ if (acfg->thumb_mixed) {
+ char symbol [256];
+ /*
+ * This global symbol marks the end of THUMB code, and the beginning of ARM
+ * code generated by our JIT.
+ */
+ sprintf (symbol, "thumb_end");
+ emit_section_change (acfg, ".text", 0);
+ emit_label (acfg, symbol);
+ fprintf (acfg->fp, ".skip 16\n");
+
+ fprintf (acfg->fp, ".arm\n");
+ }
+
emit_code (acfg);
emit_info (acfg);
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/threads-types.h>
+ #include <mono/metadata/mono-endian.h>
#include <mono/utils/mono-logger-internal.h>
#include <mono/utils/mono-mmap.h>
#include "mono/utils/mono-compiler.h"
guint32 *extra_method_table;
guint32 *extra_method_info_offsets;
guint8 *unwind_info;
+ guint8 *thumb_end;
/* Points to the mono EH data created by LLVM */
guint8 *mono_eh_frame;
MonoMethod *orig_method;
int subtype = decode_value (p, &p);
- if (subtype == MONO_AOT_WRAPPER_MONO_ENTER)
- desc = mono_method_desc_new ("Monitor:Enter", FALSE);
- else if (subtype == MONO_AOT_WRAPPER_MONO_EXIT)
- desc = mono_method_desc_new ("Monitor:Exit", FALSE);
- else
- g_assert_not_reached ();
- orig_method = mono_method_desc_search_in_class (desc, mono_defaults.monitor_class);
- g_assert (orig_method);
- mono_method_desc_free (desc);
- ref->method = mono_monitor_get_fast_path (orig_method);
+ if (subtype == MONO_AOT_WRAPPER_PTR_TO_STRUCTURE || subtype == MONO_AOT_WRAPPER_STRUCTURE_TO_PTR) {
+ MonoClass *klass = decode_klass_ref (module, p, &p);
+
+ if (!klass)
+ return FALSE;
+
+ g_assert (target);
+ if (klass != target->klass)
+ return FALSE;
+
+ if (subtype == MONO_AOT_WRAPPER_PTR_TO_STRUCTURE) {
+ if (strcmp (target->name, "PtrToStructure"))
+ return FALSE;
+ ref->method = mono_marshal_get_ptr_to_struct (klass);
+ } else {
+ if (strcmp (target->name, "StructureToPtr"))
+ return FALSE;
+ ref->method = mono_marshal_get_struct_to_ptr (klass);
+ }
+ } else {
+ if (subtype == MONO_AOT_WRAPPER_MONO_ENTER)
+ desc = mono_method_desc_new ("Monitor:Enter", FALSE);
+ else if (subtype == MONO_AOT_WRAPPER_MONO_EXIT)
+ desc = mono_method_desc_new ("Monitor:Exit", FALSE);
+ else
+ g_assert_not_reached ();
+ orig_method = mono_method_desc_search_in_class (desc, mono_defaults.monitor_class);
+ g_assert (orig_method);
+ mono_method_desc_free (desc);
+ ref->method = mono_monitor_get_fast_path (orig_method);
+ }
break;
}
case MONO_WRAPPER_RUNTIME_INVOKE: {
}
break;
}
+ case MONO_WRAPPER_MANAGED_TO_NATIVE: {
+ MonoMethod *m = decode_resolve_method_ref (module, p, &p);
+
+ if (!m)
+ return FALSE;
+
+ /* This should only happen when looking for an extra method */
+ g_assert (target);
+ if (mono_marshal_method_from_wrapper (target) == m)
+ ref->method = target;
+ else
+ return FALSE;
+ break;
+ }
default:
g_assert_not_reached ();
}
}
}
+ static gboolean
+ check_usable (MonoAssembly *assembly, MonoAotFileInfo *info, char **out_msg)
+ {
+ char *build_info;
+ char *msg = NULL;
+ gboolean usable = TRUE;
+ gboolean full_aot;
+ guint8 *blob;
+
+ if (strcmp (assembly->image->guid, info->assembly_guid)) {
+ msg = g_strdup_printf ("doesn't match the assembly GUID");
+ usable = FALSE;
+ }
+
+ build_info = mono_get_runtime_build_info ();
+ if (strlen (info->runtime_version) > 0 && strcmp (info->runtime_version, build_info)) {
+ msg = g_strdup_printf ("compiled against runtime version '%s' while this runtime has version '%s'", info->runtime_version, build_info);
+ usable = FALSE;
+ }
+ g_free (build_info);
+
+ full_aot = info->flags & MONO_AOT_FILE_FLAG_FULL_AOT;
+
+ if (mono_aot_only && !full_aot) {
+ msg = g_strdup_printf ("not compiled with --aot=full");
+ usable = FALSE;
+ }
+ if (!mono_aot_only && full_aot) {
+ msg = g_strdup_printf ("compiled with --aot=full");
+ usable = FALSE;
+ }
+ #ifdef TARGET_ARM
+ /* mono_arch_find_imt_method () requires this */
+ if ((info->flags & MONO_AOT_FILE_FLAG_WITH_LLVM) && !mono_use_llvm) {
+ msg = g_strdup_printf ("compiled against LLVM");
+ usable = FALSE;
+ }
+ #endif
+ if (mini_get_debug_options ()->mdb_optimizations && !(info->flags & MONO_AOT_FILE_FLAG_DEBUG) && !full_aot) {
+ msg = g_strdup_printf ("not compiled for debugging");
+ usable = FALSE;
+ }
+
+ blob = info->blob;
+
+ if (info->gc_name_index != -1) {
+ char *gc_name = (char*)&blob [info->gc_name_index];
+ const char *current_gc_name = mono_gc_get_gc_name ();
+
+ if (strcmp (current_gc_name, gc_name) != 0) {
+ msg = g_strdup_printf ("compiled against GC %s, while the current runtime uses GC %s.\n", gc_name, current_gc_name);
+ usable = FALSE;
+ }
+ }
+
+ *out_msg = msg;
+ return usable;
+ }
+
static void
load_aot_module (MonoAssembly *assembly, gpointer user_data)
{
MonoAotModule *amodule;
MonoDl *sofile;
gboolean usable = TRUE;
- char *saved_guid = NULL;
- char *aot_version = NULL;
- char *runtime_version, *build_info;
- char *opt_flags = NULL;
+ char *version_symbol = NULL;
+ char *msg = NULL;
gpointer *globals;
- gboolean full_aot = FALSE;
- MonoAotFileInfo *file_info = NULL;
- int i;
- gpointer *got_addr;
+ MonoAotFileInfo *info = NULL;
+ int i, version;
guint8 *blob;
+ gboolean do_load_image = TRUE;
if (mono_compile_aot)
return;
return;
}
- find_symbol (sofile, globals, "mono_assembly_guid", (gpointer *) &saved_guid);
- find_symbol (sofile, globals, "mono_aot_version", (gpointer *) &aot_version);
- find_symbol (sofile, globals, "mono_aot_opt_flags", (gpointer *)&opt_flags);
- find_symbol (sofile, globals, "mono_runtime_version", (gpointer *)&runtime_version);
- find_symbol (sofile, globals, "mono_aot_got_addr", (gpointer *)&got_addr);
+ find_symbol (sofile, globals, "mono_aot_version", (gpointer *) &version_symbol);
+ find_symbol (sofile, globals, "mono_aot_file_info", (gpointer*)&info);
- if (!aot_version || strcmp (aot_version, MONO_AOT_FILE_VERSION)) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s has wrong file format version (expected %s got %s)\n", aot_name, MONO_AOT_FILE_VERSION, aot_version);
- usable = FALSE;
- }
- else {
- if (!saved_guid || strcmp (assembly->image->guid, saved_guid)) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is out of date.\n", aot_name);
- usable = FALSE;
- }
- }
-
- build_info = mono_get_runtime_build_info ();
- if (!runtime_version || ((strlen (runtime_version) > 0 && strcmp (runtime_version, build_info)))) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is compiled against runtime version '%s' while this runtime has version '%s'.\n", aot_name, runtime_version, build_info);
- usable = FALSE;
- }
- g_free (build_info);
-
- find_symbol (sofile, globals, "mono_aot_file_info", (gpointer*)&file_info);
- g_assert (file_info);
-
- full_aot = ((MonoAotFileInfo*)file_info)->flags & MONO_AOT_FILE_FLAG_FULL_AOT;
-
- if (mono_aot_only && !full_aot) {
- fprintf (stderr, "Can't use AOT image '%s' in aot-only mode because it is not compiled with --aot=full.\n", aot_name);
- exit (1);
- }
- if (!mono_aot_only && full_aot) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is compiled with --aot=full.\n", aot_name);
- usable = FALSE;
- }
-
- /* This is no longer needed, LLVM and non-LLVM runtimes should be compatible.
- if ((((MonoAotFileInfo*)file_info)->flags & MONO_AOT_FILE_FLAG_WITH_LLVM) && !mono_use_llvm) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is compiled with LLVM.\n", aot_name);
- usable = FALSE;
+ if (version_symbol) {
+ /* Old file format */
+ version = atoi (version_symbol);
+ } else {
+ g_assert (info);
+ version = info->version;
}
- */
- if (mini_get_debug_options ()->mdb_optimizations && !(file_info->flags & MONO_AOT_FILE_FLAG_DEBUG) && !full_aot) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is not compiled for debugging.\n", aot_name);
+ if (version != MONO_AOT_FILE_VERSION) {
+ msg = g_strdup_printf ("wrong file format version (expected %d got %d)", MONO_AOT_FILE_VERSION, version);
usable = FALSE;
- }
-
- find_symbol (sofile, globals, "blob", (gpointer*)&blob);
-
- if (usable && ((MonoAotFileInfo*)file_info)->gc_name_index != -1) {
- char *gc_name = (char*)&blob [((MonoAotFileInfo*)file_info)->gc_name_index];
- const char *current_gc_name = mono_gc_get_gc_name ();
-
- if (strcmp (current_gc_name, gc_name) != 0) {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is compiled against GC %s, while the current runtime uses GC %s.\n", aot_name, gc_name, current_gc_name);
- usable = FALSE;
- }
+ } else {
+ usable = check_usable (assembly, info, &msg);
}
if (!usable) {
if (mono_aot_only) {
- fprintf (stderr, "Failed to load AOT module '%s' while running in aot-only mode.\n", aot_name);
+ fprintf (stderr, "Failed to load AOT module '%s' while running in aot-only mode: %s.\n", aot_name, msg);
exit (1);
} else {
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is unusable.\n", aot_name);
+ mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT module %s is unusable: %s.\n", aot_name, msg);
}
+ g_free (msg);
g_free (aot_name);
if (sofile)
mono_dl_close (sofile);
return;
}
+ blob = info->blob;
+
amodule = g_new0 (MonoAotModule, 1);
amodule->aot_name = aot_name;
amodule->assembly = assembly;
- memcpy (&amodule->info, file_info, sizeof (*file_info));
+ memcpy (&amodule->info, info, sizeof (*info));
- amodule->got = *got_addr;
+ amodule->got = amodule->info.got;
amodule->got [0] = assembly->image;
amodule->globals = globals;
amodule->sofile = sofile;
guint32 table_len, i;
char *table = NULL;
- find_symbol (sofile, globals, "mono_image_table", (gpointer *)&table);
+ table = info->image_table;
g_assert (table);
table_len = *(guint32*)table;
}
}
- /* Read method and method_info tables */
- find_symbol (sofile, globals, "code_offsets", (gpointer*)&amodule->code_offsets);
- find_symbol (sofile, globals, "methods", (gpointer*)&amodule->code);
- find_symbol (sofile, globals, "methods_end", (gpointer*)&amodule->code_end);
- find_symbol (sofile, globals, "method_info_offsets", (gpointer*)&amodule->method_info_offsets);
- find_symbol (sofile, globals, "ex_info_offsets", (gpointer*)&amodule->ex_info_offsets);
- find_symbol (sofile, globals, "class_info_offsets", (gpointer*)&amodule->class_info_offsets);
- find_symbol (sofile, globals, "class_name_table", (gpointer *)&amodule->class_name_table);
- find_symbol (sofile, globals, "extra_method_table", (gpointer *)&amodule->extra_method_table);
- find_symbol (sofile, globals, "extra_method_info_offsets", (gpointer *)&amodule->extra_method_info_offsets);
- find_symbol (sofile, globals, "got_info_offsets", (gpointer*)&amodule->got_info_offsets);
- find_symbol (sofile, globals, "specific_trampolines", (gpointer*)&(amodule->trampolines [MONO_AOT_TRAMP_SPECIFIC]));
- find_symbol (sofile, globals, "static_rgctx_trampolines", (gpointer*)&(amodule->trampolines [MONO_AOT_TRAMP_STATIC_RGCTX]));
- find_symbol (sofile, globals, "imt_thunks", (gpointer*)&(amodule->trampolines [MONO_AOT_TRAMP_IMT_THUNK]));
- find_symbol (sofile, globals, "unwind_info", (gpointer)&amodule->unwind_info);
- find_symbol (sofile, globals, "mem_end", (gpointer*)&amodule->mem_end);
-
+ amodule->code_offsets = info->code_offsets;
+ amodule->code = info->methods;
+ #ifdef TARGET_ARM
+ /* Mask out thumb interop bit */
+ amodule->code = (void*)((mgreg_t)amodule->code & ~1);
+ #endif
+ amodule->code_end = info->methods_end;
+ amodule->method_info_offsets = info->method_info_offsets;
+ amodule->ex_info_offsets = info->ex_info_offsets;
+ amodule->class_info_offsets = info->class_info_offsets;
+ amodule->class_name_table = info->class_name_table;
+ amodule->extra_method_table = info->extra_method_table;
+ amodule->extra_method_info_offsets = info->extra_method_info_offsets;
+ amodule->got_info_offsets = info->got_info_offsets;
+ amodule->unwind_info = info->unwind_info;
+ amodule->mem_end = info->mem_end;
amodule->mem_begin = amodule->code;
-
- find_symbol (sofile, globals, "plt", (gpointer*)&amodule->plt);
- find_symbol (sofile, globals, "plt_end", (gpointer*)&amodule->plt_end);
-
- if (file_info->flags & MONO_AOT_FILE_FLAG_WITH_LLVM) {
- gpointer *p = NULL;
- find_symbol (sofile, globals, "mono_eh_frame_addr", (gpointer*)&p);
- g_assert (p);
- amodule->mono_eh_frame = *p;
- }
+ amodule->plt = info->plt;
+ amodule->plt_end = info->plt_end;
+ amodule->mono_eh_frame = info->mono_eh_frame;
+ amodule->trampolines [MONO_AOT_TRAMP_SPECIFIC] = info->specific_trampolines;
+ amodule->trampolines [MONO_AOT_TRAMP_STATIC_RGCTX] = info->static_rgctx_trampolines;
+ amodule->trampolines [MONO_AOT_TRAMP_IMT_THUNK] = info->imt_thunks;
+ amodule->thumb_end = info->thumb_end;
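+ /*
+ * The block above replaces the per-table find_symbol () lookups that the
+ * deleted lines performed: the AOT image now exports a single
+ * MonoAotFileInfo whose fields carry every table pointer, so loading is one
+ * symbol lookup plus plain field copies, e.g.:
+ *
+ *     find_symbol (sofile, globals, "mono_aot_file_info", (gpointer*)&info);
+ *     amodule->code = info->methods;   // was find_symbol ("methods", ...)
+ */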
if (make_unreadable) {
#ifndef TARGET_WIN32
* non-lazily, since we can't handle out-of-date errors later.
* The cached class info also depends on the exact assemblies.
*/
- for (i = 0; i < amodule->image_table_len; ++i)
- load_image (amodule, i, FALSE);
+#if defined(__native_client__)
+ /* TODO: Don't 'load_image' on mscorlib due to a */
+ /* recursive loading problem. This should be */
+ /* removed if mscorlib is loaded from disk. */
+ if (strncmp(assembly->aname.name, "mscorlib", 8)) {
+ do_load_image = TRUE;
+ } else {
+ do_load_image = FALSE;
+ }
+#endif
+ if (do_load_image) {
+ for (i = 0; i < amodule->image_table_len; ++i)
+ load_image (amodule, i, FALSE);
+ }
if (amodule->out_of_date) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_AOT, "AOT Module %s is unusable because a dependency is out-of-date.\n", assembly->image->name);
/* Count number of nested clauses */
nested_len = 0;
for (i = 0; i < ei_len; ++i) {
- gint32 cindex1 = *(gint32*)type_info [i];
+ /* This might be unaligned */
+ gint32 cindex1 = read32 (type_info [i]);
GSList *l;
for (l = nesting [cindex1]; l; l = l->next) {
gint32 nesting_cindex = GPOINTER_TO_INT (l->data);
for (j = 0; j < ei_len; ++j) {
- gint32 cindex2 = *(gint32*)type_info [j];
+ gint32 cindex2 = read32 (type_info [j]);
if (cindex2 == nesting_cindex)
nested_len ++;
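/*
 * read32 () is used here because type_info entries need not be 4-byte
 * aligned inside the AOT image, so dereferencing a gint32* directly could
 * fault on strict-alignment targets (e.g. ARM). A little-endian sketch of
 * such a helper, assuming the real one also handles byte order:
 */
static inline gint32
read32_sketch (const guint8 *p)
{
	/* assemble the value bytewise, no alignment requirement on p */
	return (gint32)(p [0] | (p [1] << 8) | (p [2] << 16) | ((guint32)p [3] << 24));
}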
* compiler, we have to combine that with the information produced by LLVM
*/
/* The type_info entries contain IL clause indexes */
- int clause_index = *(gint32*)type_info [i];
+ int clause_index = read32 (type_info [i]);
MonoJitExceptionInfo *jei = &jinfo->clauses [i];
MonoJitExceptionInfo *orig_jei = &clauses [clause_index];
jei->try_start = ei [i].try_start;
jei->try_end = ei [i].try_end;
jei->handler_start = ei [i].handler_start;
+
+ /* Make sure we transition to thumb when a handler starts */
+ if (amodule->thumb_end && (guint8*)jei->handler_start < amodule->thumb_end)
+ jei->handler_start = (void*)((mgreg_t)jei->handler_start + 1);
}
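/*
 * Background for the "+ 1" above: on ARM, bit 0 of a branch target selects
 * the instruction set (0 = ARM, 1 = Thumb), so a handler located below
 * thumb_end must carry the Thumb bit before control is transferred to it.
 * A minimal sketch of the conversion:
 */
static gpointer
to_thumb_address_sketch (gpointer addr)
{
	/* set the interworking bit so the branch enters Thumb state */
	return (gpointer)((mgreg_t)addr | 1);
}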
/* See exception_cb () in mini-llvm.c as to why this is needed */
nindex = ei_len;
for (i = 0; i < ei_len; ++i) {
- gint32 cindex1 = *(gint32*)type_info [i];
+ gint32 cindex1 = read32 (type_info [i]);
GSList *l;
for (l = nesting [cindex1]; l; l = l->next) {
gint32 nesting_cindex = GPOINTER_TO_INT (l->data);
for (j = 0; j < ei_len; ++j) {
- gint32 cindex2 = *(gint32*)type_info [j];
+ gint32 cindex2 = read32 (type_info [j]);
if (cindex2 == nesting_cindex) {
/*
MonoJitInfo *jinfo;
guint used_int_regs, flags;
gboolean has_generic_jit_info, has_dwarf_unwind_info, has_clauses, has_seq_points, has_try_block_holes;
- gboolean from_llvm;
+ gboolean from_llvm, has_gc_map;
guint8 *p;
- int generic_info_size, try_holes_info_size, num_holes, this_reg, this_offset;
+ int generic_info_size, try_holes_info_size, num_holes, this_reg = 0, this_offset = 0;
/* Load the method info from the AOT file */
has_seq_points = (flags & 8) != 0;
from_llvm = (flags & 16) != 0;
has_try_block_holes = (flags & 32) != 0;
+ has_gc_map = (flags & 64) != 0;
if (has_dwarf_unwind_info) {
guint32 offset;
/* Load debug info */
buf_len = decode_value (p, &p);
mono_debug_add_aot_method (domain, method, code, p, buf_len);
+ p += buf_len;
+
+ if (has_gc_map) {
+ int map_size = decode_value (p, &p);
+ /* The GC map requires 4 bytes of alignment */
+ while ((guint64)(gsize)p % 4)
+ p ++;
+ jinfo->gc_info = p;
+ p += map_size;
+ }
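+ /*
+ * The byte-at-a-time loop above just rounds p up to the next 4-byte
+ * boundary; a branch-free sketch of the same alignment step:
+ *
+ *     p = (guint8*)(((gsize)p + 3) & ~(gsize)3);
+ */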
if (amodule != jinfo->method->klass->image->aot_module) {
mono_aot_lock ();
g_hash_table_insert (ji_to_amodule, jinfo, amodule);
mono_aot_unlock ();
}
-
+
return jinfo;
}
/* Compute a sorted table mapping code offsets to method indexes. */
if (!amodule->sorted_code_offsets) {
-
code_offsets = g_new0 (gint32, nmethods * 2);
offsets_len = 0;
for (i = 0; i < nmethods; ++i) {
info = &amodule->blob [mono_aot_get_offset (amodule->method_info_offsets, method_index)];
+ if (amodule->thumb_end && code < amodule->thumb_end) {
+ /* Convert this into a thumb address */
+ g_assert ((amodule->code_offsets [method_index] & 0x1) == 0);
+ code = &amodule->code [amodule->code_offsets [method_index] + 1];
+ }
+
mono_aot_lock ();
if (!amodule->methods_loaded)
amodule->methods_loaded = g_new0 (guint32, amodule->info.nmethods + 1);
guint8*
mono_aot_get_plt_entry (guint8 *code)
{
- MonoAotModule *aot_module = find_aot_module (code);
+ MonoAotModule *amodule = find_aot_module (code);
+ guint8 *target = NULL;
- if (!aot_module)
+ if (!amodule)
return NULL;
- #ifdef MONO_ARCH_AOT_SUPPORTED
- {
- guint8 *target = mono_arch_get_call_target (code);
-
- if ((target >= (guint8*)(aot_module->plt)) && (target < (guint8*)(aot_module->plt_end)))
- return target;
+ #ifdef TARGET_ARM
+ if (amodule->thumb_end && code < amodule->thumb_end) {
+ return mono_arm_get_thumb_plt_entry (code);
}
+ #endif
+
+ #ifdef MONO_ARCH_AOT_SUPPORTED
+ target = mono_arch_get_call_target (code);
#else
g_assert_not_reached ();
#endif
- return NULL;
+ if ((target >= (guint8*)(amodule->plt)) && (target < (guint8*)(amodule->plt_end)))
+ return target;
+ else
+ return NULL;
}
/*
#
# See the code in mini-x86.c for more details on how the specifiers are used.
#
+#
+# Native Client Note: NaCl call sequences rarely exceed 32 bytes, but the
+# declared maximum must be generous: if we get unlucky and a call sequence
+# would run one or two bytes past a bundle boundary, it has to be padded out
+# by almost an entire 32-byte bundle.
+#
+
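+# For reference: entries below that carry a "nacl:" attribute use that value
+# as the maximum instruction length when NaCl codegen is enabled; the plain
+# "len:" value still applies to ordinary codegen.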
break: len:2
jmp: len:120
tailcall: len:120 clob:c
label: len:0
seq_point: len:25
-long_add: dest:i src1:i src2:i len:3 clob:1
-long_sub: dest:i src1:i src2:i len:3 clob:1
+long_add: dest:i src1:i src2:i len:3 clob:1 nacl:6
+long_sub: dest:i src1:i src2:i len:3 clob:1 nacl:6
long_mul: dest:i src1:i src2:i len:4 clob:1
long_div: dest:a src1:a src2:i len:16 clob:d
long_div_un: dest:a src1:a src2:i len:16 clob:d
long_max: dest:i src1:i src2:i len:16 clob:1
long_max_un: dest:i src1:i src2:i len:16 clob:1
-throw: src1:i len:18
-rethrow: src1:i len:18
+throw: src1:i len:18 nacl:50
+rethrow: src1:i len:18 nacl:50
start_handler: len:16
-endfinally: len:9
-endfilter: src1:a len:9
+endfinally: len:9 nacl:22
+endfilter: src1:a len:9 nacl:19
ckfinite: dest:f src1:f len:43
ceq: dest:c len:8
cgt: dest:c len:8
icompare_imm: src1:i len:8
fcompare: src1:f src2:f clob:a len:13
oparglist: src1:b len:11
-checkthis: src1:b len:5
-call: dest:a clob:c len:32
-voidcall: clob:c len:32
-voidcall_reg: src1:i clob:c len:32
-voidcall_membase: src1:b clob:c len:32
+checkthis: src1:b len:5 nacl:8
+call: dest:a clob:c len:32 nacl:64
+voidcall: clob:c len:32 nacl:64
+voidcall_reg: src1:i clob:c len:32 nacl:64
+voidcall_membase: src1:b clob:c len:32 nacl:64
fcall: dest:f len:64 clob:c
fcall_reg: dest:f src1:i len:64 clob:c
fcall_membase: dest:f src1:b len:64 clob:c
vcall: len:64 clob:c
vcall_reg: src1:i len:64 clob:c
vcall_membase: src1:b len:64 clob:c
-call_reg: dest:a src1:i len:32 clob:c
-call_membase: dest:a src1:b len:32 clob:c
+call_reg: dest:a src1:i len:32 clob:c nacl:64
+call_membase: dest:a src1:b len:32 clob:c nacl:64
iconst: dest:i len:10
i8const: dest:i len:10
r4const: dest:f len:14
r8const: dest:f len:9
store_membase_imm: dest:b len:15
-store_membase_reg: dest:b src1:i len:9
-storei8_membase_reg: dest:b src1:i len:9
-storei1_membase_imm: dest:b len:11
-storei1_membase_reg: dest:b src1:c len:9
-storei2_membase_imm: dest:b len:13
-storei2_membase_reg: dest:b src1:i len:9
-storei4_membase_imm: dest:b len:13
-storei4_membase_reg: dest:b src1:i len:9
+store_membase_reg: dest:b src1:i len:9 nacl:11
+storei8_membase_reg: dest:b src1:i len:9 nacl:11
+storei1_membase_imm: dest:b len:11 nacl:15
+storei1_membase_reg: dest:b src1:c len:9 nacl:11
+storei2_membase_imm: dest:b len:13 nacl:15
+storei2_membase_reg: dest:b src1:i len:9 nacl:11
+storei4_membase_imm: dest:b len:13 nacl:15
+storei4_membase_reg: dest:b src1:i len:9 nacl:11
storei8_membase_imm: dest:b len:18
storer4_membase_reg: dest:b src1:f len:15
storer8_membase_reg: dest:b src1:f len:10
-load_membase: dest:i src1:b len:8
-loadi1_membase: dest:c src1:b len:9
-loadu1_membase: dest:c src1:b len:9
-loadi2_membase: dest:i src1:b len:9
-loadu2_membase: dest:i src1:b len:9
-loadi4_membase: dest:i src1:b len:9
-loadu4_membase: dest:i src1:b len:9
-loadi8_membase: dest:i src1:b len:18
+load_membase: dest:i src1:b len:8 nacl:12
+loadi1_membase: dest:c src1:b len:9 nacl:12
+loadu1_membase: dest:c src1:b len:9 nacl:12
+loadi2_membase: dest:i src1:b len:9 nacl:12
+loadu2_membase: dest:i src1:b len:9 nacl:12
+loadi4_membase: dest:i src1:b len:9 nacl:12
+loadu4_membase: dest:i src1:b len:9 nacl:12
+loadi8_membase: dest:i src1:b len:18 nacl:14
loadr4_membase: dest:f src1:b len:16
loadr8_membase: dest:f src1:b len:16
loadu4_mem: dest:i len:10
amd64_loadi8_memindex: dest:i src1:i src2:i len:10
move: dest:i src1:i len:3
-add_imm: dest:i src1:i len:8 clob:1
-sub_imm: dest:i src1:i len:8 clob:1
+add_imm: dest:i src1:i len:8 clob:1 nacl:11
+sub_imm: dest:i src1:i len:8 clob:1 nacl:11
mul_imm: dest:i src1:i len:11
and_imm: dest:i src1:i len:8 clob:1
or_imm: dest:i src1:i len:8 clob:1
float_clt_un_membase: dest:i src1:f src2:b len:42
float_conv_to_u: dest:i src1:f len:46
fmove: dest:f src1:f len:8
-call_handler: len:14 clob:c
+call_handler: len:14 clob:c nacl:52
aot_const: dest:i len:10
+nacl_gc_safe_point: clob:c
x86_test_null: src1:i len:5
x86_compare_membase_reg: src1:b src2:i len:9
x86_compare_membase_imm: src1:b len:13
x86_push_membase: src1:b len:8
x86_push_obj: src1:b len:40
x86_lea: dest:i src1:i src2:i len:8
-x86_lea_membase: dest:i src1:i len:11
+x86_lea_membase: dest:i src1:i len:11 nacl:14
x86_xchg: src1:i src2:i clob:x len:2
x86_fpop: src1:f len:3
x86_seteq_membase: src1:b len:9
adc_imm: dest:i src1:i len:8 clob:1
sbb: dest:i src1:i src2:i len:3 clob:1
sbb_imm: dest:i src1:i len:8 clob:1
-br_reg: src1:i len:3
+br_reg: src1:i len:3 nacl:8
sin: dest:f src1:f len:32
cos: dest:f src1:f len:32
abs: dest:f src1:f clob:1 len:32
sext_i4: dest:i src1:i len:8
# 32 bit opcodes
-int_add: dest:i src1:i src2:i clob:1 len:4
-int_sub: dest:i src1:i src2:i clob:1 len:4
+int_add: dest:i src1:i src2:i clob:1 len:4 nacl:7
+int_sub: dest:i src1:i src2:i clob:1 len:4 nacl:7
int_mul: dest:i src1:i src2:i clob:1 len:4
int_mul_ovf: dest:i src1:i src2:i clob:1 len:32
int_mul_ovf_un: dest:i src1:i src2:i clob:1 len:32
int_sbb_imm: dest:i src1:i clob:1 len:8
int_addcc: dest:i src1:i src2:i clob:1 len:16
int_subcc: dest:i src1:i src2:i clob:1 len:16
-int_add_imm: dest:i src1:i clob:1 len:8
-int_sub_imm: dest:i src1:i clob:1 len:8
+int_add_imm: dest:i src1:i clob:1 len:8 nacl:10
+int_sub_imm: dest:i src1:i clob:1 len:8 nacl:10
int_mul_imm: dest:i src1:i clob:1 len:32
int_div_imm: dest:a src1:i clob:d len:32
int_div_un_imm: dest:a src1:i clob:d len:32
cmov_lle_un: dest:i src1:i src2:i len:16 clob:1
cmov_llt_un: dest:i src1:i src2:i len:16 clob:1
-long_add_imm: dest:i src1:i clob:1 len:12
-long_sub_imm: dest:i src1:i clob:1 len:12
+long_add_imm: dest:i src1:i clob:1 len:12 nacl:15
+long_sub_imm: dest:i src1:i clob:1 len:12 nacl:15
long_and_imm: dest:i src1:i clob:1 len:12
long_or_imm: dest:i src1:i clob:1 len:12
long_xor_imm: dest:i src1:i clob:1 len:12
vcall2_reg: src1:i len:64 clob:c
vcall2_membase: src1:b len:64 clob:c
-dyn_call: src1:i src2:i len:64 clob:c
+dyn_call: src1:i src2:i len:64 clob:c nacl:128
localloc_imm: dest:i len:84
liverange_start: len:0
liverange_end: len:0
+ gc_liveness_def: len:0
+ gc_liveness_use: len:0
+ gc_spill_slot_liveness_def: len:0
+ gc_param_slot_liveness_def: len:0
+
aot_const: dest:i len:5
load_gotaddr: dest:i len:64
got_entry: dest:i src1:b len:7
+nacl_gc_safe_point: clob:c
x86_test_null: src1:i len:2
x86_compare_membase_reg: src1:b src2:i len:7
x86_compare_membase_imm: src1:b len:11
liverange_start: len:0
liverange_end: len:0
+ gc_liveness_def: len:0
+ gc_liveness_use: len:0
+ gc_spill_slot_liveness_def: len:0
+ gc_param_slot_liveness_def: len:0
amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#endif
if (mono_running_on_valgrind ()) {
/* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
+ nacl_global_codeman_validate(&start, 256, &code);
+
mono_arch_flush_icache (start, code - start);
if (info)
guint32 pos;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
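+ /*
+ * NACL_SIZE selects the reserve size per target, since NaCl bundle padding
+ * roughly doubles worst-case trampoline size. A sketch of the shape such a
+ * macro can take (the real definition lives in a shared header):
+ *
+ *     #ifdef __native_client_codegen__
+ *     #define NACL_SIZE(non_nacl, nacl) (nacl)
+ *     #else
+ *     #define NACL_SIZE(non_nacl, nacl) (non_nacl)
+ *     #endif
+ */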
/* call_filter (MonoContext *ctx, unsigned long eip) */
code = start;
amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#endif
#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
amd64_leave (code);
amd64_ret (code);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
mono_arch_flush_icache (start, code - start);
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- int i, buf_size, stack_size, arg_offsets [16], regs_offset;
+ int i, stack_size, arg_offsets [16], regs_offset;
+ const guint kMaxCodeSize = NACL_SIZE (256, 512);
- buf_size = 256;
- start = code = mono_global_codeman_reserve (buf_size);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* The stack is unaligned on entry */
stack_size = 192 + 8;
*/
arg_offsets [0] = 0;
- arg_offsets [1] = sizeof (gpointer);
- arg_offsets [2] = sizeof (gpointer) * 2;
- arg_offsets [3] = sizeof (gpointer) * 3;
- regs_offset = sizeof (gpointer) * 4;
+ arg_offsets [1] = sizeof(mgreg_t);
+ arg_offsets [2] = sizeof(mgreg_t) * 2;
+ arg_offsets [3] = sizeof(mgreg_t) * 3;
+ regs_offset = sizeof(mgreg_t) * 4;
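+ /*
+ * sizeof(mgreg_t) rather than sizeof(gpointer) matters under NaCl's ILP32
+ * model, where registers are 8 bytes but pointers only 4; a sketch of the
+ * distinction (the actual typedef lives in the runtime headers):
+ *
+ *     #ifdef __mono_ilp32__
+ *     typedef gint64 mgreg_t;   // register-sized, wider than a pointer
+ *     #else
+ *     typedef gsize mgreg_t;    // registers and pointers match
+ *     #endif
+ */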
/* Save registers */
for (i = 0; i < AMD64_NREG; ++i)
if (i != AMD64_RSP)
- amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof (gpointer)), i, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Save RSP */
- amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof (gpointer));
- amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof (gpointer)), X86_EAX, 8);
+ amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
/* Set arg1 == regs */
amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
/* Set arg2 == eip */
if (llvm_abs)
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
else
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, 8);
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, 8);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
/* Set arg3 == exc/ex_token_index */
if (resume_unwind)
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
else
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
/* Set arg4 == rethrow/pc offset */
if (resume_unwind) {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
} else if (corlib) {
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
if (llvm_abs)
/*
* The caller is LLVM code which passes the absolute address not a pc offset,
*/
amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
} else {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
}
if (aot) {
mono_arch_flush_icache (start, code - start);
- g_assert ((code - start) < buf_size);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
if (info)
*info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
*new_ctx = *ctx;
if (ji != NULL) {
- gssize regs [MONO_MAX_IREGS + 1];
+ mgreg_t regs [MONO_MAX_IREGS + 1];
guint8 *cfa;
guint32 unwind_info_len;
guint8 *unwind_info;
unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
else
unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
+
+ frame->unwind_info = unwind_info;
+ frame->unwind_info_len = unwind_info_len;
regs [AMD64_RAX] = new_ctx->rax;
regs [AMD64_RBX] = new_ctx->rbx;
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, MONO_MAX_IREGS + 1, &cfa);
+ ip, regs, MONO_MAX_IREGS + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
new_ctx->rax = regs [AMD64_RAX];
new_ctx->rbx = regs [AMD64_RBX];
new_ctx->r15 = regs [AMD64_R15];
/* The CFA becomes the new SP value */
- new_ctx->rsp = (gssize)cfa;
+ new_ctx->rsp = (mgreg_t)cfa;
/* Adjust IP */
new_ctx->rip --;
* The rsp field is set just before the call which transitioned to native
* code. Obtain the rip from the stack.
*/
- rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
+ rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
}
ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
+#if defined(__native_client_codegen__) || defined(__native_client__)
+ printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
+#endif
+
#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
+#endif
+
#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
gpointer throw_trampoline;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* We are in the frame of a managed method after a call */
/*
/* Return to original code */
amd64_jump_reg (code, AMD64_R11);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
static guint8* saved = NULL;
guint8 *code, *start;
int cont_reg = AMD64_R9; /* register usable on both call conventions */
+ const guint kMaxCodeSize = NACL_SIZE (64, 128);
+
if (saved)
return (MonoContinuationRestore)saved;
- code = start = mono_global_codeman_reserve (64);
+ code = start = mono_global_codeman_reserve (kMaxCodeSize);
/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
* state is in AMD64_ARG_REG2 ($rdx or $rsi)
amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#endif
#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
/* state is already in rax */
amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
- g_assert ((code - start) <= 64);
+ g_assert ((code - start) <= kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
+
saved = start;
return (MonoContinuationRestore)saved;
}
do {
MonoContext new_ctx;
- mono_arch_find_jit_info (domain, jit_tls, &rji, &ctx, &new_ctx, &lmf, &frame);
+ mono_arch_find_jit_info (domain, jit_tls, &rji, &ctx, &new_ctx, &lmf, NULL, &frame);
if (!frame.ji) {
g_warning ("Exception inside function without unwind info");
g_assert_not_reached ();
/* jump to the saved IP */
x86_ret (code);
+ nacl_global_codeman_validate(&start, 128, &code);
+
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);
else {
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
-#ifdef __native_client_codegen__
- guint kMaxCodeSize = 128;
-#else
- guint kMaxCodeSize = 64;
-#endif /* __native_client_codegen__ */
+ guint kMaxCodeSize = NACL_SIZE (64, 128);
/* call_filter (MonoContext *ctx, unsigned long eip) */
start = code = mono_global_codeman_reserve (kMaxCodeSize);
x86_leave (code);
x86_ret (code);
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
+
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
else {
int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
-#ifdef __native_client_codegen__
- guint kMaxCodeSize = 256;
-#else
- guint kMaxCodeSize = 128;
-#endif
+ guint kMaxCodeSize = NACL_SIZE (128, 256);
+
start = code = mono_global_codeman_reserve (kMaxCodeSize);
stack_size = 128;
}
x86_breakpoint (code);
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
+
g_assert ((code - start) < kMaxCodeSize);
if (info)
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, MONO_MAX_IREGS + 1, &cfa);
+ ip, regs, MONO_MAX_IREGS + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
new_ctx->eax = regs [X86_EAX];
new_ctx->ebx = regs [X86_EBX];
return alloc_dreg (cfg, stack_type);
}
+ /*
+ * mono_alloc_ireg_ref:
+ *
+ * Allocate an IREG, and mark it as holding a GC ref.
+ */
+ guint32
+ mono_alloc_ireg_ref (MonoCompile *cfg)
+ {
+ return alloc_ireg_ref (cfg);
+ }
+
+ /*
+ * mono_alloc_ireg_mp:
+ *
+ * Allocate an IREG, and mark it as holding a managed pointer.
+ */
+ guint32
+ mono_alloc_ireg_mp (MonoCompile *cfg)
+ {
+ return alloc_ireg_mp (cfg);
+ }
+
+ /*
+ * mono_alloc_ireg_copy:
+ *
+ * Allocate an IREG with the same GC type as VREG.
+ */
+ guint32
+ mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
+ {
+ if (vreg_is_ref (cfg, vreg))
+ return alloc_ireg_ref (cfg);
+ else if (vreg_is_mp (cfg, vreg))
+ return alloc_ireg_mp (cfg);
+ else
+ return alloc_ireg (cfg);
+ }
+
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
- (dest)->dreg = alloc_preg ((cfg)); \
+ (dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
- if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
- if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
+ if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case CEE_BEQ:
break;
case STACK_PTR:
case STACK_MP:
-#if SIZEOF_REGISTER == 8
+#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
return vtable_var;
} else {
MonoInst *ins;
- int vtable_reg, res_reg;
+ int vtable_reg;
vtable_reg = alloc_preg (cfg);
- res_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
return ins;
}
reset_cast_details (cfg);
}
- NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
+ NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
add->type = STACK_MP;
add->klass = klass;
MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
int obj_reg = src->dreg;
int vtable_reg = alloc_preg (cfg);
- int res_reg = alloc_preg (cfg);
+ int res_reg = alloc_ireg_ref (cfg);
MonoInst *klass_inst = NULL;
if (context_used) {
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
- ins->type = STACK_PTR;
+ ins->klass = mono_class_get_element_class (klass);
+ ins->type = STACK_MP;
return ins;
}
#endif
- add_reg = alloc_preg (cfg);
+ add_reg = alloc_ireg_mp (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
- ins->type = STACK_PTR;
+ ins->klass = mono_class_get_element_class (klass);
+ ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
int bounds_reg = alloc_preg (cfg);
- int add_reg = alloc_preg (cfg);
+ int add_reg = alloc_ireg_mp (cfg);
int mult_reg = alloc_preg (cfg);
int mult2_reg = alloc_preg (cfg);
int low1_reg = alloc_preg (cfg);
} else if (cmethod->klass == mono_defaults.object_class) {
if (strcmp (cmethod->name, "GetType") == 0) {
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
*/
if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
- int bounds_reg = alloc_ireg (cfg);
+ int bounds_reg = alloc_ireg_mp (cfg);
MonoBasicBlock *end_bb, *szarray_bb;
gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
size = 8;
if (size == 4) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
- ins->dreg = alloc_ireg (cfg);
+ ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if (size == 8) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
- ins->dreg = alloc_ireg (cfg);
+ ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
+ dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
image = method->klass->image;
header = mono_method_get_header (method);
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
+#if defined(__native_client_codegen__)
+ MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
+ ins->dreg = alloc_dreg (cfg, STACK_I4);
+ MONO_ADD_INS (start_bblock, ins);
+#endif
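+/*
+ * NaCl cannot interrupt threads with signals, so a cooperative safe point
+ * is planted at every method entry: OP_NACL_GC_SAFE_POINT (clob:c in the
+ * cpu descriptions above) expands to a call into the GC that only acts
+ * when a collection has been requested.
+ */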
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
CHECK_CFG_EXCEPTION;
} else if (!constrained_call->valuetype) {
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
/*
* The type parameter is instantiated as a reference
case CEE_LDIND_I8:
dreg = alloc_lreg (cfg);
break;
+ case CEE_LDIND_REF:
+ dreg = alloc_ireg_ref (cfg);
+ break;
default:
dreg = alloc_preg (cfg);
}
sp -= 2;
if (generic_class_is_reference_type (cfg, klass)) {
MonoInst *store, *load;
- int dreg = alloc_preg (cfg);
+ int dreg = alloc_ireg_ref (cfg);
NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
load->flags |= ins_flag;
MonoInst *ptr;
int dreg;
- dreg = alloc_preg (cfg);
+ dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
emit_write_barrier (cfg, ptr, sp [1], -1);
}
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
- dreg = alloc_preg (cfg);
+ dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
ins->klass = mono_class_from_mono_type (field->type);
CHECK_TYPELOAD (array_type);
MONO_INST_NEW (cfg, ins, OP_NEWARR);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ireg_ref (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
MonoMethod *memcpy_method = get_memcpy_method ();
MonoInst *iargs [3];
- int add_reg = alloc_preg (cfg);
+ int add_reg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
if (cfg->compile_aot) {
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- dreg = alloc_preg (cfg);
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = sp [0]->dreg;
CHECK_STACK (1);
--sp;
MONO_INST_NEW (cfg, ins, OP_MOVE);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ireg_mp (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
cmp->sreg2 = sp [1]->dreg;
type_from_op (cmp, sp [0], sp [1]);
CHECK_TYPE (cmp);
- if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
+ if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (sp [0]->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
switch (opcode) {
case OP_X86_PUSH:
+#ifdef __mono_ilp32__
+ if (load_opcode == OP_LOADI8_MEMBASE)
+#else
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
+#endif
return OP_X86_PUSH_MEMBASE;
break;
/* FIXME: This only works for 32 bit immediates
break;
case OP_COMPARE:
case OP_LCOMPARE:
+#ifdef __mono_ilp32__
+ if (load_opcode == OP_LOAD_MEMBASE)
+ return OP_AMD64_ICOMPARE_MEMBASE_REG;
+ if (load_opcode == OP_LOADI8_MEMBASE)
+#else
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
+#endif
return OP_AMD64_COMPARE_MEMBASE_REG;
break;
case OP_ICOMPARE:
#endif
#ifdef TARGET_AMD64
+#ifdef __mono_ilp32__
+ if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
+#else
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
+#endif
switch (opcode) {
case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
+#ifdef __mono_ilp32__
+ } else if (load_opcode == OP_LOADI8_MEMBASE) {
+#else
} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
+#endif
switch (opcode) {
case OP_COMPARE:
case OP_LCOMPARE:
switch (regtype) {
case 'i':
- mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
+ if (vreg_is_ref (cfg, vreg))
+ mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
+ else
+ mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
break;
case 'l':
mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
}
#endif
+ if (cfg->compute_gc_maps) {
+ /* registers need liveness info even for non-ref variables */
+ for (i = 0; i < cfg->num_varinfo; i++) {
+ MonoInst *ins = cfg->varinfo [i];
+
+ if (ins->opcode == OP_REGVAR)
+ ins->flags |= MONO_INST_GC_TRACK;
+ }
+ }
+
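+ /*
+ * OP_GC_LIVENESS_DEF / OP_GC_LIVENESS_USE are the len:0 pseudo ops added to
+ * the cpu descriptions above: they emit no machine code and only record the
+ * native offset at which a tracked vreg is defined or last used, so the GC
+ * map writer can mark its slot live over exactly that range.
+ */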
/* FIXME: widening and truncation */
/*
live_range_start [dreg] = def_ins;
live_range_start_bb [dreg] = bb;
}
+
+ if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
+ MonoInst *tmp;
+
+ MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
+ tmp->inst_c1 = dreg;
+ mono_bblock_insert_after_ins (bb, def_ins, tmp);
+ }
}
/************/
//mono_inst_set_src_registers (ins, sregs);
live_range_end [sreg] = use_ins;
live_range_end_bb [sreg] = bb;
+
+ if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
+ MonoInst *tmp;
+
+ MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
+ /* var->dreg is a hreg */
+ tmp->inst_c1 = sreg;
+ mono_bblock_insert_after_ins (bb, ins, tmp);
+ }
+
continue;
}
live_range_end [var->dreg] = use_ins;
live_range_end_bb [var->dreg] = bb;
}
+
+ if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
+ MonoInst *tmp;
+
+ MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
+ tmp->inst_c1 = var->dreg;
+ mono_bblock_insert_after_ins (bb, ins, tmp);
+ }
}
}
mono_inst_set_src_registers (ins, sregs);
#include "mini-amd64.h"
#include "cpu-amd64.h"
#include "debugger-agent.h"
+ #include "mini-gc.h"
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
return code [0] == 0xe8;
}
+#ifdef __native_client_codegen__
+
+/* Keep track of instruction "depth", that is, the level of sub-instruction */
+/* for any given instruction. For instance, amd64_call_reg resolves to */
+/* amd64_call_reg_internal, which uses amd64_alu_* macros, etc. */
+/* We only want to force bundle alignment for the top level instruction, */
+/* so NaCl pseudo-instructions can be implemented with sub instructions. */
+static guint32 nacl_instruction_depth;
+
+static guint32 nacl_rex_tag;
+static guint32 nacl_legacy_prefix_tag;
+
+void
+amd64_nacl_clear_legacy_prefix_tag ()
+{
+ TlsSetValue (nacl_legacy_prefix_tag, NULL);
+}
+
+void
+amd64_nacl_tag_legacy_prefix (guint8* code)
+{
+ if (TlsGetValue (nacl_legacy_prefix_tag) == NULL)
+ TlsSetValue (nacl_legacy_prefix_tag, code);
+}
+
+void
+amd64_nacl_tag_rex (guint8* code)
+{
+ TlsSetValue (nacl_rex_tag, code);
+}
+
+guint8*
+amd64_nacl_get_legacy_prefix_tag ()
+{
+ return (guint8*)TlsGetValue (nacl_legacy_prefix_tag);
+}
+
+guint8*
+amd64_nacl_get_rex_tag ()
+{
+ return (guint8*)TlsGetValue (nacl_rex_tag);
+}
+
+/* Increment the instruction "depth" described above */
+void
+amd64_nacl_instruction_pre ()
+{
+ intptr_t depth = (intptr_t) TlsGetValue (nacl_instruction_depth);
+ depth++;
+ TlsSetValue (nacl_instruction_depth, (gpointer)depth);
+}
+
+/* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */
+/* alignment if depth == 0 (top level instruction) */
+/* IN: start, end pointers to instruction beginning and end */
+/* OUT: start, end pointers to beginning and end after possible alignment */
+/* GLOBALS: nacl_instruction_depth defined above */
+void
+amd64_nacl_instruction_post (guint8 **start, guint8 **end)
+{
+ intptr_t depth = (intptr_t) TlsGetValue(nacl_instruction_depth);
+ depth--;
+ TlsSetValue (nacl_instruction_depth, (void*)depth);
+
+ g_assert ( depth >= 0 );
+ if (depth == 0) {
+ uintptr_t space_in_block;
+ uintptr_t instlen;
+ guint8 *prefix = amd64_nacl_get_legacy_prefix_tag ();
+ /* if legacy prefix is present, and if it was emitted before */
+ /* the start of the instruction sequence, adjust the start */
+ if (prefix != NULL && prefix < *start) {
+ g_assert (*start - prefix <= 3); /* only 3 are allowed */
+ *start = prefix;
+ }
+ space_in_block = kNaClAlignment - ((uintptr_t)(*start) & kNaClAlignmentMask);
+ instlen = (uintptr_t)(*end - *start);
+ /* Only check for instructions which are less than */
+ /* kNaClAlignment. The only instructions that should ever */
+ /* be that long are call sequences, which are already */
+ /* padded out to align the return to the next bundle. */
+ if (instlen > space_in_block && instlen < kNaClAlignment) {
+ const size_t MAX_NACL_INST_LENGTH = kNaClAlignment;
+ guint8 copy_of_instruction[MAX_NACL_INST_LENGTH];
+ const size_t length = (size_t)((*end)-(*start));
+ g_assert (length < MAX_NACL_INST_LENGTH);
+
+ memcpy (copy_of_instruction, *start, length);
+ *start = mono_arch_nacl_pad (*start, space_in_block);
+ memcpy (*start, copy_of_instruction, length);
+ *end = *start + length;
+ }
+ amd64_nacl_clear_legacy_prefix_tag ();
+ amd64_nacl_tag_rex (NULL);
+ }
+}
+
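+/*
+ * Sketched usage of the pre/post pair: every top-level emitter is bracketed
+ * by it, and nested emitters only move the depth counter, so bundle
+ * alignment is applied exactly once, at depth 0:
+ *
+ *     amd64_nacl_instruction_pre ();
+ *     amd64_call_reg (code, AMD64_RAX);            // any one instruction
+ *     amd64_nacl_instruction_post (&start, &code);
+ */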
+/* amd64_nacl_membase_handler: ensure all access to memory of the form */
+/* OFFSET(%rXX) is sandboxed. For allowable base registers %rip, %rbp, */
+/* %rsp, and %r15, emit the membase as usual. For all other registers, */
+/* make sure the upper 32-bits are cleared, and use that register in the */
+/* index field of a new address of this form: OFFSET(%r15,%eXX,1) */
+/* IN: code */
+/* pointer to current instruction stream (in the */
+/* middle of an instruction, after opcode is emitted) */
+/* basereg/offset/dreg */
+/* operands of normal membase address */
+/* OUT: code */
+/* pointer to the end of the membase/memindex emit */
+/* GLOBALS: nacl_rex_tag */
+/* position in instruction stream that rex prefix was emitted */
+/* nacl_legacy_prefix_tag */
+/* (possibly NULL) position in instruction of legacy x86 prefix */
+void
+amd64_nacl_membase_handler (guint8** code, gint8 basereg, gint32 offset, gint8 dreg)
+{
+ gint8 true_basereg = basereg;
+
+ /* Cache these values, they might change */
+ /* as new instructions are emitted below. */
+ guint8* rex_tag = amd64_nacl_get_rex_tag ();
+ guint8* legacy_prefix_tag = amd64_nacl_get_legacy_prefix_tag ();
+
+ /* 'basereg' is given masked to 0x7 at this point, so check */
+ /* the rex prefix to see if this is an extended register. */
+ if ((rex_tag != NULL) && IS_REX(*rex_tag) && (*rex_tag & AMD64_REX_B)) {
+ true_basereg |= 0x8;
+ }
+
+#define X86_LEA_OPCODE (0x8D)
+
+ if (!amd64_is_valid_nacl_base (true_basereg) && (*(*code-1) != X86_LEA_OPCODE)) {
+ guint8* old_instruction_start;
+
+ /* This will hold the 'mov %eXX, %eXX' that clears the upper */
+ /* 32-bits of the old base register (new index register) */
+ guint8 buf[32];
+ guint8* buf_ptr = buf;
+ size_t insert_len;
+
+ g_assert (rex_tag != NULL);
+
+ if (IS_REX(*rex_tag)) {
+ /* The old rex.B should be the new rex.X */
+ if (*rex_tag & AMD64_REX_B) {
+ *rex_tag |= AMD64_REX_X;
+ }
+ /* Since our new base is %r15 set rex.B */
+ *rex_tag |= AMD64_REX_B;
+ } else {
+ /* Shift the instruction by one byte */
+ /* so we can insert a rex prefix */
+ memmove (rex_tag + 1, rex_tag, (size_t)(*code - rex_tag));
+ *code += 1;
+ /* New rex prefix only needs rex.B for %r15 base */
+ *rex_tag = AMD64_REX(AMD64_REX_B);
+ }
+
+ if (legacy_prefix_tag) {
+ old_instruction_start = legacy_prefix_tag;
+ } else {
+ old_instruction_start = rex_tag;
+ }
+
+ /* Clears the upper 32-bits of the previous base register */
+ amd64_mov_reg_reg_size (buf_ptr, true_basereg, true_basereg, 4);
+ insert_len = buf_ptr - buf;
+
+ /* Move the old instruction forward to make */
+ /* room for 'mov' stored in 'buf_ptr' */
+ memmove (old_instruction_start + insert_len, old_instruction_start, (size_t)(*code - old_instruction_start));
+ *code += insert_len;
+ memcpy (old_instruction_start, buf, insert_len);
+
+ /* Sandboxed replacement for the normal membase_emit */
+ x86_memindex_emit (*code, dreg, AMD64_R15, offset, basereg, 0);
+
+ } else {
+ /* Normal default behavior, emit membase memory location */
+ x86_membase_emit_body (*code, dreg, basereg, offset);
+ }
+}
+
+
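+/*
+ * amd64_skip_nops recognizes the canonical x86 multi-byte NOP encodings
+ * (90; 66 90; and the 0F 1F forms up to the 8-byte 0F 1F 84 00 00 00 00 00),
+ * so patching can locate the first real instruction of a padded bundle.
+ */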
+static inline unsigned char*
+amd64_skip_nops (unsigned char* code)
+{
+ guint8 in_nop;
+ do {
+ in_nop = 0;
+ if ( code[0] == 0x90) {
+ in_nop = 1;
+ code += 1;
+ }
+ if ( code[0] == 0x66 && code[1] == 0x90) {
+ in_nop = 1;
+ code += 2;
+ }
+ if (code[0] == 0x0f && code[1] == 0x1f
+ && code[2] == 0x00) {
+ in_nop = 1;
+ code += 3;
+ }
+ if (code[0] == 0x0f && code[1] == 0x1f
+ && code[2] == 0x40 && code[3] == 0x00) {
+ in_nop = 1;
+ code += 4;
+ }
+ if (code[0] == 0x0f && code[1] == 0x1f
+ && code[2] == 0x44 && code[3] == 0x00
+ && code[4] == 0x00) {
+ in_nop = 1;
+ code += 5;
+ }
+ if (code[0] == 0x66 && code[1] == 0x0f
+ && code[2] == 0x1f && code[3] == 0x44
+ && code[4] == 0x00 && code[5] == 0x00) {
+ in_nop = 1;
+ code += 6;
+ }
+ if (code[0] == 0x0f && code[1] == 0x1f
+ && code[2] == 0x80 && code[3] == 0x00
+ && code[4] == 0x00 && code[5] == 0x00
+ && code[6] == 0x00) {
+ in_nop = 1;
+ code += 7;
+ }
+ if (code[0] == 0x0f && code[1] == 0x1f
+ && code[2] == 0x84 && code[3] == 0x00
+ && code[4] == 0x00 && code[5] == 0x00
+ && code[6] == 0x00 && code[7] == 0x00) {
+ in_nop = 1;
+ code += 8;
+ }
+ } while ( in_nop );
+ return code;
+}
+
+guint8*
+mono_arch_nacl_skip_nops (guint8* code)
+{
+ return amd64_skip_nops(code);
+}
+
+#endif /*__native_client_codegen__*/
+
static inline void
amd64_patch (unsigned char* code, gpointer target)
{
guint8 rex = 0;
+#ifdef __native_client_codegen__
+ code = amd64_skip_nops (code);
+#endif
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (nacl_is_code_address (code)) {
+ /* For tail calls, code is patched after being installed */
+ /* but not through the normal "patch callsite" method. */
+ unsigned char buf[kNaClAlignment];
+ unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~kNaClAlignmentMask);
+ int ret;
+ memcpy (buf, aligned_code, kNaClAlignment);
+ /* Patch a temp buffer of bundle size, */
+ /* then install to actual location. */
+ amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
+ ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+ g_assert (ret == 0);
+ return;
+ }
+ target = nacl_modify_patch_target (target);
+#endif
+
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
rex = code [0];
/* Only if storage == ArgValuetypeInReg */
ArgStorage pair_storage [2];
gint8 pair_regs [2];
+ int nregs;
} ArgInfo;
typedef struct {
if (*gr >= PARAM_REGS) {
ainfo->storage = ArgOnStack;
- (*stack_size) += sizeof (gpointer);
+ /* Since the same stack slot size is used for all arg */
+ /* types, it needs to be big enough to hold them all */
+ (*stack_size) += sizeof(mgreg_t);
}
else {
ainfo->storage = ArgInIReg;
if (*gr >= FLOAT_PARAM_REGS) {
ainfo->storage = ArgOnStack;
- (*stack_size) += sizeof (gpointer);
+ /* Since the same stack slot size is used for both float */
+ /* types, it needs to be big enough to hold them both */
+ (*stack_size) += sizeof(mgreg_t);
}
else {
/* A double register */
return class1;
}
+#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentAMD64;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskAMD64;
+
+/* Default alignment for Native Client is 32-byte. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
+
+/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
+/* Check that alignment doesn't cross an alignment boundary. */
+guint8*
+mono_arch_nacl_pad(guint8 *code, int pad)
+{
+ const int kMaxPadding = 8; /* see amd64-codegen.h:amd64_padding_size() */
+
+ if (pad == 0) return code;
+ /* assertion: alignment cannot cross a block boundary */
+ g_assert (((uintptr_t)code & (~kNaClAlignmentMask)) ==
+ (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
+ while (pad >= kMaxPadding) {
+ amd64_padding (code, kMaxPadding);
+ pad -= kMaxPadding;
+ }
+ if (pad != 0) amd64_padding (code, pad);
+ return code;
+}
+#endif
static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
guint32 size, quad, nquads, i;
+ /* Keep track of the size used in each quad so we can */
+ /* use the right size when copying args/return vars. */
+ guint32 quadsize [2] = {8, 8};
ArgumentClass args [2];
MonoMarshalType *info = NULL;
MonoClass *klass;
}
#endif
+ /* If this struct can't be split up naturally into 8-byte */
+ /* chunks (registers), pass it on the stack. */
+ if (sig->pinvoke && !pass_on_stack) {
+ guint32 align;
+ guint32 field_size;
+
+ info = mono_marshal_load_type_info (klass);
+ g_assert (info);
+ for (i = 0; i < info->num_fields; ++i) {
+ field_size = mono_marshal_type_size (info->fields [i].field->type,
+ info->fields [i].mspec,
+ &align, TRUE, klass->unicode);
+ if ((info->fields [i].offset < 8) && (info->fields [i].offset + field_size) > 8) {
+ pass_on_stack = TRUE;
+ break;
+ }
+ }
+ }
+
if (pass_on_stack) {
/* Always pass in memory */
ainfo->offset = *stack_size;
if ((quad == 1) && (info->fields [i].offset < 8))
continue;
+ /* How far into this quad this data extends. */
+ /* (8 is size of quad) */
+ quadsize [quad] = info->fields [i].offset + size - (quad * 8);
+
class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
}
g_assert (class1 != ARG_CLASS_NO_CLASS);
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+ ainfo->nregs = nquads;
for (quad = 0; quad < nquads; ++quad) {
switch (args [quad]) {
case ARG_CLASS_INTEGER:
if (*fr >= FLOAT_PARAM_REGS)
args [quad] = ARG_CLASS_MEMORY;
else {
- ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
+ if (quadsize[quad] <= 4)
+ ainfo->pair_storage [quad] = ArgInFloatSSEReg;
+ else ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
ainfo->pair_regs [quad] = *fr;
(*fr) ++;
}
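/*
 * Example of the quadsize distinction above: for struct { float f; } the
 * quad carries only 4 valid bytes, so the value travels in a float SSE slot
 * (movss); struct { double d; } fills the quad and keeps the double slot
 * (movsd). Without this, a float passed by value could be copied with the
 * wrong width.
 */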
if (sig->pinvoke)
*stack_size += ALIGN_TO (info->native_size, 8);
else
- *stack_size += nquads * sizeof (gpointer);
+ *stack_size += nquads * sizeof(mgreg_t);
ainfo->storage = ArgOnStack;
}
}
stack_size += 0x20;
#endif
+ #ifndef MONO_AMD64_NO_PUSHES
if (stack_size & 0x8) {
/* The AMD64 ABI requires each stack frame to be 16 byte aligned */
cinfo->need_stack_align = TRUE;
stack_size += 8;
}
+ #endif
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
+#if defined(MONO_CROSS_COMPILE)
+ return 0;
+#else
#ifndef _MSC_VER
__asm__ __volatile__ ("cpuid"
: "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
*p_edx = info[3];
#endif
return 1;
+#endif
}
/*
int flags;
InitializeCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+ nacl_instruction_depth = TlsAlloc ();
+ TlsSetValue (nacl_instruction_depth, (gpointer)0);
+ nacl_rex_tag = TlsAlloc ();
+ nacl_legacy_prefix_tag = TlsAlloc ();
+#endif
#ifdef MONO_ARCH_NOMAP32BIT
flags = MONO_MMAP_READ;
mono_arch_cleanup (void)
{
DeleteCriticalSection (&mini_arch_mutex);
+#if defined(__native_client_codegen__)
+ TlsFree (nacl_instruction_depth);
+ TlsFree (nacl_rex_tag);
+ TlsFree (nacl_legacy_prefix_tag);
+#endif
}
/*
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
+#ifdef __native_client_codegen__
+ /* NaCl modules may not change the value of RBP, so it cannot be */
+ /* used as a normal register, but it can be used as a frame pointer */
+ cfg->disable_omit_fp = TRUE;
+ cfg->arch.omit_fp = FALSE;
+#endif
+
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
regs = g_list_prepend (regs, (gpointer)AMD64_R10);
regs = g_list_prepend (regs, (gpointer)AMD64_R9);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
#ifdef HOST_WIN32
regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+#ifndef __native_client_codegen__
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+#endif
regs = g_list_prepend (regs, (gpointer)AMD64_R10);
regs = g_list_prepend (regs, (gpointer)AMD64_R9);
/* Reserve space for caller saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- offset += sizeof (gpointer);
+ offset += sizeof(mgreg_t);
}
}
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, sizeof (gpointer));
+ offset = ALIGN_TO (offset, sizeof(mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof(mgreg_t) : sizeof(mgreg_t);
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
} else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof(mgreg_t) : sizeof(mgreg_t);
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
ins->inst_offset = - offset;
}
break;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, sizeof (gpointer));
+ offset = ALIGN_TO (offset, sizeof(mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof(mgreg_t) : sizeof(mgreg_t);
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
// Arguments are not yet supported by the stack map creation code
//cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
} else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof(mgreg_t) : sizeof(mgreg_t);
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (gpointer) : sizeof (gpointer);
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
ins->inst_offset = - offset;
//cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
}
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
{
switch (storage) {
case ArgInIReg:
+#if defined(__mono_ilp32__)
+ return OP_LOADI8_MEMBASE;
+#else
return OP_LOAD_MEMBASE;
+#endif
case ArgInDoubleSSEReg:
return OP_LOADR8_MEMBASE;
case ArgInFloatSSEReg:
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
}
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
+
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
+ }
}
}
}
MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
load->inst_basereg = src->dreg;
- load->inst_offset = part * sizeof (gpointer);
+ load->inst_offset = part * sizeof(mgreg_t);
switch (ainfo->pair_storage [part]) {
case ArgInIReg:
MONO_ADD_INS (cfg->cbb, arg);
}
}
+
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, &ins->klass->byval_arg);
+ }
}
}
g_free (ainfo);
}
+#if !defined(__native_client__)
+#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(greg)
+#else
+/* Correctly handle casts to/from 32-bit pointers without compiler warnings */
+#define PTR_TO_GREG(ptr) (mgreg_t)(uintptr_t)(ptr)
+#define GREG_TO_PTR(greg) (gpointer)(guint32)(greg)
+#endif
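+/* (Under NaCl the runtime is ILP32: pointers are 4 bytes while mgreg_t
+ stays 8 bytes, so PTR_TO_GREG above zero-extends a 32-bit pointer
+ into the 64-bit register image.) */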
+
/*
* mono_arch_get_start_dyn_call:
*
pindex = 0;
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
- p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
+ p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
if (!sig->hasthis)
pindex = 1;
}
if (dinfo->cinfo->vtype_retaddr)
- p->regs [greg ++] = (mgreg_t)ret;
+ p->regs [greg ++] = PTR_TO_GREG(ret);
for (i = pindex; i < sig->param_count; i++) {
MonoType *t = mono_type_get_underlying_type (sig->params [i]);
gpointer *arg = args [arg_index ++];
if (t->byref) {
- p->regs [greg ++] = (mgreg_t)*(arg);
+ p->regs [greg ++] = PTR_TO_GREG(*(arg));
continue;
}
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
+#if !defined(__mono_ilp32__)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
+#endif
g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
- p->regs [greg ++] = (mgreg_t)*(arg);
+ p->regs [greg ++] = PTR_TO_GREG(*(arg));
break;
+#if defined(__mono_ilp32__)
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
+ p->regs [greg ++] = *(guint64*)(arg);
+ break;
+#endif
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
p->regs [greg ++] = *(guint8*)(arg);
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
- p->regs [greg ++] = (mgreg_t)*(arg);
+ p->regs [greg ++] = PTR_TO_GREG(*(arg));
break;
} else {
/* Fall through */
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
- *(gpointer*)ret = (gpointer)res;
+ *(gpointer*)ret = GREG_TO_PTR(res);
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
- *(gpointer*)ret = (gpointer)res;
+ *(gpointer*)ret = GREG_TO_PTR(res);
break;
} else {
/* Fall through */
* not span cache lines. This is required for code patching to work on SMP
* systems.
*/
- if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0)
- amd64_padding (code, 4 - ((guint32)(code + 1 - cfg->native_code) % 4));
+ if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
+ guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
+ amd64_padding (code, pad_size);
+ }
mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
amd64_call_code (code, 0);
}
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START)) {
+ } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
ins->sreg2 = temp->dreg;
}
break;
+#ifndef __mono_ilp32__
case OP_LOAD_MEMBASE:
+#endif
case OP_LOADI8_MEMBASE:
+#ifndef __native_client_codegen__
+ /* Don't generate memindex opcodes (to simplify */
+ /* read sandboxing) */
if (!amd64_is_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
+#endif
break;
+#ifndef __mono_ilp32__
case OP_STORE_MEMBASE_IMM:
+#endif
case OP_STOREI8_MEMBASE_IMM:
if (!amd64_is_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
if (cfg->param_area && cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
amd64_cld (code);
+#if defined(__default_codegen__)
+ amd64_prefix (code, X86_REP_PREFIX);
+ amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+ /* NaCl stos pseudo-instruction */
+ amd64_codegen_pre(code);
+ /* First, clear the upper 32 bits of RDI (mov %edi, %edi) */
+ amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+ /* Add %r15 to %rdi using lea, condition flags unaffected. */
+ amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
+ amd64_codegen_post(code);
+#endif /* __native_client_codegen__ */
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
amd64_pop_reg (code, AMD64_RDI);
/* Load the destination address */
g_assert (loc->opcode == OP_REGOFFSET);
- amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
+ amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof(mgreg_t)), cinfo->ret.pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
#ifndef DISABLE_JIT
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void
+mono_nacl_gc()
+{
+#ifdef __native_client_gc__
+ __nacl_suspend_thread_if_needed();
+#endif
+}
+#endif
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
}
+#if defined(__native_client_codegen__)
+ /* For Native Client, all indirect call/jump targets must be */
+ /* 32-byte aligned. Exception handler blocks are jumped to */
+ /* indirectly as well. */
+ gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
+ (bb->flags & BB_EXCEPTION_HANDLER);
+
+ if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
+ int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
+ if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
+ cfg->code_len += pad;
+ bb->native_offset = cfg->code_len;
+ }
+#endif /*__native_client_codegen__*/
+
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
- if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
+#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
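+/* NACL_SIZE(a, b) expands to a for the default codegen and to b for
+ Native Client builds, so NaCl reserves kNaClAlignment extra bytes of
+ headroom for bundle padding. */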
+
+ if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code(cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
case OP_STOREI2_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
break;
+ /* In AMD64 NaCl, pointers are 4 bytes, */
+ /* so STORE_* != STOREI8_*. Likewise below. */
case OP_STORE_MEMBASE_REG:
+ amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer));
+ break;
case OP_STOREI8_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
break;
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
break;
case OP_STORE_MEMBASE_IMM:
+#ifndef __native_client_codegen__
+ /* In NaCl, this could be a PCONST type, which could */
+ /* mean a pointer type was copied directly into the */
+ /* lower 32-bits of inst_imm, so for InvalidPtr==-1 */
+ /* the value would be 0x00000000FFFFFFFF which is */
+ /* not proper for an imm32 unless you cast it. */
+ g_assert (amd64_is_imm32 (ins->inst_imm));
+#endif
+ amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
+ break;
case OP_STOREI8_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_LOAD_MEM:
+#ifdef __mono_ilp32__
+ /* In ILP32, pointers are 4 bytes, so separate these */
+ /* cases, use literal 8 below where we really want 8 */
+ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
+ break;
+#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
if (amd64_is_imm32 (ins->inst_imm))
- amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, sizeof (gpointer));
+ amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE);
break;
case OP_LOADU2_MEM:
+ /* For NaCl, pointers are 4 bytes, so separate these */
+ /* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE);
break;
case OP_LOAD_MEMBASE:
+ g_assert (amd64_is_imm32 (ins->inst_offset));
+ amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer));
+ break;
case OP_LOADI8_MEMBASE:
+ /* Use literal 8 instead of sizeof pointer or */
+ /* register, we really want 8 for this opcode */
g_assert (amd64_is_imm32 (ins->inst_offset));
- amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
+ amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8);
break;
case OP_LOADI4_MEMBASE:
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_AOTCONST:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
- amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
+ amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
break;
case OP_JUMP_TABLE:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
break;
case OP_MOVE:
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
+ amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof(mgreg_t));
break;
case OP_AMD64_SET_XMMREG_R4: {
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
else {
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof (gpointer);
+ pos -= sizeof(mgreg_t);
/* Restore callee-saved registers */
for (i = AMD64_NREG - 1; i > 0; --i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
- pos += 8;
+ amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
+ pos += sizeof(mgreg_t);
}
}
/* Copy arguments on the stack to our argument area */
- for (i = 0; i < call->stack_usage; i += 8) {
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, 8);
+ for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
}
if (pos)
else
amd64_set_reg_template (code, AMD64_R11);
amd64_jump_reg (code, AMD64_R11);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CHECK_THIS:
break;
case OP_ARGLIST: {
amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
- amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
+ amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
break;
}
case OP_CALL:
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method, FALSE);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
}
amd64_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
call = (MonoCallInst*)ins;
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
- amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (gpointer), 8);
+ amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
/* Make the call */
amd64_call_reg (code, AMD64_R10);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
+
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
break;
case OP_START_HANDLER: {
+ /* Even though we're saving RSP, use sizeof */
+ /* gpointer because spvar is of type IntPtr */
+ /* see: mono_create_spvar_for_region */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, 8);
+ amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));
if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY)) &&
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+ amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
amd64_ret (code);
break;
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, 8);
+ amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
/* The local allocator will put the result into RAX */
amd64_ret (code);
break;
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
+ case OP_NACL_GC_SAFE_POINT: {
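+ /* Call out so the NaCl collector can suspend this thread at a
+ known safe location; mono_nacl_gc () is a no-op unless built
+ with __native_client_gc__. */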
+#if defined(__native_client_codegen__)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+#endif
+ break;
+ }
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((code - cfg->native_code - offset) > max_len) {
+#if !defined(__native_client_codegen__)
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
+#endif
}
last_ins = ins;
gint32 lmf_offset = cfg->arch.lmf_offset;
gboolean args_clobbered = FALSE;
gboolean trace = FALSE;
+#ifdef __native_client_codegen__
+ guint alignment_check;
+#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
+#if defined(__default_codegen__)
code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
+ /* native_code_alloc is not 32-byte aligned, native_code is. */
+ cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
+
+ /* Align native_code to next nearest kNaClAlignment byte. */
+ cfg->native_code = (gpointer)((uintptr_t)cfg->native_code_alloc + kNaClAlignment);
+ cfg->native_code = (gpointer)((uintptr_t)cfg->native_code & ~(uintptr_t)kNaClAlignmentMask);
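+ /* Example: native_code_alloc == 0x1007 gives 0x1007 + 32 == 0x1027,
+ masked down to 0x1020, the next 32-byte bundle boundary. */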
+
+ code = cfg->native_code;
+
+ alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
+ g_assert (alignment_check == 0);
+#endif
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
trace = TRUE;
// IP saved at CFA - 8
mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset);
async_exc_point (code);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
if (!cfg->arch.omit_fp) {
amd64_push_reg (code, AMD64_RBP);
#ifdef HOST_WIN32
mono_arch_unwindinfo_add_push_nonvol (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
- amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
+ amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
async_exc_point (code);
#ifdef HOST_WIN32
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
amd64_push_reg (code, i);
- pos += sizeof (gpointer);
+ pos += 8; /* AMD64 push inst is always 8 bytes, no way to change it */
offset += 8;
mono_emit_unwind_op_offset (cfg, code, i, - offset);
async_exc_point (code);
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - offset, SLOT_NOREF);
}
}
if (cfg->arch.omit_fp)
// FIXME:
g_assert_not_reached ();
- cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (gpointer));
+ cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof(mgreg_t));
}
if (cfg->arch.omit_fp) {
/*
- * On enter, the stack is misaligned by the the pushing of the return
+ * On enter, the stack is misaligned by the pushing of the return
* address. It is either made aligned by the pushing of %rbp, or by
* this.
*/
alloc_size = ALIGN_TO (cfg->stack_offset, 8);
- if ((alloc_size % 16) == 0)
+ if ((alloc_size % 16) == 0) {
alloc_size += 8;
+ /* Mark the padding slot as NOREF */
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (mgreg_t), SLOT_NOREF);
+ }
} else {
alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
-
+ if (cfg->stack_offset != alloc_size) {
+ /* Mark the padding slot as NOREF */
+ mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF);
+ }
+ cfg->arch.sp_fp_offset = alloc_size;
alloc_size -= pos;
}
if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
while (required_code_size >= (cfg->code_size - offset))
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
amd64_cld (code);
+#if defined(__default_codegen__)
+ amd64_prefix (code, X86_REP_PREFIX);
+ amd64_stosl (code);
+#elif defined(__native_client_codegen__)
+ /* NaCl stos pseudo-instruction */
+ amd64_codegen_pre (code);
+ /* First, clear the upper 32 bits of RDI (mov %edi, %edi) */
+ amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
+ /* Add %r15 to %rdi using lea, condition flags unaffected. */
+ amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
+ amd64_codegen_post (code);
+#endif /* __native_client_codegen__ */
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
+#ifndef __native_client_codegen__
case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
+#endif
#ifdef HOST_WIN32
case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
}
}
+
+ /* These can't contain refs */
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
+ #ifdef HOST_WIN32
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
+ #endif
+
}
/* Save callee saved registers */
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+
save_area_offset += 8;
async_exc_point (code);
}
g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
(cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
- amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 8);
+ amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
}
/* compute max_length in order to use short forward jumps */
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_length += LOOP_ALIGNMENT;
+#ifdef __native_client_codegen__
+ /* max alignment for native client */
+ max_length += kNaClAlignment;
+#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
+#ifdef __native_client_codegen__
+ {
+ int space_in_block = kNaClAlignment -
+ ((max_length + cfg->code_len) & kNaClAlignmentMask);
+ int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+ if (space_in_block < max_len && max_len < kNaClAlignment) {
+ max_length += space_in_block;
+ }
+ }
+#endif /*__native_client_codegen__*/
max_length += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
}
/* Save lmf_addr */
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
/* Save previous_lmf */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
/* Set new lmf */
amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
- amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
+ amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
}
}
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
mono_jit_stats.code_reallocs++;
}
* through the mono_lmf_addr TLS variable.
*/
/* reg = previous_lmf */
- amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
} else {
/* Restore previous lmf */
- amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
- amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
- amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+ amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
}
/* Restore caller saved regs */
amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
}
if (cfg->used_int_regs & (1 << AMD64_R15)) {
+#if defined(__default_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#elif defined(__native_client_codegen__)
+ g_assert_not_reached();
+#endif
}
#ifdef HOST_WIN32
if (cfg->used_int_regs & (1 << AMD64_RDI)) {
else {
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof (gpointer);
+ pos -= sizeof(mgreg_t);
if (pos) {
- if (pos == - sizeof (gpointer)) {
+ if (pos == - sizeof(mgreg_t)) {
/* Only one register, so avoid lea */
for (i = AMD64_NREG - 1; i > 0; --i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
+ amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)), sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+ amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
break;
case ArgInDoubleSSEReg:
- amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+ amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
break;
case ArgNone:
break;
code_size += 8 + 7; /*sizeof (void*) + alignment */
}
+#ifdef __native_client_codegen__
+ /* Give us extra room on Native Client. This could be */
+ /* more carefully calculated, but bundle alignment makes */
+ /* it much trickier, so *2 like other places is good. */
+ code_size *= 2;
+#endif
+
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
mono_jit_stats.code_reallocs++;
}
/* do nothing */
break;
}
+ g_assert(code < cfg->native_code + cfg->code_size);
}
/* Handle relocations with RIP relative addressing */
switch (patch_info->type) {
case MONO_PATCH_INFO_R8:
case MONO_PATCH_INFO_R4: {
- guint8 *pos;
+ guint8 *pos, *patch_pos, *target_pos;
/* The SSE opcodes require a 16 byte alignment */
+#if defined(__default_codegen__)
code = (guint8*)ALIGN_TO (code, 16);
- memset (orig_code, 0, code - orig_code);
+#elif defined(__native_client_codegen__)
+ {
+ /* Pad this out with HLT instructions */
+ /* or we can get garbage bytes emitted */
+ /* which will fail validation */
+ guint8 *aligned_code;
+ /* extra align to make room for */
+ /* mov/push below */
+ int extra_align = patch_info->type == MONO_PATCH_INFO_R8 ? 2 : 1;
+ aligned_code = (guint8*)ALIGN_TO (code + extra_align, 16);
+ /* The technique of hiding data in an */
+ /* instruction has a problem here: we */
+ /* need the data aligned to a 16-byte */
+ /* boundary but the instruction cannot */
+ /* cross the bundle boundary. so only */
+ /* odd multiples of 16 can be used */
+ if ((intptr_t)aligned_code % kNaClAlignment == 0) {
+ aligned_code += 16;
+ }
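+ /* (With 32-byte bundles only offset 16 within a bundle works:
+ at offset 0 the mov/push bytes written just before the data
+ would cross the preceding bundle boundary.) */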
+ while (code < aligned_code) {
+ *(code++) = 0xf4; /* hlt */
+ }
+ }
+#endif
pos = cfg->native_code + patch_info->ip.i;
-
- if (IS_REX (pos [1]))
- *(guint32*)(pos + 5) = (guint8*)code - pos - 9;
- else
- *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
+ if (IS_REX (pos [1])) {
+ patch_pos = pos + 5;
+ target_pos = code - pos - 9;
+ }
+ else {
+ patch_pos = pos + 4;
+ target_pos = code - pos - 8;
+ }
if (patch_info->type == MONO_PATCH_INFO_R8) {
+#ifdef __native_client_codegen__
+ /* Hide 64-bit data in a */
+ /* "mov imm64, r11" instruction. */
+ /* write it before the start of */
+ /* the data*/
+ *(code-2) = 0x49; /* prefix */
+ *(code-1) = 0xbb; /* mov X, %r11 */
+#endif
*(double*)code = *(double*)patch_info->data.target;
code += sizeof (double);
} else {
+#ifdef __native_client_codegen__
+ /* Hide 32-bit data in a */
+ /* "push imm32" instruction. */
+ *(code-1) = 0x68; /* push */
+#endif
*(float*)code = *(float*)patch_info->data.target;
code += sizeof (float);
}
+ *(guint32*)(patch_pos) = target_pos;
+
remove = TRUE;
break;
}
tmp->next = patch_info->next;
}
}
+ g_assert (code < cfg->native_code + cfg->code_size);
}
cfg->code_len = code - cfg->native_code;
return can_write;
}
+#if defined(__native_client_codegen__)
+/* For membase calls, we want the base register. For Native Client, */
+/* all indirect calls have the following sequence with the given sizes: */
+/* mov %eXX,%eXX [2-3] */
+/* mov disp(%r15,%rXX,scale),%r11d [4-8] */
+/* and $0xffffffffffffffe0,%r11d [4] */
+/* add %r15,%r11 [3] */
+/* callq *%r11 [3] */
+
+/* Determine if code points to a NaCl call-through-register sequence, */
+/* (i.e., the last 3 instructions listed above) */
+int
+is_nacl_call_reg_sequence(guint8* code)
+{
+ const char *sequence = "\x41\x83\xe3\xe0" /* and */
+ "\x4d\x03\xdf" /* add */
+ "\x41\xff\xd3"; /* call */
+ return memcmp(code, sequence, 10) == 0;
+}
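+/* (The 10 bytes compared above are the and [4] + add [3] + callq [3]
+ encodings from the sequence sizes listed above.) */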
+
+/* Determine if code points to the first opcode of the mov membase component */
+/* of an indirect call sequence (i.e. the first 2 instructions listed above) */
+/* (there could be a REX prefix before the opcode but it is ignored) */
+static int
+is_nacl_indirect_call_membase_sequence(guint8* code)
+{
+ /* Check for mov opcode, reg-reg addressing mode (mod = 3), */
+ return code[0] == 0x8b && amd64_modrm_mod(code[1]) == 3 &&
+ /* and that src reg = dest reg */
+ amd64_modrm_reg(code[1]) == amd64_modrm_rm(code[1]) &&
+ /* Check that next inst is mov, uses SIB byte (rm = 4), */
+ IS_REX(code[2]) &&
+ code[3] == 0x8b && amd64_modrm_rm(code[4]) == 4 &&
+ /* and has dst of r11 and base of r15 */
+ (amd64_modrm_reg(code[4]) + amd64_rex_r(code[2])) == AMD64_R11 &&
+ (amd64_sib_base(code[5]) + amd64_rex_b(code[2])) == AMD64_R15;
+}
+#endif /* __native_client_codegen__ */
+
int
mono_arch_get_this_arg_reg (guint8 *code)
{
g_assert ((code - start) < 64);
}
+ nacl_global_codeman_validate(&start, 64, &code);
+
mono_debug_add_delegate_trampoline (start, code - start);
if (code_len)
#ifdef MONO_ARCH_HAVE_IMT
+#if defined(__default_codegen__)
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
#define BR_SMALL_SIZE 2
#define MOV_REG_IMM_SIZE 10
#define MOV_REG_IMM_32BIT_SIZE 6
#define JUMP_REG_SIZE (2 + 1)
+#elif defined(__native_client_codegen__)
+/* NaCl N-byte instructions can be padded up to N-1 bytes */
+#define CMP_SIZE ((6 + 1) * 2 - 1)
+#define CMP_REG_REG_SIZE ((4 + 1) * 2 - 1)
+#define BR_SMALL_SIZE (2 * 2 - 1)
+#define BR_LARGE_SIZE (6 * 2 - 1)
+#define MOV_REG_IMM_SIZE (10 * 2 - 1)
+#define MOV_REG_IMM_32BIT_SIZE (6 * 2 - 1)
+/* Jump reg for NaCl adds a mask (+4) and add (+3) */
+#define JUMP_REG_SIZE ((2 + 1 + 4 + 3) * 2 - 1)
+/* Jump membase's size is large and unpredictable */
+/* in native client, just pad it out a whole bundle. */
+#define JUMP_MEMBASE_SIZE (kNaClAlignment)
+#endif
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
+#ifdef __native_client_codegen__
+ item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
}
item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
} else {
/* with assert below:
* item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
*/
+#ifdef __native_client_codegen__
+ item->chunk_size += JUMP_MEMBASE_SIZE;
+#endif
}
}
} else {
}
size += item->chunk_size;
}
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* In Native Client, we don't re-use thunks, allocate from the */
+ /* normal code manager paths. */
+ code = mono_domain_code_reserve (domain, size);
+#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
+#endif
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R11, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
if (item->has_target_code) {
- amd64_mov_reg_imm (code, AMD64_R11, item->value.target_code);
- amd64_jump_reg (code, AMD64_R11);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code);
+ amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
} else {
- amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R11, 0);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
}
if (fail_case) {
amd64_patch (item->jmp_code, code);
- amd64_mov_reg_imm (code, AMD64_R11, fail_tramp);
- amd64_jump_reg (code, AMD64_R11);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp);
+ amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
item->jmp_code = NULL;
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R11, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
- amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R11, 0);
+ /* See the comment below about R10 */
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
amd64_patch (item->jmp_code, code);
amd64_breakpoint (code);
item->jmp_code = NULL;
#else
- amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R11, 0);
+ /* We're using R11 (MONO_ARCH_IMT_SCRATCH_REG) here because R10
+ (MONO_ARCH_IMT_REG) needs to be preserved. R10 needs
+ to be preserved for calls which
+ require a runtime generic context,
+ but interface calls don't. */
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
#endif
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R11, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
+ nacl_domain_code_validate(domain, &start, size, &code);
+
return start;
}
#include <mono/utils/mono-sigcontext.h>
#include <glib.h>
+#ifdef __native_client_codegen__
+#define kNaClAlignmentAMD64 32
+#define kNaClAlignmentMaskAMD64 (kNaClAlignmentAMD64 - 1)
+
+/* TODO: use kamd64NaClLengthOfCallImm */
+/* temporarily using kNaClAlignmentAMD64 so padding in */
+/* image-writer.c doesn't happen */
+#define kNaClLengthOfCallImm kNaClAlignmentAMD64
+
+int is_nacl_call_reg_sequence(guint8* code);
+#endif
+
#ifdef HOST_WIN32
#include <windows.h>
/* use SIG* defines if possible */
gpointer lmf_addr;
/* This is only set in trampoline LMF frames */
MonoMethod *method;
+#if defined(__default_codegen__) || defined(HOST_WIN32)
guint64 rip;
+#elif defined(__native_client_codegen__)
+ /* On 64-bit compilers this field is naturally 8-byte aligned; */
+ /* forcing the alignment makes the layout match on 32-bit compilers. */
+ guint64 rip __attribute__ ((aligned(8)));
+#endif
guint64 rbx;
guint64 rbp;
guint64 rsp;
gint32 localloc_offset;
gint32 reg_save_area_offset;
gint32 stack_alloc_size;
+ gint32 sp_fp_offset;
gboolean omit_fp, omit_fp_computed, no_pushes;
gpointer cinfo;
gint32 async_point_count;
*/
#define MONO_ARCH_VARARG_ICALLS 1
-#ifndef HOST_WIN32
+#if !defined( HOST_WIN32 ) && !defined(__native_client__) && !defined(__native_client_codegen__)
#define MONO_ARCH_USE_SIGACTION 1
#endif
-#endif /* HOST_WIN32 */
+#endif /* !HOST_WIN32 && !__native_client__ */
#if defined (__APPLE__)
#define MONO_ARCH_HAVE_IMT 1
#define MONO_ARCH_HAVE_TLS_GET 1
#define MONO_ARCH_IMT_REG AMD64_R10
+#define MONO_ARCH_IMT_SCRATCH_REG AMD64_R11
#define MONO_ARCH_VTABLE_REG MONO_AMD64_ARG_REG1
/*
* We use r10 for the imt/rgctx register rather than r11 because r11 is
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_AOT_SUPPORTED 1
-#ifndef HOST_WIN32
+#if !defined( HOST_WIN32 ) && !defined( __native_client__ )
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#else
#define DISABLE_DEBUGGER_AGENT 1
#define MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD 1
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
+ #define MONO_ARCH_GC_MAPS_SUPPORTED 1
gboolean
mono_amd64_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig) MONO_INTERNAL;
*/
MINI_OP(OP_LIVERANGE_END, "liverange_end", NONE, NONE, NONE)
+ /* GC support */
+ /*
+ * mono_arch_output_basic_block () will set the backend.pc_offset field to the current pc
+ * offset.
+ */
+ MINI_OP(OP_GC_LIVENESS_DEF, "gc_liveness_def", NONE, NONE, NONE)
+ MINI_OP(OP_GC_LIVENESS_USE, "gc_liveness_use", NONE, NONE, NONE)
+
+ /*
+ * This marks the location inside a basic block where a GC tracked spill slot has been
+ * defined. The spill slot is assumed to be alive until the end of the bblock.
+ */
+ MINI_OP(OP_GC_SPILL_SLOT_LIVENESS_DEF, "gc_spill_slot_liveness_def", NONE, NONE, NONE)
+
+ /*
+ * This marks the location inside a basic block where a GC tracked param area slot has
+ * been defined. The slot is assumed to be alive until the next call.
+ */
+ MINI_OP(OP_GC_PARAM_SLOT_LIVENESS_DEF, "gc_param_slot_liveness_def", NONE, NONE, NONE)
+
/* Arch specific opcodes */
+/* #if defined(__native_client_codegen__) || defined(__native_client__) */
+/* We have to define these in terms of the TARGET defines, not NaCl defines */
+/* because genmdesc.pl doesn't have multiple defines per platform. */
+#if defined(TARGET_AMD64) || defined(TARGET_X86)
+MINI_OP(OP_NACL_GC_SAFE_POINT, "nacl_gc_safe_point", IREG, NONE, NONE)
+#endif
+
#if defined(TARGET_X86) || defined(TARGET_AMD64)
MINI_OP(OP_X86_TEST_NULL, "x86_test_null", NONE, IREG, NONE)
MINI_OP(OP_X86_COMPARE_MEMBASE_REG,"x86_compare_membase_reg", NONE, IREG, IREG)
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-static gpointer
-mono_realloc_native_code (MonoCompile *cfg)
-{
-#ifdef __native_client_codegen__
- guint old_padding;
- gpointer native_code;
- guint alignment_check;
-
- /* Save the old alignment offset so we can re-align after the realloc. */
- old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
-
- cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
- cfg->code_size + kNaClAlignment);
-
- /* Align native_code to next nearest kNaClAlignment byte. */
- native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
- native_code = (guint)native_code & ~kNaClAlignmentMask;
-
- /* Shift the data to be 32-byte aligned again. */
- memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
-
- alignment_check = (guint)native_code & kNaClAlignmentMask;
- g_assert (alignment_check == 0);
- return native_code;
-#else
- return g_realloc (cfg->native_code, cfg->code_size);
-#endif
-}
#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentX86;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskX86;
+
+/* Default alignment for Native Client is 32-byte. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
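+/* (-32 is the signed byte encoding of the 0xe0 mask: and'ing an
+ indirect-branch target with it clears the low 5 bits, forcing
+ 32-byte alignment.) */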
/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
/* Check that alignment doesn't cross an alignment boundary. */
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
/*
* EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
* have locals larger than 8 bytes we need to make sure that
#ifndef DISABLE_JIT
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void
+mono_nacl_gc()
+{
+#ifdef __native_client_gc__
+ __nacl_suspend_thread_if_needed();
+#endif
+}
+#endif
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
* bytes to pop, we want to use pops. GCC does this (note it won't happen
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
call = (MonoCallInst*)ins;
x86_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
+ case OP_NACL_GC_SAFE_POINT: {
+#if defined(__native_client_codegen__)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc);
+#endif
+ break;
+ }
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
g_assert_not_reached ();
case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_EXIT:
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (nacl_is_code_address (code)) {
+ /* For tail calls, code is patched after being installed */
+ /* but not through the normal "patch callsite" method. */
+ unsigned char buf[kNaClAlignment];
+ unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~(uintptr_t)kNaClAlignmentMask);
+ unsigned char *_target = target;
+ int ret;
+ /* All patch targets modified in x86_patch */
+ /* are IP relative. */
+ _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
+ memcpy (buf, aligned_code, kNaClAlignment);
+ /* Patch a temp buffer of bundle size, */
+ /* then install to actual location. */
+ x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
+ ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+ g_assert (ret == 0);
+ }
+ else {
+ x86_patch (ip, target);
+ }
+#else
x86_patch (ip, target);
+#endif
break;
case MONO_PATCH_INFO_NONE:
break;
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8: {
+ guint32 offset = mono_arch_get_patch_offset (ip);
+ *((gconstpointer *)(ip + offset)) = target;
+ break;
+ }
default: {
guint32 offset = mono_arch_get_patch_offset (ip);
+#if !defined(__native_client__)
*((gconstpointer *)(ip + offset)) = target;
+#else
+ *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
+#endif
break;
}
}
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
/* native_code_alloc is not 32-byte aligned, native_code is. */
cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
g_assert(alignment_check == 0);
-#else
- code = cfg->native_code = g_malloc (cfg->code_size);
#endif
/* Offset between RSP and the CFA */
mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
+ } else {
+ cfg->frame_reg = X86_ESP;
}
alloc_size = cfg->stack_offset;
guint32 size;
/* Compute size of code following the push <OFFSET> */
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ size = 5 + 5;
+#elif defined(__native_client_codegen__)
code = mono_nacl_align (code);
size = kNaClAlignment;
-#else
- size = 5 + 5;
#endif
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
-#ifdef __native_client_codegen__
-/* These constants should be coming from cpu-x86.md */
+#if defined(__default_codegen__)
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 5
+#elif defined(__native_client_codegen__)
/* I suspect the size calculation below is actually incorrect. */
-/* TODO: fix the calculation that uses these sizes. */
+/* TODO: fix the calculation that uses these sizes. */
#define BR_SMALL_SIZE 16
#define BR_LARGE_SIZE 12
-#else
-#define BR_SMALL_SIZE 2
-#define BR_LARGE_SIZE 5
-#endif /* __native_client_codegen__ */
+#endif /*__native_client_codegen__*/
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
int size = 0;
guint8 *code, *start;
-#ifdef __native_client_codegen__
- /* g_print("mono_arch_build_imt_thunk needs to be aligned.\n"); */
-#endif
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
}
size += item->chunk_size;
}
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* In Native Client, we don't re-use thunks, allocate from the */
+ /* normal code manager paths. */
+ code = mono_domain_code_reserve (domain, size);
+#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
+#endif
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
g_free (buff);
}
+ nacl_domain_code_validate (domain, &start, size, &code);
+
return start;
}
get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
{
guint8 *code, *start;
+ int code_reserve = 64;
/*
* The stack contains:
*/
if (has_target) {
- start = code = mono_global_codeman_reserve (64);
+ start = code = mono_global_codeman_reserve (code_reserve);
/* Replace the this argument with the target */
x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- g_assert ((code - start) < 64);
+ g_assert ((code - start) < code_reserve);
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
#ifdef __native_client_codegen__
/* TODO: calculate this size correctly */
- int code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
+ code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
#else
- int code_reserve = 8 + (param_count * 8);
+ code_reserve = 8 + (param_count * 8);
#endif /* __native_client_codegen__ */
/*
* The stack contains:
g_assert ((code - start) < code_reserve);
}
+ nacl_global_codeman_validate(&start, code_reserve, &code);
mono_debug_add_delegate_trampoline (start, code - start);
if (code_len)
#include <mono/arch/x86/x86-codegen.h>
#include <mono/utils/mono-sigcontext.h>
+
+#ifdef __native_client_codegen__
+#define kNaClAlignmentX86 32
+#define kNaClAlignmentMaskX86 (kNaClAlignmentX86 - 1)
+
+#define kNaClLengthOfCallImm kx86NaClLengthOfCallImm
+#endif
+
#ifdef HOST_WIN32
#include <windows.h>
/* use SIG* defines if possible */
#undef MONO_ARCH_USE_SIGACTION
#endif
-#if defined(__native_client_codegen__) || defined(__native_client__)
-#define NACL_SIZE(a, b) (b)
-#else
-#define NACL_SIZE(a, b) (a)
-#endif
-
#ifndef HOST_WIN32
#ifdef HAVE_WORKING_SIGALTSTACK
/*This is the max size of the locals area of a given frame. I think 1MB is a safe default for now*/
#define MONO_ARCH_MAX_FRAME_SIZE 0x100000
+ /*This is how much a try block must be extended when it is preceded by a Monitor.Enter() call.
+ It's 4 bytes: one byte more than the 'add 0x10, %esp' instruction takes. It is used to pop the
+ arguments from the Monitor.Enter() call and must already be protected.*/
+ #define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 4
+
struct MonoLMF {
/*
* If the lowest bit is set to 1, then this is a trampoline LMF frame.
#define MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD 1
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
+ #define MONO_ARCH_GC_MAPS_SUPPORTED 1
gboolean
mono_x86_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig) MONO_INTERNAL;
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
-#ifdef __native_client_codegen__
-/* Default alignment for Native Client is 32-byte. */
-guint8 nacl_align_byte = 0xe0;
-#endif
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
MINI_FAST_TLS_DECLARE(mono_jit_tls);
#endif
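+ /* Ports that define no adjustment fall back to 1 byte, presumably
+ because they have no argument-popping instruction to skip after
+ the Monitor.Enter() call. */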
+ #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
+ #define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
+ #endif
+
MonoTraceSpec *mono_jit_trace_calls = NULL;
gboolean mono_break_on_exc = FALSE;
gboolean mono_compile_aot = FALSE;
gboolean mono_dont_free_global_codeman;
+gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#if defined(__default_codegen__)
+ return g_realloc (cfg->native_code, cfg->code_size);
+#elif defined(__native_client_codegen__)
+ guint old_padding;
+ gpointer native_code;
+ guint alignment_check;
+
+ /* Save the old alignment offset so we can re-align after the realloc. */
+ old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+ cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
+ cfg->code_size + kNaClAlignment);
+
+ /* Align native_code to next nearest kNaClAlignment byte. */
+ native_code = (gpointer)((uintptr_t)cfg->native_code_alloc + kNaClAlignment);
+ native_code = (gpointer)((uintptr_t)native_code & ~(uintptr_t)kNaClAlignmentMask);
+
+ /* Shift the data to be 32-byte aligned again. */
+ memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+ alignment_check = (guint)native_code & kNaClAlignmentMask;
+ g_assert (alignment_check == 0);
+ return native_code;
+#else
+ g_assert_not_reached ();
+ return cfg->native_code;
+#endif
+}
+
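For context, mono_realloc_native_code is written for emission loops of roughly this shape (a sketch assuming the usual Mono emission variables; the real call sites are in the per-architecture backends):

	if (offset + max_len > cfg->code_size) {
		/* Grow the buffer, then re-derive the emission cursor: under NaCl */
		/* the realloc can both move and re-align the code block. */
		cfg->code_size = (offset + max_len) * 2;
		cfg->native_code = mono_realloc_native_code (cfg);
		code = cfg->native_code + offset;
	}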
#ifdef __native_client_codegen__
/* Prevent instructions from straddling a 32-byte alignment boundary. */
}
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
+ * which we are generating code, return a pointer to the destination in the
+ * dynamic code segment into which the code will be copied when
+ * mono_global_codeman_commit is called.
+ * LOCKING: Acquires the jit lock.
+ */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+ void *dest;
+ mono_jit_lock ();
+ dest = nacl_code_manager_get_code_dest (global_codeman, data);
+ mono_jit_unlock ();
+ return dest;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+ mono_jit_lock ();
+ mono_code_manager_commit (global_codeman, data, size, newsize);
+ mono_jit_unlock ();
+}
+
+/*
+ * Convenience function which calls mono_global_codeman_commit to validate and
+ * copy the code. The caller sets *buf_base and *buf_size to the start and size
+ * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
+ * the byte after the last instruction byte. On return, *buf_base will point to
+ * the start of the copied code in the code segment, and *code_end will point after
+ * the end of the copied code.
+ */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+ guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
+ mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
+ *code_end = tmp + (*code_end - *buf_base);
+ *buf_base = tmp;
+}
+#else
+/* no-op versions of Native Client functions */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+ return data;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+}
+
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+}
+
+#endif /* __native_client__ */
+
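The intended usage pattern for these helpers, mirrored by the trampoline changes later in this patch (sketch):

	guint8 *buf, *code;
	buf = code = mono_global_codeman_reserve (tramp_size);
	/* ... emit instructions, advancing 'code' ... */
	/* Under NaCl this validates the bundle rules, copies the code into the */
	/* dynamic code segment and rewrites buf/code to point there; on other */
	/* targets it is a no-op. */
	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);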
/**
* mono_create_unwind_op:
*
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
+ if (cfg->compute_gc_maps) {
+ if (type->byref) {
+ mono_mark_vreg_as_mp (cfg, vreg);
+ } else {
+ MonoType *t = mini_type_get_underlying_type (NULL, type);
+ if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || MONO_TYPE_IS_REFERENCE (t)) {
+ inst->flags |= MONO_INST_GC_TRACK;
+ mono_mark_vreg_as_ref (cfg, vreg);
+ }
+ }
+ }
+
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
+ void
+ mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
+ {
+ if (vreg >= cfg->vreg_is_ref_len) {
+ gboolean *tmp = cfg->vreg_is_ref;
+ int size = cfg->vreg_is_ref_len;
+
+ while (vreg >= cfg->vreg_is_ref_len)
+ cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
+ cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
+ if (size)
+ memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
+ }
+ cfg->vreg_is_ref [vreg] = TRUE;
+ }
+
+ void
+ mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
+ {
+ if (vreg >= cfg->vreg_is_mp_len) {
+ gboolean *tmp = cfg->vreg_is_mp;
+ int size = cfg->vreg_is_mp_len;
+
+ while (vreg >= cfg->vreg_is_mp_len)
+ cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
+ cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
+ if (size)
+ memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
+ }
+ cfg->vreg_is_mp [vreg] = TRUE;
+ }
+
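Both markers use the same grow-by-doubling scheme over a gboolean array; the FIXME on the vreg_is_ref field later in this patch suggests a bitmap instead. For reference, the equivalent bitmap operations would look like this (sketch; the bitmap field is hypothetical):

	/* set:  cfg->vreg_ref_bitmap [vreg >> 3] |= 1 << (vreg & 7); */
	/* test: (cfg->vreg_ref_bitmap [vreg >> 3] >> (vreg & 7)) & 1; */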
/*
* Transform a MonoInst into a load from the variable of index var_index.
*/
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
- *stack_align = sizeof (gpointer);
+ *stack_align = sizeof (mgreg_t);
for (l = vars; l; l = l->next) {
vmv = l->data;
inst = cfg->varinfo [vmv->idx];
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
{
if (ins == NULL) {
ins = bb->code;
+ if (ins)
+ ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
if ((jit_tls = TlsGetValue (mono_jit_tls_id)))
return jit_tls->lmf;
-
- g_assert_not_reached ();
+ /*
+ * We do not assert here because this function can be called from
+ * mini-gc.c on a thread that has not executed any managed code, yet
+ * (the thread object allocation can trigger a collection).
+ */
return NULL;
#endif
}
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* We need to transform this into the destination address; */
+ /* it's emitted as an immediate in the code. */
+ target = nacl_inverse_modify_patch_target(ip);
+#else
target = ip;
+#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
}
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
+#if defined(__native_client__) && defined(__native_client_codegen__)
+#if defined(TARGET_AMD64)
+ /* This target is an absolute address, not relative to the */
+ /* current code being emitted on AMD64. */
+ target = nacl_inverse_modify_patch_target(target);
+#endif
+#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
gpointer *jump_table;
int i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak, but we don't care since we're */
+ /* not deleting JIT'd methods anyway */
+ jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (method && method->dynamic) {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
}
+#endif
- for (i = 0; i < patch_info->data.table->table_size; i++)
+ for (i = 0; i < patch_info->data.table->table_size; i++) {
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* 'code' is relative to the current code blob, we */
+ /* need to do this transform on it to make the */
+ /* pointers in this table absolute */
+ jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#endif
+ }
+
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* jump_table is in the data section; we need to transform */
+ /* it here so that when it gets modified in amd64_patch it */
+ /* will point back to the absolute data address */
+ target = nacl_inverse_modify_patch_target (jump_table);
+#else
target = jump_table;
+#endif
break;
}
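All of these nacl_inverse_modify_patch_target calls perform the same conceptual transform (a sketch of the idea only; the actual implementation lives in the NaCl support code, not in this excerpt): while code is generated into a temporary buffer, any address that is embedded as data must refer to the final location in the dynamic code segment, so it is shifted by the buffer-to-destination delta.

	/* dest_address = buffer_address + (code_dest - code_buffer), conceptually */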
case MONO_PATCH_INFO_METHODCONST:
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak. */
+ /* TODO: can we free this when */
+ /* making the final jump table? */
+ table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (cfg->method->dynamic) {
table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
+#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
GSList *list;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* When this jump target gets evaluated, the method */
+ /* will be installed in the dynamic code section, */
+ /* not at the location of cfg->native_code. */
+ ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
+#endif
mono_domain_lock (domain);
if (!domain_jit_info (domain)->jump_target_hash)
int max_epilog_size;
guint8 *code;
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ void *code_dest;
+
+ /* This keeps patch targets from being transformed during
+ * ordinary method compilation, for local branches and jumps.
+ */
+ nacl_allow_target_modification (FALSE);
+#endif
+
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
+ bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
}
}
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
#endif
code = mono_domain_code_reserve (cfg->domain, cfg->code_size + unwindlen);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ nacl_allow_target_modification (TRUE);
+#endif
memcpy (code, cfg->native_code, cfg->code_len);
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ g_free (cfg->native_code);
+#elif defined(__native_client_codegen__)
if (cfg->native_code_alloc) {
g_free (cfg->native_code_alloc);
cfg->native_code_alloc = 0;
else if (cfg->native_code) {
g_free (cfg->native_code);
}
-#else
- g_free (cfg->native_code);
-#endif
+#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
-
-#ifdef __native_client_codegen__
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (!cfg->compile_aot) {
+ if (cfg->method->dynamic) {
+ code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
+ } else {
+ code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
+ }
+ }
+#endif
+
+#if defined(__native_client_codegen__)
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
} else {
mono_domain_code_commit (cfg->domain, cfg->native_code, cfg->code_size, cfg->code_len);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ cfg->native_code = code_dest;
+#endif
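Taken together, the NaCl method-compilation flow in this function becomes (summary sketch of the hunks above):

	/* 1. nacl_allow_target_modification (FALSE): emit with buffer-local targets */
	/* 2. reserve space and memcpy the code into the domain code manager */
	/* 3. nacl_allow_target_modification (TRUE); look up code_dest for the blob */
	/* 4. mono_nacl_fix_patches: re-resolve patches against destination addresses */
	/* 5. commit/validate, then publish code_dest as cfg->native_code */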
mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
/*
* Extend the try block backwards to include parts of the previous call
* instruction.
- * FIXME: This is arch specific.
*/
- ei->try_start = (guint8*)ei->try_start - 1;
+ ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
}
tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (tblock);
else
InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
- if (cfg->verbose_level >= 2) {
- char *id = mono_method_full_name (cfg->method, FALSE);
- mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
- g_free (id);
- }
-
cfg->jit_info = create_jit_info (cfg, method_to_compile);
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
mono_save_seq_point_info (cfg);
+ if (cfg->verbose_level >= 2) {
+ char *id = mono_method_full_name (cfg->method, FALSE);
+ mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
+ g_free (id);
+ }
+
if (!cfg->compile_aot) {
mono_domain_lock (cfg->domain);
mono_jit_info_table_add (cfg->domain, cfg->jit_info);
mono_icall_init ();
+ /* This should come after mono_init () too */
+ mini_gc_init ();
+
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
ves_icall_get_frame_info);
mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace",
register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE);
register_icall (mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE);
+#endif
/*
* NOTE, NOTE, NOTE, NOTE:
* when adding emulation for some opcodes, remember to also add a dummy
mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
+#if defined(__default_codegen__)
mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, FALSE);
+#elif defined(__native_client_codegen__)
+ mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE);
+#endif
#endif
#ifdef MONO_ARCH_SOFT_FLOAT
#endif
/* Version number of the AOT file format */
- #define MONO_AOT_FILE_VERSION "70"
+ #define MONO_AOT_FILE_VERSION 72
//TODO: This is x86/amd64 specific.
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
/* This structure is stored in the AOT file */
typedef struct MonoAotFileInfo
{
+ /* The version number of the AOT file format, should match MONO_AOT_FILE_VERSION */
+ guint32 version;
+ /* For alignment */
+ guint32 dummy;
+
+ /* All the pointers should be at the start to avoid alignment problems */
+
+ /* Mono's Global Offset Table */
+ gpointer got;
+ /* Compiled code for methods */
+ gpointer methods;
+ /* Mono EH Frame created by llc when using LLVM */
+ gpointer mono_eh_frame;
+ /* Data blob */
+ gpointer blob;
+ gpointer class_name_table;
+ gpointer class_info_offsets;
+ gpointer method_info_offsets;
+ gpointer ex_info_offsets;
+ gpointer code_offsets;
+ gpointer extra_method_info_offsets;
+ gpointer extra_method_table;
+ gpointer got_info_offsets;
+ gpointer methods_end;
+ gpointer unwind_info;
+ gpointer mem_end;
+ gpointer image_table;
+ /* Start of Mono's Program Linkage Table */
+ gpointer plt;
+ /* End of Mono's Program Linkage Table */
+ gpointer plt_end;
+ /* The GUID of the assembly which the AOT image was generated from */
+ gpointer assembly_guid;
+ /*
+ * The runtime version string for AOT images generated using 'bind-to-runtime-version',
+ * NULL otherwise.
+ */
+ gpointer runtime_version;
+ /* Blocks of various kinds of trampolines */
+ gpointer specific_trampolines;
+ gpointer static_rgctx_trampolines;
+ gpointer imt_thunks;
+ /*
+ * The end of LLVM generated thumb code, or NULL.
+ */
+ gpointer thumb_end;
+
+ /* The index of the first GOT slot used by the PLT */
guint32 plt_got_offset_base;
+ /* Number of entries in the GOT */
guint32 got_size;
+ /* Number of entries in the PLT */
guint32 plt_size;
+ /* Number of methods */
guint32 nmethods;
+ /* A union of MonoAotFileFlags */
guint32 flags;
/* Optimization flags used to compile the module */
guint32 opts;
/* Index of the blob entry holding the GC used by this module */
gint32 gc_name_index;
+ /* Number of trampolines */
guint32 num_trampolines [MONO_AOT_TRAMP_NUM];
+ /* The indexes of the first GOT slots used by the trampolines */
guint32 trampoline_got_offset_base [MONO_AOT_TRAMP_NUM];
+ /* The size of one trampoline */
guint32 trampoline_size [MONO_AOT_TRAMP_NUM];
} MonoAotFileInfo;
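With the version stored in the info structure (rather than the old "70" version string), an AOT loader can reject stale images with a plain field check. A sketch of such a check (the real loader logic is not part of this excerpt):

	MonoAotFileInfo *info = ...; /* file-info structure read from the AOT image */
	if (info->version != MONO_AOT_FILE_VERSION)
		return FALSE; /* generated by an incompatible AOT compiler */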
int native_offset;
int il_offset;
gpointer lmf;
+ guint32 unwind_info_len;
+ guint8 *unwind_info;
} StackFrameInfo;
typedef struct {
int offset;
};
+ /*
+ * Information about a call site for the GC map creation code
+ */
+ typedef struct {
+ /* The next offset after the call instruction */
+ int pc_offset;
+ /* The basic block containing the call site */
+ MonoBasicBlock *bb;
+ /*
+ * The set of variables live at the call site.
+ * Has length cfg->num_varinfo in bits.
+ */
+ guint8 *liveness;
+ /*
+ * List of OP_GC_PARAM_SLOT_LIVENESS_DEF instructions defining the param slots
+ * used by this call.
+ */
+ GSList *param_slots;
+ } GCCallSite;
+
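The liveness field is a bitset with one bit per variable. A consumer of the GC map data would test whether variable i is live at this call site roughly as follows (sketch; LSB-first bit packing assumed):

	gboolean live = (site->liveness [i / 8] >> (i % 8)) & 1;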
/*
* The IR-level extended basic block.
*
/* The offset of the generated code, used for fixups */
int native_offset;
+ /* The length of the generated code, doesn't include alignment padding */
int native_length;
+ /* The real native offset, which includes alignment padding too */
+ int real_native_offset;
int max_offset;
int max_length;
GSList *seq_points;
MonoInst *last_seq_point;
+ GSList *spill_slot_defs;
+
+ /* List of call sites in this bblock sorted by pc_offset */
+ GSList *gc_callsites;
+
/*
* The region encodes whether the basic block is inside
* a finally, catch, filter or none of these.
gboolean record_cast_details; /* For CEE_CASTCLASS */
MonoInst *spill_var; /* for OP_ICONV_TO_R8_RAW and OP_FCONV_TO_R8_X */
guint16 source_opcode; /*OP_XCONV_R8_TO_I4 needs to know which op was used to do proper widening*/
+ int pc_offset; /* OP_GC_LIVERANGE_START/END */
} backend;
MonoClass *klass;
/* On loads, the source address can be null */
MONO_INST_FAULT = 32,
/* On loads, the source address points to a constant value */
- MONO_INST_CONSTANT_LOAD = 64
+ MONO_INST_CONSTANT_LOAD = 64,
+ /* On variables, the variable needs GC tracking */
+ MONO_INST_GC_TRACK = 128,
+ /*
+ * Set on instructions during code emission which make calls, i.e. OP_CALL, OP_THROW.
+ * backend.pc_offset will be set to the pc offset at the end of the native call instructions.
+ * This shares its bit with MONO_INST_GC_TRACK: the two flags apply to disjoint
+ * instruction kinds (variables vs. call instructions), so they never collide.
+ */
+ MONO_INST_GC_CALLSITE = 128
};
#define inst_c0 data.op[0].const_val
/* Stores state needed by handler block with a guard */
MonoContext ex_ctx;
ResumeState resume_state;
- /* handle block return address */
+
+ /*Variables used to implement handler block (finally/catch/etc) guards during interruption*/
+ /* handler block return address */
gpointer handler_block_return_address;
- /* handler block been guarded */
+
+ /* The handler block being guarded. It's safe to store this even for dynamic methods since there
+ is an activation on the stack making sure it will remain alive.*/
MonoJitExceptionInfo *handler_block;
+ /* context to be used by the guard trampoline when resuming interruption.*/
+ MonoContext handler_block_context;
/*
* Stores the state at the exception throw site to be used by mono_stack_walk ()
* when it is called from profiler functions during exception handling.
#define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))))
+ #ifdef HAVE_SGEN_GC
+ #define vreg_is_ref(cfg, vreg) ((vreg) < (cfg)->vreg_is_ref_len ? (cfg)->vreg_is_ref [(vreg)] : 0)
+ #define vreg_is_mp(cfg, vreg) ((vreg) < (cfg)->vreg_is_mp_len ? (cfg)->vreg_is_mp [(vreg)] : 0)
+ #else
+ #define vreg_is_ref(cfg, vreg) FALSE
+ #define vreg_is_mp(cfg, vreg) FALSE
+ #endif
+
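These accessors let code generators ask cheaply whether a vreg needs GC tracking, degenerating to FALSE on non-SGen builds. Typical (sketched) use:

	if (vreg_is_ref (cfg, ins->dreg)) {
		/* record the slot in the GC map, emit liverange markers, etc. */
	}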
/*
* Control Flow Graph and compilation unit information
*/
gint stack_offset;
gint max_ireg;
gint cil_offset_to_bb_len;
- gint locals_min_stack_offset, locals_max_stack_offset;
MonoRegState *rs;
MonoSpillInfo *spill_info [16]; /* machine register spills */
gint spill_count;
guint keep_cil_nops : 1;
guint gen_seq_points : 1;
guint explicit_null_checks : 1;
+ guint compute_gc_maps : 1;
gpointer debug_info;
guint32 lmf_offset;
guint16 *intvars;
/* Size of above array */
guint32 vreg_to_inst_len;
+ /* Marks vregs which hold a GC ref */
+ /* FIXME: Use a bitmap */
+ gboolean *vreg_is_ref;
+
+ /* Size of above array */
+ guint32 vreg_is_ref_len;
+
+ /* Marks vregs which hold a managed pointer */
+ /* FIXME: Use a bitmap */
+ gboolean *vreg_is_mp;
+
+ /* Size of above array */
+ guint32 vreg_is_mp_len;
+
/*
* The original method to compile, differs from 'method' when doing generic
* sharing.
guint32 got_offset, ex_info_offset, method_info_offset;
/* Symbol used to refer to this method in generated assembly */
char *asm_symbol;
+ char *llvm_method_name;
MonoJitExceptionInfo *llvm_ex_info;
guint32 llvm_ex_info_len;
int llvm_this_reg, llvm_this_offset;
GSList *try_block_holes;
+
+ /* GC Maps */
+
+ /* The offsets of the locals area relative to the frame pointer */
+ gint locals_min_stack_offset, locals_max_stack_offset;
+
+ /* The final CFA rule at the end of the prolog */
+ int cfa_reg, cfa_offset;
+
+ /* Points to a MonoCompileGC */
+ gpointer gc_info;
+
+ /*
+ * The encoded GC map along with its size. This contains binary data so it can be saved in an AOT
+ * image etc, but it requires a 4 byte alignment.
+ */
+ guint8 *gc_map;
+ guint32 gc_map_size;
} MonoCompile;
typedef enum {
#endif
/* Opcodes to load/store regsize quantities */
-#ifdef __mono_ilp32__
+#if defined (__mono_ilp32__)
#define OP_LOADR_MEMBASE OP_LOADI8_MEMBASE
#define OP_STORER_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#else
guint32 mono_alloc_freg (MonoCompile *cfg) MONO_LLVM_INTERNAL;
guint32 mono_alloc_preg (MonoCompile *cfg) MONO_LLVM_INTERNAL;
guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type) MONO_INTERNAL;
+ guint32 mono_alloc_ireg_ref (MonoCompile *cfg) MONO_LLVM_INTERNAL;
+ guint32 mono_alloc_ireg_mp (MonoCompile *cfg) MONO_LLVM_INTERNAL;
+ guint32 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg) MONO_LLVM_INTERNAL;
+ void mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg) MONO_INTERNAL;
+ void mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg) MONO_INTERNAL;
void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) MONO_INTERNAL;
void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) MONO_INTERNAL;
GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type) MONO_INTERNAL;
GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type) MONO_INTERNAL;
void mono_analyze_liveness (MonoCompile *cfg) MONO_INTERNAL;
+ void mono_analyze_liveness_gc (MonoCompile *cfg) MONO_INTERNAL;
void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask) MONO_INTERNAL;
void mono_global_regalloc (MonoCompile *cfg) MONO_INTERNAL;
void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks) MONO_INTERNAL;
void mono_liveness_handle_exception_clauses (MonoCompile *cfg) MONO_INTERNAL;
/* Native Client functions */
+gpointer mono_realloc_native_code(MonoCompile *cfg);
#ifdef __native_client_codegen__
void mono_nacl_align_inst(guint8 **pcode, int instlen);
void mono_nacl_align_call(guint8 **start, guint8 **pcode);
guint8 *mono_arch_nacl_pad(guint8 *code, int pad);
guint8 *mono_arch_nacl_skip_nops(guint8 *code);
+extern const guint kNaClAlignment;
+extern const guint kNaClAlignmentMask;
+#endif
+
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void mono_nacl_gc (void);
+#endif
+
+#if defined(__native_client_codegen__) || defined(__native_client__)
+#define NACL_SIZE(a, b) (b)
+#else
+#define NACL_SIZE(a, b) (a)
#endif
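NACL_SIZE selects the first size on ordinary builds and the second (typically padded to bundle granularity) under Native Client, as in this use later in the patch:

	int tramp_size = NACL_SIZE (16, kNaClAlignment);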
/* AOT */
guint32 mono_aot_get_got_offset (MonoJumpInfo *ji) MONO_LLVM_INTERNAL;
char* mono_aot_get_method_name (MonoCompile *cfg) MONO_LLVM_INTERNAL;
char* mono_aot_get_plt_symbol (MonoJumpInfoType type, gconstpointer data) MONO_LLVM_INTERNAL;
- char* mono_aot_get_method_debug_name (MonoCompile *cfg) MONO_LLVM_INTERNAL;
MonoJumpInfo* mono_aot_patch_info_dup (MonoJumpInfo* ji) MONO_LLVM_INTERNAL;
void mono_aot_set_make_unreadable (gboolean unreadable) MONO_INTERNAL;
gboolean mono_aot_is_pagefault (void *ptr) MONO_INTERNAL;
gboolean mono_running_on_valgrind (void) MONO_INTERNAL;
void* mono_global_codeman_reserve (int size) MONO_INTERNAL;
+void* nacl_global_codeman_get_dest(void *data) MONO_INTERNAL;
+void mono_global_codeman_commit(void *data, int size, int newsize) MONO_INTERNAL;
+void nacl_global_codeman_validate(guint8 **buf_base, int buf_size, guint8 **code_end) MONO_INTERNAL;
const char *mono_regname_full (int reg, int bank) MONO_INTERNAL;
gint32* mono_allocate_stack_slots_full (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align) MONO_INTERNAL;
gint32* mono_allocate_stack_slots (MonoCompile *cfg, guint32 *stack_size, guint32 *stack_align) MONO_INTERNAL;
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame_info) MONO_INTERNAL;
gpointer mono_arch_get_throw_exception_by_name (void) MONO_INTERNAL;
gpointer mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot) MONO_INTERNAL;
mono_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *prev_ji, MonoContext *ctx,
MonoContext *new_ctx, char **trace, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame) MONO_INTERNAL;
gpointer mono_get_throw_exception (void) MONO_INTERNAL;
MONO_AOT_WRAPPER_MONO_ENTER,
MONO_AOT_WRAPPER_MONO_EXIT,
MONO_AOT_WRAPPER_ELEMENT_ADDR,
+ MONO_AOT_WRAPPER_PTR_TO_STRUCTURE,
+ MONO_AOT_WRAPPER_STRUCTURE_TO_PTR,
MONO_AOT_WRAPPER_LAST
};
x86_jump_code (code, addr);
g_assert ((code - start) < 16);
+ nacl_domain_code_validate (domain, &start, 16, &code);
+
return start;
}
x86_jump_code (code, addr);
g_assert ((code - start) <= buf_len);
+ nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
return start;
g_assert ((code - start) < buf_len);
+ nacl_domain_code_validate (domain, &start, buf_len, &code);
+
mono_arch_flush_icache (start, code - start);
return start;
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
+#if defined(__default_codegen__)
guint8 *code;
guint8 buf [8];
gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));
code [4], code [5], code [6]);
g_assert_not_reached ();
}
+#elif defined(__native_client__)
+ /* Target must be bundle-aligned */
+ g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
+
+ /* 0xe8 = call <DISP>, 0xe9 = jump <DISP> */
+ if ((orig_code [-5] == 0xe8) || (orig_code [-6] == 0xe9)) {
+ int ret;
+ gint32 offset = (gint32)addr - (gint32)orig_code;
+ guint8 buf[sizeof(gint32)];
+ *((gint32*)(buf)) = offset;
+ ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
+ g_assert (ret == 0);
+ } else {
+ printf ("Invalid trampoline sequence %p: %02x %02x %02x %02x %02x\n", orig_code, orig_code [-5], orig_code [-4], orig_code [-3], orig_code [-2], orig_code[-1]);
+ g_assert_not_reached ();
+ }
+#endif
}
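For reference, the byte layout this patching relies on (sketch): orig_code points just past a 5-byte rel32 call, so the 4-byte displacement it rewrites is relative to orig_code itself, which is why offset is computed as addr - orig_code.

	/* orig_code[-5] : 0xe8 (call rel32) */
	/* orig_code[-4..-1] : disp32 = addr - orig_code */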
void
g_assert (code [1] == 0x8b);
offset = *(guint32*)(code + 2);
-#else
+#elif defined(__default_codegen__)
/* A PLT entry: jmp *<DISP>(%ebx) */
g_assert (code [0] == 0xff);
g_assert (code [1] == 0xa3);
code -= 5;
if (code [0] == 0xe8) {
+#if defined(__default_codegen__)
if (!mono_running_on_valgrind ()) {
guint32 ops;
/*
/* Tell valgrind to recompile the patched code */
//VALGRIND_DISCARD_TRANSLATIONS (code, 8);
}
+#elif defined(__native_client_codegen__)
+ mono_arch_patch_callsite (code, code + 5, nullified_class_init_trampoline);
+#endif
} else if (code [0] == 0x90 || code [0] == 0xeb) {
/* Already changed by another thread */
;
x86_ret (code);
+ nacl_global_codeman_validate (&buf, 256, &code);
g_assert ((code - buf) <= 256);
if (info)
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
guint8 *code, *buf;
+ int tramp_size = NACL_SIZE (16, kNaClAlignment);
- code = buf = mono_global_codeman_reserve (16);
+ code = buf = mono_global_codeman_reserve (tramp_size);
x86_ret (code);
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
mono_arch_flush_icache (buf, code - buf);
if (info)
x86_jump_code (buf, tramp);
g_assert ((buf - code) <= TRAMPOLINE_SIZE);
+ nacl_domain_code_validate (domain, &code, kNaClAlignment, &buf);
+
mono_arch_flush_icache (code, buf - code);
if (code_len)
index -= size - 1;
}
-#ifdef __native_client_codegen__
- /* TODO: align for Native Client */
- tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
- 6 * (depth + kNaClAlignment);
-#else
+#if defined(__default_codegen__)
tramp_size = (aot ? 64 : 36) + 6 * depth;
-#endif /* __native_client_codegen__ */
+#elif defined(__native_client_codegen__)
+ tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
+ 6 * (depth + kNaClAlignment);
+#endif
code = buf = mono_global_codeman_reserve (tramp_size);
x86_jump_code (code, tramp);
}
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
#ifdef __native_client_codegen__
g_assert (code - buf <= kNaClAlignment);
#endif
+
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("monitor_enter_trampoline"), buf, code - buf, ji, unwind_ops);
x86_jump_code (code, tramp);
}
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
code = mono_x86_emit_tls_get (code, X86_EAX, mono_get_jit_tls_offset ());
x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 4);
/*simulate a call*/
+ /*Fix stack alignment*/
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x8);
x86_push_reg (code, X86_EAX);
x86_jump_code (code, tramp);
} else {
/*Slow path uses a c helper*/
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x8);
x86_push_reg (code, X86_ESP);
x86_push_imm (code, tramp);
x86_jump_code (code, handler_block_trampoline_helper);
}
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);