Native Client codegen requirements.
basic-simd.cs
regtests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe iltests.exe devirtualization.exe generics.exe basic-simd.exe
+fsatests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe devirtualization.exe basic-simd.exe
if X86
if MONO_DEBUGGER_SUPPORTED
generics-variant-types.dll: generics-variant-types.il
$(ILASM) -dll -output=$@ $<
+if NACL_CODEGEN
+GENMDESC_OPTS=--nacl
+else !NACL_CODEGEN
+GENMDESC_OPTS=
+endif !NACL_CODEGEN
+
# we don't always use the perl impl because it's an additional
# build dependency for the poor windows users
# $(arch_define) is the preprocessor symbol that enables all the opcodes
# for the specific platform in mini-ops.h
if CROSS_COMPILING
-GENMDESC_PRG=perl $(srcdir)/genmdesc.pl $(arch_define) $(srcdir)
+GENMDESC_PRG=perl $(srcdir)/genmdesc.pl $(arch_define) $(srcdir) $(GENMDESC_OPTS)
else !CROSS_COMPILING
-GENMDESC_PRG=./genmdesc
+GENMDESC_PRG=./genmdesc $(GENMDESC_OPTS)
endif !CROSS_COMPILING
cpu-x86.h: cpu-x86.md genmdesc$(EXEEXT)
MONO_PATH=fullaot-tmp $(top_builddir)/runtime/mono-wrapper --aot=full fullaot-tmp/* || exit 1
for i in $(regtests); do echo $$i; MONO_PATH=fullaot-tmp $(top_builddir)/runtime/mono-wrapper --full-aot fullaot-tmp/$$i --exclude '!FULLAOT' || exit 1; done
+fsacheck: mono $(fsatests) fsacheck.c generics.exe
+ rm -rf fsa-tmp
+ mkdir fsa-tmp
+ cp $(CLASS)/mscorlib.dll $(CLASS)/System.Core.dll $(CLASS)/System.dll $(CLASS)/Mono.Posix.dll $(CLASS)/System.Configuration.dll $(CLASS)/System.Security.dll $(CLASS)/System.Xml.dll $(CLASS)/Mono.Security.dll $(CLASS)/Mono.Simd.dll \
+ $(fsatests) generics-variant-types.dll TestDriver.dll fsa-tmp/
+ cp $(fsatests) fsa-tmp/
+ MONO_PATH=fsa-tmp $(top_builddir)/runtime/mono-wrapper --aot=full,static fsa-tmp/*.dll || exit 1
+ MONO_PATH=fsa-tmp $(top_builddir)/runtime/mono-wrapper --aot=full,static fsa-tmp/*.exe || exit 1
+ $(CC) -o $@.out -g -static $(VPATH)/fsacheck.c fsa-tmp/*.o \
+ -lmono-2.0 -lpthread -lm -ldl -lrt \
+ -DTARGET_X86 -L.libs -I${prefix}/include/mono-2.0 \
+ -I${prefix} -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include
+ for i in $(fsatests); do echo $$i; MONO_PATH=fsa-tmp ./$@.out $$i || exit 1; done
+
bench: mono test.exe
time env $(RUNTIME) --ncompile $(count) --compile Test:$(mtest) test.exe
#if !defined(DISABLE_AOT) && !defined(DISABLE_JIT)
-#if defined(__linux__)
+#if defined(__linux__) || defined(__native_client_codegen__)
#define RODATA_SECT ".rodata"
#else
#define RODATA_SECT ".text"
img_writer_emit_byte (acfg->w, val);
}
+#ifdef __native_client_codegen__
+/* Pad the output so the call instruction that follows ends exactly on a
+ * Native Client bundle boundary (required by the NaCl validator).
+ * Delegates to the image writer, which knows the active output format. */
+static inline void
+emit_nacl_call_alignment (MonoAotCompile *acfg)
+{
+	img_writer_emit_nacl_call_alignment (acfg->w);
+}
+#endif
+
static G_GNUC_UNUSED void
emit_global_inner (MonoAotCompile *acfg, const char *name, gboolean func)
{
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
+#if defined(TARGET_X86) && defined(__native_client_codegen__)
+#undef AOT_FUNC_ALIGNMENT
+#define AOT_FUNC_ALIGNMENT 32
+#endif
#if defined(TARGET_POWERPC64) && !defined(__mono_ilp32__)
#define PPC_LD_OP "ld"
#if defined(TARGET_X86)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
+#ifdef __native_client_codegen__
+ const guint8 kSizeOfNaClJmp = 11;
+ guint8 bytes[kSizeOfNaClJmp];
+ guint8 *pbytes = &bytes[0];
+
+ x86_jump_membase32 (pbytes, X86_EBX, offset);
+ emit_bytes (acfg, bytes, kSizeOfNaClJmp);
+ /* four bytes of data, used by mono_arch_patch_plt_entry */
+ /* For Native Client, make this work with data embedded in push. */
+ emit_byte (acfg, 0x68); /* hide data in a push */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#else
/* jmp *<offset>(%ebx) */
emit_byte (acfg, 0xff);
emit_byte (acfg, 0xa3);
emit_int32 (acfg, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#endif /* __native_client_codegen__ */
#elif defined(TARGET_AMD64)
/*
* We can't emit jumps because they are 32 bits only so they can't be patched.
/* Branch to generic trampoline */
x86_jump_reg (code, X86_ECX);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 17;
+ *tramp_size = NACL_SIZE(17, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
+
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 15;
+ *tramp_size = NACL_SIZE (15, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
*tramp_size = code - buf + 7;
#elif defined(TARGET_X86)
guint8 *buf, *code;
+#ifdef __native_client_codegen__
+ guint8 *buf_alloc;
+#endif
guint8 *labels [3];
+#ifdef __native_client_codegen__
+ buf_alloc = g_malloc (256 + kNaClAlignment);
+ code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
+#else
code = buf = g_malloc (256);
+#endif
/* Allocate a temporary stack slot */
x86_push_reg (code, X86_EAX);
mono_x86_patch (labels [1], code);
x86_breakpoint (code);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
ji = info->ji;
unwind_ops = info->unwind_ops;
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (code, ji);
+#endif
+
/* Emit code */
sprintf (start_symbol, "%s", name);
emit_section_change (acfg, ".text", 0);
emit_global (acfg, start_symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, start_symbol);
sprintf (symbol, "%snamed_%s", acfg->temp_prefix, name);
}
emit_global (acfg, symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, symbol);
acfg->trampoline_got_offset_base [ntype] = tramp_got_offset;
default:
g_assert_not_reached ();
}
+#ifdef __native_client_codegen__
+ /* align to avoid 32-byte boundary crossings */
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
if (!acfg->trampoline_size [ntype]) {
g_assert (tramp_size);
}
emit_section_change (acfg, ".text", 0);
+#ifdef __native_client_codegen__
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
emit_global (acfg, symbol, TRUE);
emit_label (acfg, symbol);
* Emit a global symbol which can be passed by an embedding app to
* mono_aot_register_module ().
*/
-#if defined(__MACH__)
+#if defined(__MACH__) && !defined(__native_client_codegen__)
sprintf (symbol, "_mono_aot_module_%s_info", acfg->image->assembly->aname.name);
#else
sprintf (symbol, "mono_aot_module_%s_info", acfg->image->assembly->aname.name);
#define AS_OPTIONS ""
#endif
+#ifdef __native_client_codegen__
+#define AS_NAME "nacl-as"
+#else
+#define AS_NAME "as"
+#endif
+
#ifndef LD_OPTIONS
#define LD_OPTIONS ""
#endif
} else {
objfile = g_strdup_printf ("%s.o", acfg->tmpfname);
}
- command = g_strdup_printf ("%sas %s %s -o %s", tool_prefix, AS_OPTIONS, acfg->tmpfname, objfile);
+ command = g_strdup_printf ("%s%s %s %s -o %s", tool_prefix, AS_NAME, AS_OPTIONS, acfg->tmpfname, objfile);
printf ("Executing the native assembler: %s\n", command);
if (system (command) != 0) {
g_free (command);
# See the code in mini-x86.c for more details on how the specifiers are used.
#
break: len:1
-jmp: len:32
+jmp: len:32 clob:c
call: dest:a clob:c len:17
br: len:5
seq_point: len:16
-int_beq: len:6 nacl:28
+int_beq: len:6
int_bge: len:6
int_bgt: len:6
int_ble: len:6
throw: src1:i len:13
rethrow: src1:i len:13
start_handler: len:16
-endfinally: len:16
-endfilter: src1:a len:16
+endfinally: len:16 nacl:21
+endfilter: src1:a len:16 nacl:21
ckfinite: dest:f src1:f len:32
ceq: dest:y len:6
checkthis: src1:b len:3
voidcall: len:17 clob:c
voidcall_reg: src1:i len:11 clob:c
-voidcall_membase: src1:b len:16 clob:c
+voidcall_membase: src1:b len:16 nacl:17 clob:c
fcall: dest:f len:17 clob:c
fcall_reg: dest:f src1:i len:11 clob:c
-fcall_membase: dest:f src1:b len:16 clob:c
+fcall_membase: dest:f src1:b len:16 nacl:17 clob:c
lcall: dest:l len:17 clob:c
lcall_reg: dest:l src1:i len:11 clob:c
-lcall_membase: dest:l src1:b len:16 clob:c
+lcall_membase: dest:l src1:b len:16 nacl:17 clob:c
vcall: len:17 clob:c
vcall_reg: src1:i len:11 clob:c
-vcall_membase: src1:b len:16 clob:c
-call_reg: dest:a src1:i len:11 clob:c
-call_membase: dest:a src1:b len:16 clob:c
+vcall_membase: src1:b len:16 nacl:17 clob:c
+call_reg: dest:a src1:i len:11 nacl:14 clob:c
+call_membase: dest:a src1:b len:16 nacl:18 clob:c
iconst: dest:i len:5
r4const: dest:f len:15
r8const: dest:f len:16
adc_imm: dest:i src1:i len:6 clob:1
sbb: dest:i src1:i src2:i len:2 clob:1
sbb_imm: dest:i src1:i len:6 clob:1
-br_reg: src1:i len:2
+br_reg: src1:i len:2 nacl:5
sin: dest:f src1:f len:6
cos: dest:f src1:f len:6
abs: dest:f src1:f len:2
vcall2: len:17 clob:c
vcall2_reg: src1:i len:11 clob:c
-vcall2_membase: src1:b len:16 clob:c
+vcall2_membase: src1:b len:16 nacl:17 clob:c
localloc_imm: dest:i len:120
NULL
};
+#ifdef __native_client_codegen__
+extern guint8 nacl_align_byte;
+#endif
#define DEFAULT_OPTIMIZATIONS ( \
MONO_OPT_PEEPHOLE | \
" --trace[=EXPR] Enable tracing, use --help-trace for details\n"
" --jitmap Output a jit method map to /tmp/perf-PID.map\n"
" --help-devel Shows more options available to developers\n"
+#ifdef __native_client_codegen__
+ " --nacl-align-mask-off Turn off Native Client 32-byte alignment mask (for debug only)\n"
+#endif
"\n"
"Runtime:\n"
" --config FILE Loads FILE as the Mono config\n"
#endif
} else if (strcmp (argv [i], "--nollvm") == 0){
mono_use_llvm = FALSE;
+#ifdef __native_client_codegen__
+ } else if (strcmp (argv [i], "--nacl-align-mask-off") == 0){
+ nacl_align_byte = 0xff;
+#endif
} else {
fprintf (stderr, "Unknown command line option: '%s'\n", argv [i]);
return 1;
}
}
+#ifdef __native_client_codegen__
+ if (getenv ("MONO_NACL_ALIGN_MASK_OFF"))
+ {
+ nacl_align_byte = 0xff;
+ }
+#endif
+
if (!argv [i]) {
mini_usage ();
return 1;
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+#ifdef __native_client_codegen__
+ guint kMaxCodeSize = 128;
+#else
+ guint kMaxCodeSize = 64;
+#endif /* __native_client_codegen__ */
/* call_filter (MonoContext *ctx, unsigned long eip) */
- start = code = mono_global_codeman_reserve (64);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
x86_push_reg (code, X86_EBP);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
- g_assert ((code - start) < 64);
+ g_assert ((code - start) < kMaxCodeSize);
return start;
}
int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
-
- start = code = mono_global_codeman_reserve (128);
+#ifdef __native_client_codegen__
+ guint kMaxCodeSize = 256;
+#else
+ guint kMaxCodeSize = 128;
+#endif
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
stack_size = 128;
}
x86_breakpoint (code);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
if (info)
*info = mono_tramp_info_create (g_strdup (name), start, code - start, ji, unwind_ops);
gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
- return get_throw_trampoline ("rethow_exception", TRUE, FALSE, FALSE, FALSE, FALSE, info, aot);
+ return get_throw_trampoline ("rethrow_exception", TRUE, FALSE, FALSE, FALSE, FALSE, info, aot);
}
/**
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
+#if defined (__native_client__)
+ printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
+ mctx->eax = 0xDEADBEEF;
+ mctx->ebx = 0xDEADBEEF;
+ mctx->ecx = 0xDEADBEEF;
+ mctx->edx = 0xDEADBEEF;
+ mctx->ebp = 0xDEADBEEF;
+ mctx->esp = 0xDEADBEEF;
+ mctx->esi = 0xDEADBEEF;
+ mctx->edi = 0xDEADBEEF;
+ mctx->eip = 0xDEADBEEF;
+#else
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
mctx->edi = ctx->SC_EDI;
mctx->eip = ctx->SC_EIP;
#endif
+#endif /* if defined(__native_client__) */
}
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
+#if defined(__native_client__)
+ printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
+#else
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
ctx->SC_EDI = mctx->edi;
ctx->SC_EIP = mctx->eip;
#endif
+#endif /* __native_client__ */
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
+#if defined(__native_client__)
+ printf("WARNING: mono_arch_ip_from_context() called!\n");
+ return (NULL);
+#else
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
return (gpointer)UCONTEXT_REG_EIP (ctx);
#else
struct sigcontext *ctx = sigctx;
return (gpointer)ctx->SC_EIP;
-#endif
+#endif
+#endif /* __native_client__ */
}
/*
static guint8* saved = NULL;
guint8 *code, *start;
+#ifdef __native_client_codegen__
+ g_print("mono_tasklets_arch_restore needs to be aligned for Native Client\n");
+#endif
if (saved)
return (MonoContinuationRestore)saved;
code = start = mono_global_codeman_reserve (48);
char spec [MONO_INST_MAX];
} OpDesc;
-static int nacl;
+static int nacl = 0;
static GHashTable *table;
static GHashTable *template_table;
line = 0;
while ((str = fgets (buf, sizeof (buf), f))) {
gboolean is_template = FALSE;
+ gboolean nacl_length_set = FALSE;
+
++line;
eat_whitespace (str);
if (!str [0])
p += 7;
*/
} else if (strncmp (p, "len:", 4) == 0) {
+ unsigned long size;
p += 4;
- desc->spec [MONO_INST_LEN] += strtoul (p, &p, 10);
- } else if (strncmp (p, "nacl:", 5) == 0){
+ size = strtoul (p, &p, 10);
+ if (!nacl_length_set) {
+ desc->spec [MONO_INST_LEN] = size;
+ }
+ } else if (strncmp (p, "nacl:", 5) == 0) {
unsigned long size;
p += 5;
size = strtoul (p, &p, 10);
- if (nacl)
- desc->spec [MONO_INST_LEN] += size;
+ if (nacl) {
+ desc->spec [MONO_INST_LEN] = size;
+ nacl_length_set = TRUE;
+ }
} else if (strncmp (p, "template:", 9) == 0) {
char *tname;
int i;
return 1;
} else {
int i = 3;
- if (strcmp (argv [1], "--nacl") == 0){
+ if (strcmp (argv [1], "--nacl") == 0) {
nacl = 1;
i++;
}
sub INST_SRC3 () {return 3;}
sub INST_LEN () {return 4;}
sub INST_CLOB () {return 5;}
+# making INST_NACL the same as INST_MAX is not a mistake,
+# INST_NACL writes over INST_LEN, it's not its own field
sub INST_NACL () {return 6;}
sub INST_MAX () {return 6;}
my %template_table =();
my @opcodes = ();
+my $nacl = 0;
+
sub parse_file
{
my ($define, $file) = @_;
my $res = "";
my $n = 0;
for (my $i = 0; $i < @vals; ++$i) {
+ next if $i == INST_NACL;
if (defined $vals [$i]) {
if ($i == INST_LEN) {
$n = $vals [$i];
- if (defined $vals [INST_NACL]){
- $n += $vals [INST_NACL];
+ if ((defined $vals [INST_NACL]) and $nacl == 1){
+ $n = $vals [INST_NACL];
}
$res .= sprintf ("\\x%x\" \"", + $n);
- } elsif ($i != INST_NACL) {
+ } else {
if ($vals [$i] =~ /^[a-zA-Z0-9]$/) {
$res .= $vals [$i];
} else {
$res .= sprintf ("\\x%x\" \"", $vals [$i]);
}
}
- } elsif ($i != INST_NACL) {
+ } else {
$res .= "\\x0\" \"";
}
}
}
sub usage {
- die "genmdesc.pl arch srcdir output name desc [desc2 ...]\n";
+ die "genmdesc.pl arch srcdir [--nacl] output name desc [desc2 ...]\n";
}
my $arch = shift || usage ();
my $srcdir = shift || usage ();
my $output = shift || usage ();
+if ($output eq "--nacl")
+{
+ $nacl = 1;
+ $output = shift || usage();
+}
my $name = shift || usage ();
usage () unless @ARGV;
my @files = @ARGV;
* TARGET_ASM_GAS == GNU assembler
*/
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_ASM_GAS)
-#ifdef __MACH__
+#if defined(__MACH__) && !defined(__native_client_codegen__)
#define TARGET_ASM_APPLE
#else
#define TARGET_ASM_GAS
while (new_size <= new_offset)
new_size *= 2;
data = g_malloc0 (new_size);
+#ifdef __native_client_codegen__
+ /* for Native Client, fill empty space with HLT instruction */
+ /* instead of 00. */
+ memset(data, 0xf4, new_size);
+#endif
memcpy (data, section->data, section->data_len);
g_free (section->data);
section->data = data;
}
}
+#ifdef __native_client_codegen__
+/* Binary-writer variant: pad the current section with NOP (0x90) bytes so
+ * that exactly kNaClLengthOfCallImm bytes remain before the next
+ * kNaClAlignment-byte bundle boundary -- i.e. the call immediate emitted
+ * next will end flush with the boundary. */
+static void
+bin_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
+	int offset = acfg->cur_section->cur_offset;
+	int padding = kNaClAlignment - (offset & kNaClAlignmentMask) - kNaClLengthOfCallImm;
+	guint8 padc = '\x90';
+
+	/* A negative result means the call already fits before the boundary;
+	 * roll forward to pad into the next bundle instead. */
+	if (padding < 0) padding += kNaClAlignment;
+
+	while (padding > 0) {
+		bin_writer_emit_bytes(acfg, &padc, 1);
+		padding -= 1;
+	}
+}
+#endif /* __native_client_codegen__ */
+
static void
bin_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
#endif
}
+#ifdef __native_client_codegen__
+/* Assembly-writer variant: emit a .align to the kNaClAlignment bundle
+ * boundary, then (kNaClAlignment - kNaClLengthOfCallImm) NOP (0x90) bytes,
+ * leaving exactly kNaClLengthOfCallImm bytes so the following call
+ * immediate ends flush with the next bundle boundary. */
+static void
+asm_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
+	int padding = kNaClAlignment - kNaClLengthOfCallImm;
+	guint8 padc = '\x90';
+
+	fprintf (acfg->fp, "\n\t.align %d", kNaClAlignment);
+	while (padding > 0) {
+		fprintf (acfg->fp, "\n\t.byte %d", padc);
+		padding -= 1;
+	}
+}
+#endif /* __native_client_codegen__ */
+
static void
asm_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
#endif
}
+#ifdef __native_client_codegen__
+/* Dispatch NaCl call-alignment padding to the active writer backend.
+ * NOTE(review): when USE_BIN_WRITER is not defined this aborts even though
+ * the asm-writer variant exists -- confirm that is intended. */
+void
+img_writer_emit_nacl_call_alignment (MonoImageWriter *acfg) {
+#ifdef USE_BIN_WRITER
+	if (acfg->use_bin_writer)
+		bin_writer_emit_nacl_call_alignment (acfg);
+	else
+		asm_writer_emit_nacl_call_alignment (acfg);
+#else
+	g_assert_not_reached();
+#endif
+}
+#endif /* __native_client_codegen__ */
+
void
img_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
void img_writer_emit_alignment (MonoImageWriter *w, int size) MONO_INTERNAL;
+#ifdef __native_client_codegen__
+void img_writer_emit_nacl_call_alignment (MonoImageWriter *w) MONO_INTERNAL;
+#endif
+
void img_writer_emit_pointer_unaligned (MonoImageWriter *w, const char *target) MONO_INTERNAL;
void img_writer_emit_pointer (MonoImageWriter *w, const char *target) MONO_INTERNAL;
cfg->bb_exit = end_bblock;
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
+ end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
g_assert (cfg->num_bblocks == 2);
arg_array = cfg->args;
target = ip + n * sizeof (guint32);
GET_BBLOCK (cfg, default_bblock, target);
+ default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
for (i = 0; i < n; ++i) {
GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
targets [i] = tblock;
+ targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
ip += 4;
}
#include "jit-icalls.h"
+#if defined(__native_client__)
+
+/* Native Client sandbox builds: the signal-based profiler and POSIX signal
+ * handler machinery below these stubs are not used; each entry point is
+ * replaced by a no-op (setup_stat_profiler additionally warns at runtime
+ * that it was called). */
+
+/* Stub: warns and does nothing. */
+void
+mono_runtime_setup_stat_profiler (void)
+{
+	printf("WARNING: mono_runtime_setup_stat_profiler() called!\n");
+}
+
+
+/* Stub: nothing to shut down. */
+void
+mono_runtime_shutdown_stat_profiler (void)
+{
+}
+
+
+/* Stub: report no previously-installed handler to chain to. */
+gboolean
+SIG_HANDLER_SIGNATURE (mono_chain_signal)
+{
+	return FALSE;
+}
+
+/* Stub: no signal handlers are installed under NaCl. */
+void
+mono_runtime_install_handlers (void)
+{
+}
+
+/* Stub: nothing was installed, nothing to remove. */
+void
+mono_runtime_shutdown_handlers (void)
+{
+}
+
+/* Stub: nothing was installed, nothing to clean up. */
+void
+mono_runtime_cleanup_handlers (void)
+{
+}
+
+
+
+#else
+
static GHashTable *mono_saved_signal_handlers = NULL;
static gpointer
return TRUE;
}
#endif
+#endif /* __native_client__ */
+
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+/* Reallocate cfg->native_code after cfg->code_size has grown.
+ * Native Client: cfg->native_code is a kNaClAlignment (32-byte) aligned
+ * view into the raw allocation cfg->native_code_alloc; g_realloc may move
+ * the base and change its alignment padding, so the already-emitted code
+ * bytes are memmove'd back to the freshly re-aligned position and the
+ * result alignment is asserted.  On all other targets this is a plain
+ * g_realloc of cfg->native_code.
+ * NOTE(review): the arithmetic casts pointers through guint, which only
+ * works on 32-bit targets -- presumably fine for NaCl x86-32; confirm. */
+static gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#ifdef __native_client_codegen__
+	guint old_padding;
+	gpointer native_code;
+	guint alignment_check;
+
+	/* Save the old alignment offset so we can re-align after the realloc. */
+	old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+	cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
+					    cfg->code_size + kNaClAlignment);
+
+	/* Align native_code to next nearest kNaClAlignment byte. */
+	native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
+	native_code = (guint)native_code & ~kNaClAlignmentMask;
+
+	/* Shift the data to be 32-byte aligned again. */
+	memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+	alignment_check = (guint)native_code & kNaClAlignmentMask;
+	g_assert (alignment_check == 0);
+	return native_code;
+#else
+	return g_realloc (cfg->native_code, cfg->code_size);
+#endif
+}
+
+#ifdef __native_client_codegen__
+
+/* mono_arch_nacl_pad: emit 'pad' bytes of padding instructions at 'code'
+ * and return the advanced pointer.  The padding must not cross a
+ * kNaClAlignment block boundary (asserted); it is emitted in chunks of at
+ * most 7 bytes, the longest single padding instruction x86_padding()
+ * supports (see x86-codegen.h). */
+guint8 *
+mono_arch_nacl_pad (guint8 *code, int pad)
+{
+	const int kMaxPadding = 7; /* see x86-codegen.h: x86_padding() */
+
+	if (pad == 0) return code;
+	/* assertion: alignment cannot cross a block boundary */
+	g_assert(((uintptr_t)code & (~kNaClAlignmentMask)) ==
+		 (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
+	while (pad >= kMaxPadding) {
+		x86_padding (code, kMaxPadding);
+		pad -= kMaxPadding;
+	}
+	if (pad != 0) x86_padding (code, pad);
+	return code;
+}
+
+/* x86 implementation of the per-arch hook: advance 'code' past any
+ * NOP/padding instructions (x86_skip_nops) and return the new position. */
+guint8 *
+mono_arch_nacl_skip_nops (guint8 *code)
+{
+	x86_skip_nops (code);
+	return code;
+}
+
+#endif /* __native_client_codegen__ */
+
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
+#if defined(__native_client__)
+ /* Taken from below, the bug listed in the comment is */
+ /* only valid for non-static cases. */
+ __asm__ __volatile__ ("cpuid"
+ : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
+ : "a" (id));
+ return 1;
+#else
int have_cpuid = 0;
#ifndef _MSC_VER
__asm__ __volatile__ (
return 1;
}
return 0;
+#endif
}
/*
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
+#if !defined(__native_client__)
int eax, ebx, ecx, edx;
guint32 opts = 0;
#endif
}
return opts;
+#else
+ return MONO_OPT_CMOV | MONO_OPT_FCMOV | MONO_OPT_SSE2;
+#endif
}
/*
x86_pop_reg (code, X86_EDX); \
x86_pop_reg (code, X86_EAX);
+/* REAL_PRINT_REG does not appear to be used, and was not adapted to work with Native Client. */
+/* Fixed: the guard was misspelled __native__client_codegen__ (extra '_'), */
+/* so it never matched and this override was silently dead.                */
+#ifdef __native_client_codegen__
+#define REAL_PRINT_REG(text, reg) g_assert_not_reached()
+#endif
+
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
bb->native_offset = cfg->code_len;
}
}
-
+#ifdef __native_client_codegen__
+ {
+ /* For Native Client, all indirect call/jump targets must be */
+ /* 32-byte aligned. Exception handler blocks are jumped to */
+ /* indirectly as well. */
+ gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
+ (bb->flags & BB_EXCEPTION_HANDLER);
+
+ /* if ((cfg->code_len & kNaClAlignmentMask) != 0) { */
+ if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
+ int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
+ if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
+ cfg->code_len += pad;
+ bb->native_offset = cfg->code_len;
+ }
+ }
+#endif /* __native_client_codegen__ */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
- if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
+#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+
+ if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code(cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
}
if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
+#ifndef __native_client_codegen__
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
- mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
+ mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
+#endif /* __native_client_codegen__ */
}
cpos += max_len;
int alloc_size, pos, max_offset, i, cfa_offset;
guint8 *code;
gboolean need_stack_frame;
+#ifdef __native_client_codegen__
+ guint alignment_check;
+#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
+#ifdef __native_client_codegen__
+ /* native_code_alloc is not 32-byte aligned, native_code is. */
+ cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
+
+	/* Align native_code to next nearest kNaClAlignment byte. */
+ cfg->native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
+ cfg->native_code = (guint)cfg->native_code & ~kNaClAlignmentMask;
+
+ code = cfg->native_code;
+
+ alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
+ g_assert(alignment_check == 0);
+#else
code = cfg->native_code = g_malloc (cfg->code_size);
+#endif
/* Offset between RSP and the CFA */
cfa_offset = 0;
if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
while (required_code_size >= (cfg->code_size - offset))
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code(cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_offset += LOOP_ALIGNMENT;
-
+#ifdef __native_client_codegen__
+ /* max alignment for native client */
+ max_offset += kNaClAlignment;
+#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_LABEL)
ins->inst_c1 = max_offset;
-
+#ifdef __native_client_codegen__
+ {
+ int space_in_block = kNaClAlignment -
+ ((max_offset + cfg->code_len) & kNaClAlignmentMask);
+ int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+ if (space_in_block < max_len && max_len < kNaClAlignment) {
+ max_offset += space_in_block;
+ }
+ }
+#endif /* __native_client_codegen__ */
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
}
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code(cfg);
mono_jit_stats.code_reallocs++;
}
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code(cfg);
mono_jit_stats.code_reallocs++;
}
guint32 size;
/* Compute size of code following the push <OFFSET> */
+#ifdef __native_client_codegen__
+ code = mono_nacl_align (code);
+ size = kNaClAlignment;
+#else
size = 5 + 5;
-
+#endif
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
if ((code - cfg->native_code) - throw_ip < 126 - size) {
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
+#ifdef __native_client_codegen__
+/* These constants should be coming from cpu-x86.md */
+/* I suspect the size calculation below is actually incorrect. */
+/* TODO: fix the calculation that uses these sizes. */
+#define BR_SMALL_SIZE 16
+#define BR_LARGE_SIZE 12
+#else
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
+#endif /* __native_client_codegen__ */
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
int size = 0;
guint8 *code, *start;
+#ifdef __native_client_codegen__
+ /* g_print("mono_arch_build_imt_thunk needs to be aligned.\n"); */
+#endif
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
+#ifdef __native_client_codegen__
+ /* TODO: calculate this size correctly */
+ int code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
+#else
int code_reserve = 8 + (param_count * 8);
-
+#endif /* __native_client_codegen__ */
/*
* The stack contains:
* <args in reverse order>
#define MONO_ARCH_USE_SIGACTION
#endif
+#if defined(__native_client__)
+#undef MONO_ARCH_USE_SIGACTION
+#endif
+
+#if defined(__native_client_codegen__) || defined(__native_client__)
+#define NACL_SIZE(a, b) (b)
+#else
+#define NACL_SIZE(a, b) (a)
+#endif
+
#ifndef HOST_WIN32
#ifdef HAVE_WORKING_SIGALTSTACK
#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
-#if !defined(__APPLE__)
+#if !defined(__APPLE__) || defined(__native_client_codegen__)
#define MONO_ARCH_AOT_SUPPORTED 1
#endif
MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
+#ifdef __native_client_codegen__
+/* Default alignment for Native Client is 32-byte. */
+guint8 nacl_align_byte = 0xe0;
+#endif
+
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
gboolean mono_dont_free_global_codeman;
+#ifdef __native_client_codegen__
+
+/* Prevent instructions from straddling a 32-byte alignment boundary. */
+/* Instructions longer than 32 bytes must be aligned internally. */
+/* IN: pcode, instlen */
+/* OUT: pcode */
+/* If the next 'instlen'-byte instruction would straddle the upcoming
+ * kNaClAlignment boundary, pad *pcode forward to that boundary first.
+ * Instructions of a whole bundle or more (>= kNaClAlignment) cannot be
+ * made safe this way and abort. */
+void mono_nacl_align_inst(guint8 **pcode, int instlen) {
+	int space_in_block;
+
+	space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);
+
+	if (G_UNLIKELY (instlen >= kNaClAlignment)) {
+		g_assert_not_reached();
+	} else if (instlen > space_in_block) {
+		*pcode = mono_arch_nacl_pad(*pcode, space_in_block);
+	}
+}
+
+/* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
+/* IN: start pointer to start of call sequence */
+/* IN: pcode pointer to end of call sequence (current "IP") */
+/* OUT: start pointer to the start of the call sequence after padding */
+/* OUT: pcode pointer to the end of the call sequence after padding */
+/* Copy the just-emitted call sequence aside, pad so the sequence will end
+ * flush with a bundle boundary (mono_nacl_pad_call), then copy it back and
+ * update both the caller's start and end pointers. */
+void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
+	const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
+	guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
+	guint8 *temp;
+
+	const size_t length = (size_t)((*pcode)-(*start));
+	g_assert(length < MAX_NACL_CALL_LENGTH);
+
+	memcpy(copy_of_call, *start, length);
+	temp = mono_nacl_pad_call(*start, (guint8)length);
+	memcpy(temp, copy_of_call, length);
+	(*start) = temp;
+	(*pcode) = temp + length;
+}
+
+/* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
+/* code pointer to buffer for emitting code */
+/* ilength length of call instruction */
+/* Emit padding so that an 'ilength'-byte call placed at the returned
+ * pointer ends exactly on a kNaClAlignment boundary.  If the call cannot
+ * fit in the space left in the current bundle, the bundle is filled first
+ * and the next one is used. */
+guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
+	int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
+	int padding = freeSpaceInBlock - ilength;
+
+	if (padding < 0) {
+		/* There isn't enough space in this block for the instruction. */
+		/* Fill this block and start a new one. */
+		code = mono_arch_nacl_pad(code, freeSpaceInBlock);
+		freeSpaceInBlock = kNaClAlignment;
+		padding = freeSpaceInBlock - ilength;
+	}
+	g_assert(ilength > 0);
+	g_assert(padding >= 0);
+	g_assert(padding < kNaClAlignment);
+	if (0 == padding) return code;
+	return mono_arch_nacl_pad(code, padding);
+}
+
+/* Round 'code' up to the next kNaClAlignment boundary with padding
+ * instructions; a pointer already on a boundary is returned unchanged. */
+guint8 *mono_nacl_align(guint8 *code) {
+	int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
+	if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding);
+	return code;
+}
+
+/* Alignment padding can shift emitted code; re-point each patch site at
+ * the first non-NOP byte at or after its recorded offset
+ * (mono_arch_nacl_skip_nops) so patching targets the real instruction. */
+void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
+{
+	MonoJumpInfo *patch_info;
+	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
+		unsigned char *ip = patch_info->ip.i + code;
+		ip = mono_arch_nacl_skip_nops(ip);
+		patch_info->ip.i = ip - code;
+	}
+}
+#endif /* __native_client_codegen__ */
+
gboolean
mono_running_on_valgrind (void)
{
}
memcpy (code, cfg->native_code, cfg->code_len);
+#ifdef __native_client_codegen__
+ if (cfg->native_code_alloc) {
+ g_free (cfg->native_code_alloc);
+ cfg->native_code_alloc = 0;
+ }
+ else if (cfg->native_code) {
+ g_free (cfg->native_code);
+ }
+#else
g_free (cfg->native_code);
+#endif
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
mono_arch_save_unwind_info (cfg);
#endif
+#ifdef __native_client_codegen__
+ /* NaCl padding may have moved instructions, so recompute the patch */
+ /* offsets before mono_arch_patch_code applies them below. */
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
+
mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
if (cfg->method->dynamic) {
MONO_PROBE_VES_INIT_BEGIN ();
-#ifdef __linux__
+/* Skip the /proc mount check when running under Native Client. */
+#if defined(__linux__) && !defined(__native_client__)
if (access ("/proc/self/maps", F_OK) != 0) {
g_print ("Mono requires /proc to be mounted.\n");
exit (1);
/* BBlock flags */
enum {
- BB_VISITED = 1 << 0,
- BB_REACHABLE = 1 << 1,
- BB_EXCEPTION_DEAD_OBJ = 1 << 2,
- BB_EXCEPTION_UNSAFE = 1 << 3,
- BB_EXCEPTION_HANDLER = 1 << 4
+ BB_VISITED = 1 << 0,
+ BB_REACHABLE = 1 << 1,
+ BB_EXCEPTION_DEAD_OBJ = 1 << 2,
+ BB_EXCEPTION_UNSAFE = 1 << 3,
+ BB_EXCEPTION_HANDLER = 1 << 4,
+ /* for Native Client, mark the blocks that can be jumped to indirectly */
+ /* NOTE(review): presumably such blocks need aligned starts - confirm */
+ BB_INDIRECT_JUMP_TARGET = 1 << 5
};
typedef struct MonoMemcpyArgs {
MonoGenericSharingContext *generic_sharing_context;
unsigned char *cil_start;
+#ifdef __native_client_codegen__
+ /* this alloc is not aligned, native_code */
+ /* is the 32-byte aligned version of this */
+ unsigned char *native_code_alloc;
+#endif
unsigned char *native_code;
guint code_size;
guint code_len;
void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos) MONO_INTERNAL;
void mono_liveness_handle_exception_clauses (MonoCompile *cfg) MONO_INTERNAL;
+/* Native Client functions */
+#ifdef __native_client_codegen__
+/* Pad so the next instlen-byte instruction does not straddle a bundle */
+/* boundary. NOTE(review): definition not in this hunk - confirm. */
+void mono_nacl_align_inst(guint8 **pcode, int instlen);
+/* Relocate the pending call in [*start, *pcode) so it ends exactly on */
+/* a kNaClAlignment boundary; updates both pointers. */
+void mono_nacl_align_call(guint8 **start, guint8 **pcode);
+/* Emit padding so a call of ilength bytes ends exactly on a boundary; */
+/* returns the new emission pointer. */
+guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength);
+/* Advance code to the next boundary; no-op when already aligned. */
+guint8 *mono_nacl_align(guint8 *code);
+/* Re-point patch offsets past any nops inserted by NaCl padding. */
+void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji);
+/* Defined for each arch */
+guint8 *mono_arch_nacl_pad(guint8 *code, int pad);
+guint8 *mono_arch_nacl_skip_nops(guint8 *code);
+
+#endif
+
/* AOT */
void mono_aot_init (void) MONO_INTERNAL;
gpointer mono_aot_get_method (MonoDomain *domain,
/* Patch the jump table entry used by the plt entry */
+#if defined(__native_client_codegen__) || defined(__native_client__)
+ /* for both compiler and runtime */
+ /* A PLT entry: */
+ /* mov <DISP>(%ebx), %ecx */
+ /* and 0xffffffe0, %ecx */
+ /* jmp *%ecx */
+ g_assert (code [0] == 0x8b);
+ g_assert (code [1] == 0x8b);
+
+ offset = *(guint32*)(code + 2);
+#else
/* A PLT entry: jmp *<DISP>(%ebx) */
g_assert (code [0] == 0xff);
g_assert (code [1] == 0xa3);
offset = *(guint32*)(code + 2);
-
+#endif /* __native_client_codegen__ */
if (!got)
got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
*(guint8**)((guint8*)got + offset) = addr;
static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
- guint8 buf [8];
+ /* Native Client call sequences are longer, so examine a 16-byte window. */
+ const int kBufSize = NACL_SIZE (8, 16);
+ guint8 buf [kBufSize];
guint8 reg = 0;
gint32 disp = 0;
- mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
+ mono_breakpoint_clean_code (NULL, code, kBufSize, buf, sizeof (buf));
code = buf + 8;
*displacement = 0;
- code -= 6;
-
+#if !defined(__native_client_codegen__) && !defined(__native_client__)
+ /* Only kBufSize == 8 bytes were copied here, so buf + 8 is one past the */
+ /* end of the buffer; step back over the 6-byte call so the code [0..5] */
+ /* reads below stay in bounds and examine the call window as before. */
+ /* (Under NaCl the 16-byte window makes buf + 8 the middle of buf, so */
+ /* the positive/negative indices used there are already valid.) */
+ code -= 6;
+#endif
if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
reg = code [1] & 0x07;
disp = *((gint32*)(code + 2));
+#if defined(__native_client_codegen__) || defined(__native_client__)
+ } else if ((code[1] == 0x83) && (code[2] == 0xe1) && (code[4] == 0xff) &&
+ (code[5] == 0xd1) && (code[-5] == 0x8b)) {
+ disp = *((gint32*)(code - 3));
+ reg = code[-4] & 0x07;
+ } else if ((code[-2] == 0x8b) && (code[1] == 0x83) && (code[4] == 0xff)) {
+ reg = code[-1] & 0x07;
+ disp = (signed char)code[0];
+#endif
} else {
g_assert_not_reached ();
return NULL;
tramp = mono_get_trampoline_code (tramp_type);
- code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4);
+ code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, NACL_SIZE (4, kNaClAlignment));
x86_push_imm (buf, arg1);
x86_jump_code (buf, tramp);
index -= size - 1;
}
+#ifdef __native_client_codegen__
+ /* TODO: align for Native Client */
+ tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
+ 6 * (depth + kNaClAlignment);
+#else
tramp_size = (aot ? 64 : 36) + 6 * depth;
+#endif /* __native_client_codegen__ */
code = buf = mono_global_codeman_reserve (tramp_size);
mono_arch_flush_icache (code, code - buf);
g_assert (code - buf <= tramp_size);
-
+#ifdef __native_client_codegen__
+ /* NOTE(review): this asserts the emitted stub fits in a single */
+ /* kNaClAlignment-sized bundle even though tramp_size above can be */
+ /* larger - confirm this bound is intended for this trampoline. */
+ g_assert (code - buf <= kNaClAlignment);
+#endif
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
- tramp_size = 64;
+ tramp_size = NACL_SIZE (64, 128);
code = buf = mono_global_codeman_reserve (tramp_size);
nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);
- tramp_size = 64;
+ tramp_size = NACL_SIZE (64, 128);
code = buf = mono_global_codeman_reserve (tramp_size);
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
- return *(guint32*)(plt_entry + 6);
+ /* The info slot follows the PLT entry: 6 bytes for the plain x86 */
+ /* `jmp *<disp>(%ebx)' entry, 12 under NaCl for the mov/and/jmp */
+ /* sequence. NOTE(review): mov(6)+and(3)+jmp(2) is 11 bytes - */
+ /* confirm the 12th byte is alignment padding. */
+ return *(guint32*)(plt_entry + NACL_SIZE (6, 12));
}