#if !defined(DISABLE_AOT) && !defined(DISABLE_JIT)
-#if defined(__linux__)
+#if defined(__linux__) || defined(__native_client_codegen__)
#define RODATA_SECT ".rodata"
#else
#define RODATA_SECT ".text"
#define SHARED_EXT ".dll"
#elif defined(__ppc__) && defined(__MACH__)
#define SHARED_EXT ".dylib"
+#elif defined(__APPLE__) && defined(TARGET_X86) && !defined(__native_client_codegen__)
+#define SHARED_EXT ".dylib"
#else
#define SHARED_EXT ".so"
#endif
img_writer_emit_byte (acfg->w, val);
}
+#ifdef __native_client_codegen__
+/*
+ * emit_nacl_call_alignment:
+ *
+ *   Emit padding so the upcoming call site satisfies Native Client's
+ * bundle-alignment constraints; delegates to the image writer.
+ */
+static inline void
+emit_nacl_call_alignment (MonoAotCompile *acfg)
+{
+ img_writer_emit_nacl_call_alignment (acfg->w);
+}
+#endif
+
static G_GNUC_UNUSED void
emit_global_inner (MonoAotCompile *acfg, const char *name, gboolean func)
{
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
+#if defined(TARGET_X86) && defined(__native_client_codegen__)
+#undef AOT_FUNC_ALIGNMENT
+#define AOT_FUNC_ALIGNMENT 32
+#endif
#if defined(TARGET_POWERPC64) && !defined(__mono_ilp32__)
#define PPC_LD_OP "ld"
#if defined(TARGET_X86)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
+#ifdef __native_client_codegen__
+ const guint8 kSizeOfNaClJmp = 11;
+ guint8 bytes[kSizeOfNaClJmp];
+ guint8 *pbytes = &bytes[0];
+
+ x86_jump_membase32 (pbytes, X86_EBX, offset);
+ emit_bytes (acfg, bytes, kSizeOfNaClJmp);
+ /* four bytes of data, used by mono_arch_patch_plt_entry */
+ /* For Native Client, make this work with data embedded in push. */
+ emit_byte (acfg, 0x68); /* hide data in a push */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#else
/* jmp *<offset>(%ebx) */
emit_byte (acfg, 0xff);
emit_byte (acfg, 0xa3);
emit_int32 (acfg, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#endif /* __native_client_codegen__ */
#elif defined(TARGET_AMD64)
/*
* We can't emit jumps because they are 32 bits only so they can't be patched.
/* Branch to generic trampoline */
x86_jump_reg (code, X86_ECX);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 17;
+ *tramp_size = NACL_SIZE(17, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
* CALL_TARGET is the symbol pointing to the native code of METHOD.
*/
static void
-arch_emit_unbox_trampoline (MonoAotCompile *acfg, MonoMethod *method, MonoGenericSharingContext *gsctx, const char *call_target)
+arch_emit_unbox_trampoline (MonoAotCompile *acfg, MonoMethod *method, const char *call_target)
{
#if defined(TARGET_AMD64)
guint8 buf [32];
guint8 *code;
int this_reg;
- this_reg = mono_arch_get_this_arg_reg (mono_method_signature (method), gsctx, NULL);
+ this_reg = mono_arch_get_this_arg_reg (NULL);
code = buf;
amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
guint8 *code;
int this_pos = 4;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 8;
-
code = buf;
x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
- int this_pos = 0;
code = buf;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 1;
-
- ARM_ADD_REG_IMM8 (code, this_pos, this_pos, sizeof (MonoObject));
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (MonoObject));
emit_bytes (acfg, buf, code - buf);
/* jump to method */
#elif defined(TARGET_POWERPC)
int this_pos = 3;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 4;
-
g_assert (!acfg->use_bin_writer);
fprintf (acfg->fp, "\n\taddi %d, %d, %d\n", this_pos, this_pos, (int)sizeof (MonoObject));
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
+
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 15;
+ *tramp_size = NACL_SIZE (15, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
/* FIXME: Optimize this, i.e. use binary search etc. */
/* Maybe move the body into a separate function (slower, but much smaller) */
- /* R10 is a free register */
+ /* R11 is a free register */
labels [0] = code;
- amd64_alu_membase_imm (code, X86_CMP, AMD64_R10, 0, 0);
+ amd64_alu_membase_imm (code, X86_CMP, AMD64_R11, 0, 0);
labels [1] = code;
amd64_branch8 (code, X86_CC_Z, FALSE, 0);
/* Check key */
- amd64_alu_membase_reg (code, X86_CMP, AMD64_R10, 0, MONO_ARCH_IMT_REG);
+ amd64_alu_membase_reg (code, X86_CMP, AMD64_R11, 0, MONO_ARCH_IMT_REG);
labels [2] = code;
amd64_branch8 (code, X86_CC_Z, FALSE, 0);
/* Loop footer */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_R10, 2 * sizeof (gpointer));
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, 2 * sizeof (gpointer));
amd64_jump_code (code, labels [0]);
/* Match */
mono_amd64_patch (labels [2], code);
- amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, sizeof (gpointer), 8);
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, sizeof (gpointer), 8);
+ amd64_jump_membase (code, AMD64_R11, 0);
/* No match */
/* FIXME: */
mono_amd64_patch (labels [1], code);
x86_breakpoint (code);
- /* mov <OFFSET>(%rip), %r10 */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 12345678, 8);
+
+ /* mov <OFFSET>(%rip), %r11 */
emit_byte (acfg, '\x4d');
emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x15');
+ emit_byte (acfg, '\x1d');
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf + 7;
#elif defined(TARGET_X86)
guint8 *buf, *code;
+#ifdef __native_client_codegen__
+ guint8 *buf_alloc;
+#endif
guint8 *labels [3];
+#ifdef __native_client_codegen__
+ buf_alloc = g_malloc (256 + kNaClAlignment);
+ code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
+#else
code = buf = g_malloc (256);
+#endif
/* Allocate a temporary stack slot */
x86_push_reg (code, X86_EAX);
mono_x86_patch (labels [1], code);
x86_breakpoint (code);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
* blob where the data was stored.
*/
static guint32
-add_to_blob (MonoAotCompile *acfg, guint8 *data, guint32 data_len)
+add_to_blob (MonoAotCompile *acfg, const guint8 *data, guint32 data_len)
{
if (acfg->blob.alloc_size == 0)
stream_init (&acfg->blob);
case MONO_WRAPPER_ALLOC: {
AllocatorWrapperInfo *info = mono_marshal_get_wrapper_info (method);
+ /* The GC name is saved once in MonoAotFileInfo */
g_assert (info->alloc_type != -1);
encode_value (info->alloc_type, p, &p);
break;
}
case MONO_WRAPPER_WRITE_BARRIER:
break;
- case MONO_WRAPPER_STELEMREF:
+ case MONO_WRAPPER_STELEMREF: {
+ MonoClass *klass = mono_marshal_get_wrapper_info (method);
+
+ /* Make sure this is the 'normal' stelemref wrapper, not the virtual one */
+ g_assert (!klass);
break;
+ }
case MONO_WRAPPER_UNKNOWN:
if (strcmp (method->name, "FastMonitorEnter") == 0)
encode_value (MONO_AOT_WRAPPER_MONO_ENTER, p, &p);
if (method)
add_method (acfg, method);
#endif
+
+ /* Stelemref wrappers */
+ /* There is only a constant number of these, iterating over all types should handle them all */
+ for (i = 0; i < acfg->image->tables [MONO_TABLE_TYPEDEF].rows; ++i) {
+ MonoClass *klass;
+
+ token = MONO_TOKEN_TYPE_DEF | (i + 1);
+ klass = mono_class_get (acfg->image, token);
+ if (klass)
+ add_method (acfg, mono_marshal_get_virtual_stelemref (mono_array_class_get (klass, 1)));
+ }
}
/*
return FALSE;
}
+/* Forward declaration: depth-aware worker defined below. */
+static void add_generic_class_with_depth (MonoAotCompile *acfg, MonoClass *klass, int depth);
+
+/*
+ * add_generic_class:
+ *
+ *   Add all methods of the generic class KLASS; convenience wrapper that
+ * starts the depth-tracked traversal at depth 0.
+ */
+static void
+add_generic_class (MonoAotCompile *acfg, MonoClass *klass)
+{
+ add_generic_class_with_depth (acfg, klass, 0);
+}
+
/*
* add_generic_class:
*
* Add all methods of a generic class.
*/
static void
-add_generic_class (MonoAotCompile *acfg, MonoClass *klass)
+add_generic_class_with_depth (MonoAotCompile *acfg, MonoClass *klass, int depth)
{
MonoMethod *method;
gpointer iter;
* FIXME: Instances which are referenced by these methods are not added,
* for example Array.Resize<int> for List<int>.Add ().
*/
- add_extra_method (acfg, method);
+ add_extra_method_with_depth (acfg, method, depth);
}
if (klass->delegate) {
while ((method = mono_class_get_methods (array_class, &iter))) {
if (strstr (method->name, name_prefix)) {
MonoMethod *m = mono_aot_get_array_helper_from_wrapper (method);
- add_extra_method (acfg, m);
+ add_extra_method_with_depth (acfg, m, depth);
}
}
add_generic_class (acfg, mono_class_inflate_generic_class (gcomparer, &ctx));
}
}
+
+ /* Add an instance of GenericEqualityComparer<T> which is created dynamically by EqualityComparer<T> */
+ if (klass->image == mono_defaults.corlib && !strcmp (klass->name_space, "System.Collections.Generic") && !strcmp (klass->name, "EqualityComparer`1")) {
+ MonoClass *tclass = mono_class_from_mono_type (klass->generic_class->context.class_inst->type_argv [0]);
+ MonoClass *iface, *gcomparer;
+ MonoGenericContext ctx;
+ MonoType *args [16];
+
+ memset (&ctx, 0, sizeof (ctx));
+
+ iface = mono_class_from_name (mono_defaults.corlib, "System", "IEquatable`1");
+ g_assert (iface);
+ args [0] = &tclass->byval_arg;
+ ctx.class_inst = mono_metadata_get_generic_inst (1, args);
+
+ if (mono_class_is_assignable_from (mono_class_inflate_generic_class (iface, &ctx), tclass)) {
+ gcomparer = mono_class_from_name (mono_defaults.corlib, "System.Collections.Generic", "GenericEqualityComparer`1");
+ g_assert (gcomparer);
+ add_generic_class (acfg, mono_class_inflate_generic_class (gcomparer, &ctx));
+ }
+ }
}
static void
method = cfg->orig_method;
code = cfg->native_code;
- header = mono_method_get_header (method);
+ header = cfg->header;
method_index = get_method_index (acfg, method);
encode_value (get_image_index (acfg, patch_info->data.image), p, &p);
break;
case MONO_PATCH_INFO_MSCORLIB_GOT_ADDR:
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
break;
case MONO_PATCH_INFO_METHOD_REL:
encode_value ((gint)patch_info->data.offset, p, &p);
continue;
}
+ if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR) {
+ /* Stored in a GOT slot initialized at module load time */
+ patch_info->type = MONO_PATCH_INFO_NONE;
+ continue;
+ }
+
if (is_plt_patch (patch_info)) {
/* Calls are made through the PLT */
patch_info->type = MONO_PATCH_INFO_NONE;
method = cfg->orig_method;
code = cfg->native_code;
- header = mono_method_get_header (method);
+ header = cfg->header;
method_index = get_method_index (acfg, method);
/* Exception table */
if (cfg->compile_llvm) {
+ /*
+ * When using LLVM, we can't emit some data, like pc offsets, this reg/offset etc.,
+ * since the information is only available to llc. Instead, we let llc save the data
+ * into the LSDA, and read it from there at runtime.
+ */
/* The assembly might be CIL stripped so emit the data ourselves */
if (header->num_clauses)
encode_value (header->num_clauses, p, &p);
} else {
encode_value (0, p, &p);
}
+
+ /* Emit a list of nesting clauses */
+ for (i = 0; i < header->num_clauses; ++i) {
+ gint32 cindex1 = k;
+ MonoExceptionClause *clause1 = &header->clauses [cindex1];
+ gint32 cindex2 = i;
+ MonoExceptionClause *clause2 = &header->clauses [cindex2];
+
+ if (cindex1 != cindex2 && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset)
+ encode_value (i, p, &p);
+ }
+ encode_value (-1, p, &p);
}
} else {
if (jinfo->num_clauses)
if (jinfo->has_generic_jit_info) {
MonoGenericJitInfo *gi = mono_jit_info_get_generic_jit_info (jinfo);
- encode_value (gi->has_this ? 1 : 0, p, &p);
- encode_value (gi->this_reg, p, &p);
- encode_value (gi->this_offset, p, &p);
+ if (!cfg->compile_llvm) {
+ encode_value (gi->has_this ? 1 : 0, p, &p);
+ encode_value (gi->this_reg, p, &p);
+ encode_value (gi->this_offset, p, &p);
+ }
/*
* Need to encode jinfo->method too, since it is not equal to 'method'
gboolean no_special_static, cant_encode;
gpointer iter = NULL;
+ if (!klass) {
+ buf_size = 16;
+
+ p = buf = g_malloc (buf_size);
+
+ /* Mark as unusable */
+ encode_value (-1, p, &p);
+
+ res = add_to_blob (acfg, buf, p - buf);
+ g_free (buf);
+
+ return res;
+ }
+
buf_size = 10240 + (klass->vtable_size * 16);
p = buf = g_malloc (buf_size);
ji = info->ji;
unwind_ops = info->unwind_ops;
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (code, ji);
+#endif
+
/* Emit code */
sprintf (start_symbol, "%s", name);
emit_section_change (acfg, ".text", 0);
emit_global (acfg, start_symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, start_symbol);
sprintf (symbol, "%snamed_%s", acfg->temp_prefix, name);
}
emit_global (acfg, symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, symbol);
acfg->trampoline_got_offset_base [ntype] = tramp_got_offset;
default:
g_assert_not_reached ();
}
+#ifdef __native_client_codegen__
+ /* align to avoid 32-byte boundary crossings */
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
if (!acfg->trampoline_size [ntype]) {
g_assert (tramp_size);
add_extra_method_with_depth (acfg, m, depth + 1);
}
}
- add_generic_class (acfg, m->klass);
+ add_generic_class_with_depth (acfg, m->klass, depth + 5);
}
if (m->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED && !strcmp (m->name, "ElementAddr"))
add_extra_method_with_depth (acfg, m, depth + 1);
MonoClass *klass = patch_info->data.klass;
if (klass->generic_class && !mono_generic_context_is_sharable (&klass->generic_class->context, FALSE))
- add_generic_class (acfg, klass);
+ add_generic_class_with_depth (acfg, klass, depth + 5);
break;
}
default:
switch (patch_info->type) {
case MONO_PATCH_INFO_GOT_OFFSET:
case MONO_PATCH_INFO_NONE:
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
break;
case MONO_PATCH_INFO_IMAGE:
/* The assembly is stored in GOT slot 0 */
* a lot of time, and doesn't seem to save much space.
* The following optimizations cannot be enabled:
* - 'tailcallelim'
+ * - 'jump-threading' changes our blockaddress references to int constants.
* The opt list below was produced by taking the output of:
* llvm-as < /dev/null | opt -O2 -disable-output -debug-pass=Arguments
* then removing tailcallelim + the global opts, and adding a second gvn.
*/
opts = g_strdup ("-instcombine -simplifycfg");
- opts = g_strdup ("-simplifycfg -domtree -domfrontier -scalarrepl -instcombine -simplifycfg -basiccg -prune-eh -inline -functionattrs -domtree -domfrontier -scalarrepl -simplify-libcalls -instcombine -jump-threading -simplifycfg -instcombine -simplifycfg -reassociate -domtree -loops -loopsimplify -domfrontier -loopsimplify -lcssa -loop-rotate -licm -lcssa -loop-unswitch -instcombine -scalar-evolution -loopsimplify -lcssa -iv-users -indvars -loop-deletion -loopsimplify -lcssa -loop-unroll -instcombine -memdep -gvn -memdep -memcpyopt -sccp -instcombine -jump-threading -domtree -memdep -dse -adce -gvn -simplifycfg -preverify -domtree -verify");
+ opts = g_strdup ("-simplifycfg -domtree -domfrontier -scalarrepl -instcombine -simplifycfg -basiccg -prune-eh -inline -functionattrs -domtree -domfrontier -scalarrepl -simplify-libcalls -instcombine -simplifycfg -instcombine -simplifycfg -reassociate -domtree -loops -loopsimplify -domfrontier -loopsimplify -lcssa -loop-rotate -licm -lcssa -loop-unswitch -instcombine -scalar-evolution -loopsimplify -lcssa -iv-users -indvars -loop-deletion -loopsimplify -lcssa -loop-unroll -instcombine -memdep -gvn -memdep -memcpyopt -sccp -instcombine -domtree -memdep -dse -adce -gvn -simplifycfg -preverify -domtree -verify");
#if 1
command = g_strdup_printf ("opt -f %s -o temp.opt.bc temp.bc", opts);
printf ("Executing opt: %s\n", command);
}
emit_section_change (acfg, ".text", 0);
+#ifdef __native_client_codegen__
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
emit_global (acfg, symbol, TRUE);
emit_label (acfg, symbol);
sprintf (call_target, "%s", cfg->asm_symbol);
- arch_emit_unbox_trampoline (acfg, cfg->orig_method, cfg->generic_sharing_context, call_target);
+ arch_emit_unbox_trampoline (acfg, cfg->orig_method, call_target);
}
if (cfg->compile_llvm)
{
MonoMethodSignature *sig;
MonoClass *klass;
- int i;
+ int i, hindex;
int hashes_count;
guint32 *hashes_start, *hashes;
guint32 a, b, c;
+ MonoGenericInst *ginst = NULL;
/* Similar to the hash in mono_method_get_imt_slot () */
sig = mono_method_signature (method);
- hashes_count = sig->param_count + 5;
- hashes_start = malloc (hashes_count * sizeof (guint32));
+ if (method->is_inflated)
+ ginst = ((MonoMethodInflated*)method)->context.method_inst;
+
+ hashes_count = sig->param_count + 5 + (ginst ? ginst->type_argc : 0);
+ hashes_start = g_malloc0 (hashes_count * sizeof (guint32));
hashes = hashes_start;
/* Some wrappers are assigned to random classes */
hashes [2] = mono_metadata_str_hash (method->name);
hashes [3] = method->wrapper_type;
hashes [4] = mono_aot_type_hash (sig->ret);
+ hindex = 5;
for (i = 0; i < sig->param_count; i++) {
- hashes [5 + i] = mono_aot_type_hash (sig->params [i]);
+ hashes [hindex ++] = mono_aot_type_hash (sig->params [i]);
}
-
+ if (ginst) {
+ for (i = 0; i < ginst->type_argc; ++i)
+ hashes [hindex ++] = mono_aot_type_hash (ginst->type_argv [i]);
+ }
+ g_assert (hindex == hashes_count);
+
/* Setup internal state */
a = b = c = 0xdeadbeef + (((guint32)hashes_count)<<2);
if (!cfg)
continue;
- buf_size = 512;
+ buf_size = 10240;
p = buf = g_malloc (buf_size);
nmethods ++;
for (i = 0; i < acfg->image->tables [MONO_TABLE_TYPEDEF].rows; ++i) {
token = MONO_TOKEN_TYPE_DEF | (i + 1);
klass = mono_class_get (acfg->image, token);
+ if (!klass)
+ continue;
full_name = mono_type_get_name_full (mono_class_get_type (klass), MONO_TYPE_NAME_FORMAT_FULL_NAME);
hash = mono_metadata_str_hash (full_name) % table_size;
g_free (full_name);
* Emit a global symbol which can be passed by an embedding app to
* mono_aot_register_module ().
*/
-#if defined(__MACH__)
+#if defined(__MACH__) && !defined(__native_client_codegen__)
sprintf (symbol, "_mono_aot_module_%s_info", acfg->image->assembly->aname.name);
#else
sprintf (symbol, "mono_aot_module_%s_info", acfg->image->assembly->aname.name);
{
char symbol [128];
int i;
+ int gc_name_offset;
+ const char *gc_name;
+
+ /*
+ * The managed allocators are GC specific, so can't use an AOT image created by one GC
+ * in another.
+ */
+ gc_name = mono_gc_get_gc_name ();
+ gc_name_offset = add_to_blob (acfg, (guint8*)gc_name, strlen (gc_name) + 1);
sprintf (symbol, "mono_aot_file_info");
emit_section_change (acfg, ".data", 0);
emit_label (acfg, symbol);
emit_global (acfg, symbol, FALSE);
- /* The data emitted here must match MonoAotFileInfo in aot-runtime.c. */
+ /* The data emitted here must match MonoAotFileInfo. */
emit_int32 (acfg, acfg->plt_got_offset_base);
emit_int32 (acfg, (int)(acfg->got_offset * sizeof (gpointer)));
emit_int32 (acfg, acfg->plt_offset);
emit_int32 (acfg, acfg->nmethods);
emit_int32 (acfg, acfg->flags);
emit_int32 (acfg, acfg->opts);
+ emit_int32 (acfg, gc_name_offset);
for (i = 0; i < MONO_AOT_TRAMP_NUM; ++i)
emit_int32 (acfg, acfg->num_trampolines [i]);
#define LD_OPTIONS "-m elf64ppc"
#elif defined(sparc) && SIZEOF_VOID_P == 8
#define AS_OPTIONS "-xarch=v9"
+#elif defined(TARGET_X86) && defined(__APPLE__) && !defined(__native_client_codegen__)
+#define AS_OPTIONS "-arch i386 -W"
#else
#define AS_OPTIONS ""
#endif
+#ifdef __native_client_codegen__
+#define AS_NAME "nacl-as"
+#else
+#define AS_NAME "as"
+#endif
+
#ifndef LD_OPTIONS
#define LD_OPTIONS ""
#endif
} else {
objfile = g_strdup_printf ("%s.o", acfg->tmpfname);
}
- command = g_strdup_printf ("%sas %s %s -o %s", tool_prefix, AS_OPTIONS, acfg->tmpfname, objfile);
+ command = g_strdup_printf ("%s%s %s %s -o %s", tool_prefix, AS_NAME, AS_OPTIONS, acfg->tmpfname, objfile);
printf ("Executing the native assembler: %s\n", command);
if (system (command) != 0) {
g_free (command);
command = g_strdup_printf ("gcc -dynamiclib -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
#elif defined(HOST_WIN32)
command = g_strdup_printf ("gcc -shared --dll -mno-cygwin -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
+#elif defined(TARGET_X86) && defined(__APPLE__) && !defined(__native_client_codegen__)
+ command = g_strdup_printf ("gcc -m32 -dynamiclib -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
#else
command = g_strdup_printf ("%sld %s %s -shared -o %s %s.o", tool_prefix, EH_LD_OPTIONS, LD_OPTIONS, tmp_outfile_name, acfg->tmpfname);
#endif
}
}
-#ifdef ENABLE_LLVM
- acfg->llvm = TRUE;
- acfg->aot_opts.asm_writer = TRUE;
- acfg->flags |= MONO_AOT_FILE_FLAG_WITH_LLVM;
-#endif
+ if (mono_use_llvm) {
+ acfg->llvm = TRUE;
+ acfg->aot_opts.asm_writer = TRUE;
+ acfg->flags |= MONO_AOT_FILE_FLAG_WITH_LLVM;
+ }
if (acfg->aot_opts.full_aot)
acfg->flags |= MONO_AOT_FILE_FLAG_FULL_AOT;
acfg->plt_offset = 1;
#ifdef ENABLE_LLVM
- llvm_acfg = acfg;
- mono_llvm_create_aot_module (acfg->got_symbol_base);
+ if (acfg->llvm) {
+ llvm_acfg = acfg;
+ mono_llvm_create_aot_module (acfg->got_symbol_base);
+ }
#endif
/* GOT offset 0 is reserved for the address of the current assembly */
ji = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
ji->type = MONO_PATCH_INFO_MSCORLIB_GOT_ADDR;
get_got_offset (acfg, ji);
+
+ /* This is very common */
+ ji = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
+ ji->type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
+ get_got_offset (acfg, ji);
}
TV_GETTIME (atv);
} else {
acfg->tmpfname = g_strdup ("temp.s");
}
- }
- emit_llvm_file (acfg);
+ emit_llvm_file (acfg);
+ }
#endif
if (!acfg->aot_opts.asm_only && !acfg->aot_opts.asm_writer && bin_writer_supported ()) {