#if !defined(DISABLE_AOT) && !defined(DISABLE_JIT)
-#if defined(__linux__)
+#if defined(__linux__) || defined(__native_client_codegen__)
#define RODATA_SECT ".rodata"
#else
#define RODATA_SECT ".text"
#define SHARED_EXT ".dll"
#elif defined(__ppc__) && defined(__MACH__)
#define SHARED_EXT ".dylib"
+#elif defined(__APPLE__) && defined(TARGET_X86) && !defined(__native_client_codegen__)
+#define SHARED_EXT ".dylib"
#else
#define SHARED_EXT ".so"
#endif
gboolean stats;
char *tool_prefix;
gboolean autoreg;
+ char *mtriple;
} MonoAotOptions;
typedef struct MonoAotStats {
gboolean llvm;
MonoAotFileFlags flags;
MonoDynamicStream blob;
+ MonoClass **typespec_classes;
+ GString *llc_args;
+ GString *as_args;
+ gboolean thumb_mixed;
} MonoAotCompile;
typedef struct {
int plt_offset;
- char *symbol;
+ char *symbol, *llvm_symbol, *debug_sym;
MonoJumpInfo *ji;
+ gboolean jit_used, llvm_used;
} MonoPltEntry;
#define mono_acfg_lock(acfg) EnterCriticalSection (&((acfg)->mutex))
img_writer_emit_byte (acfg->w, val);
}
+#ifdef __native_client_codegen__
+/*
+ * emit_nacl_call_alignment:
+ *
+ *   Thin wrapper forwarding to the image writer's Native Client
+ * call-alignment emitter for ACFG's writer.
+ */
+static inline void
+emit_nacl_call_alignment (MonoAotCompile *acfg)
+{
+ img_writer_emit_nacl_call_alignment (acfg->w);
+}
+#endif
+
static G_GNUC_UNUSED void
emit_global_inner (MonoAotCompile *acfg, const char *name, gboolean func)
{
img_writer_emit_string (acfg->w, value);
}
+/*
+ * emit_local_string_symbol:
+ *
+ *   Emit VALUE as a string labelled NAME into the read-only data
+ * section (RODATA_SECT).  Unlike the global variant, no emit_global
+ * call is made, so the label stays local to the object file.
+ */
+static void
+emit_local_string_symbol (MonoAotCompile *acfg, const char *name, const char *value)
+{
+ img_writer_emit_section_change (acfg->w, RODATA_SECT, 1);
+ img_writer_emit_label (acfg->w, name);
+ img_writer_emit_string (acfg->w, value);
+}
+
static G_GNUC_UNUSED void
emit_uleb128 (MonoAotCompile *acfg, guint32 value)
{
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
+#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && defined(__native_client_codegen__)
+#undef AOT_FUNC_ALIGNMENT
+#define AOT_FUNC_ALIGNMENT 32
+#endif
#if defined(TARGET_POWERPC64) && !defined(__mono_ilp32__)
#define PPC_LD_OP "ld"
#define PPC_LDX_OP "lwzx"
#endif
-//#define TARGET_ARM
+/*
+ * arch_init:
+ *
+ *   Initialize the architecture specific state of ACFG: the argument
+ * strings later passed to llc/as, the prefix prepended to LLVM
+ * generated symbols, and, on ARM, options derived from the target
+ * triple (aot_opts.mtriple).
+ */
+static void
+arch_init (MonoAotCompile *acfg)
+{
+ acfg->llc_args = g_string_new ("");
+ acfg->as_args = g_string_new ("");
+
+ /*
+ * The prefix LLVM likes to put in front of symbol names on darwin.
+ * The Mach-O specs require this for globals, but LLVM puts them in front of all
+ * symbols. We need to handle this, since we need to refer to LLVM generated
+ * symbols.
+ */
+ acfg->llvm_label_prefix = "";
#ifdef TARGET_ARM
-#define LLVM_LABEL_PREFIX "_"
+ /* darwin triples get the '_' symbol prefix and armv6 codegen */
+ if (acfg->aot_opts.mtriple && strstr (acfg->aot_opts.mtriple, "darwin")) {
+ g_string_append (acfg->llc_args, "-mattr=+v6");
+ acfg->llvm_label_prefix = "_";
+ } else {
#ifdef ARM_FPU_VFP
+ g_string_append (acfg->llc_args, " -mattr=+vfp2,+d16");
+ g_string_append (acfg->as_args, " -mfpu=vfp3");
#else
-#define LLVM_LABEL_PREFIX ""
+ g_string_append (acfg->llc_args, " -soft-float");
#endif
+ }
+ /* NOTE(review): presumably enables mixed ARM/Thumb output — confirm with the flag's consumers */
+ if (acfg->aot_opts.mtriple && strstr (acfg->aot_opts.mtriple, "thumb"))
+ acfg->thumb_mixed = TRUE;
-#ifdef TARGET_ARM
-/* iphone */
-#define LLC_TARGET_ARGS "-march=arm -mattr=+v6 -mtriple=arm-apple-darwin"
-/* ELF */
-//#define LLC_TARGET_ARGS "-march=arm -mtriple=arm-linux-gnueabi -soft-float"
-#else
-#define LLC_TARGET_ARGS ""
+ if (acfg->aot_opts.mtriple)
+ mono_arch_set_target (acfg->aot_opts.mtriple);
#endif
+}
/*
* arch_emit_direct_call:
{
#if defined(TARGET_X86)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
-
+#if defined(__default_codegen__)
/* jmp *<offset>(%ebx) */
emit_byte (acfg, 0xff);
emit_byte (acfg, 0xa3);
emit_int32 (acfg, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#elif defined(__native_client_codegen__)
+ const guint8 kSizeOfNaClJmp = 11;
+ guint8 bytes[kSizeOfNaClJmp];
+ guint8 *pbytes = &bytes[0];
+
+ x86_jump_membase32 (pbytes, X86_EBX, offset);
+ emit_bytes (acfg, bytes, kSizeOfNaClJmp);
+ /* four bytes of data, used by mono_arch_patch_plt_entry */
+ /* For Native Client, make this work with data embedded in push. */
+ emit_byte (acfg, 0x68); /* hide data in a push */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/*
* We can't emit jumps because they are 32 bits only so they can't be patched.
* So we make indirect calls through GOT entries which are patched by the AOT
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) -4);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#elif defined(__native_client_codegen__)
+ guint8 buf [256];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) -4);
+
+ amd64_jump_reg (code, AMD64_R11);
+ /* This should be constant for the plt patch */
+ g_assert ((size_t)(code-buf_aligned) == 10);
+ emit_bytes (acfg, buf_aligned, code - buf_aligned);
+
+ /* Hide data in a push imm32 so it passes validation */
+ emit_byte (acfg, 0x68); /* push */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [256];
guint8 *code;
- /* FIXME:
- * - optimize OP_AOTCONST implementation
- * - optimize the PLT entries
- * - optimize SWITCH AOT implementation
- */
code = buf;
- if (acfg->use_bin_writer && FALSE) {
- /* FIXME: mono_arch_patch_plt_entry () needs to decode this */
- /* We only emit 1 relocation since we implement it ourselves anyway */
- img_writer_emit_reloc (acfg->w, R_ARM_ALU_PC_G0_NC, acfg->got_symbol, ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 8);
- /* FIXME: A 2 instruction encoding is sufficient in most cases */
- ARM_ADD_REG_IMM (code, ARMREG_IP, ARMREG_PC, 0, 0);
- ARM_ADD_REG_IMM (code, ARMREG_IP, ARMREG_IP, 0, 0);
- ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
- emit_bytes (acfg, buf, code - buf);
- /* Used by mono_aot_get_plt_info_offset */
- emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
- } else {
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
- ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
- emit_bytes (acfg, buf, code - buf);
- emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 4);
- /* Used by mono_aot_get_plt_info_offset */
- emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
- }
- /*
- * The plt_got_info_offset is computed automatically by
- * mono_aot_get_plt_info_offset (), so no need to save it here.
- */
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
+ ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
+ emit_bytes (acfg, buf, code - buf);
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) - 4);
+ /* Used by mono_aot_get_plt_info_offset */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
#elif defined(TARGET_POWERPC)
guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
#endif
}
+/*
+ * arch_emit_llvm_plt_entry:
+ *
+ *   Emit the INDEX-th PLT entry in a form callable from LLVM compiled
+ * code.  LLVM calls PLT entries using bl, so on ARM the entry has to
+ * be thumb2; only ARM is supported here.
+ */
+static void
+arch_emit_llvm_plt_entry (MonoAotCompile *acfg, int index)
+{
+#if defined(TARGET_ARM)
+#if 0
+ /* LLVM calls the PLT entries using bl, so emit a stub */
+ /* FIXME: Too much overhead on every call */
+ fprintf (acfg->fp, ".thumb_func\n");
+ fprintf (acfg->fp, "bx pc\n");
+ fprintf (acfg->fp, "nop\n");
+ fprintf (acfg->fp, ".arm\n");
+#endif
+ /* LLVM calls the PLT entries using bl, so these have to be thumb2 */
+ fprintf (acfg->fp, ".thumb_func\n");
+ /* The code below should be 12 bytes long */
+ fprintf (acfg->fp, "ldr ip, [pc, #8]\n");
+ /* thumb can't encode ld pc, [pc, ip] */
+ fprintf (acfg->fp, "add ip, pc, ip\n");
+ fprintf (acfg->fp, "ldr ip, [ip, #0]\n");
+ fprintf (acfg->fp, "bx ip\n");
+ /* GOT displacement word read by the 'ldr ip, [pc, #8]' above */
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((acfg->plt_got_offset_base + index) * sizeof (gpointer)) + 4);
+ /* Used by mono_aot_get_plt_info_offset */
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
+#else
+ g_assert_not_reached ();
+#endif
+}
+
/*
* arch_emit_specific_trampoline:
*
* - all the trampolines should be of the same length.
*/
#if defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/* This should be exactly 16 bytes long */
*tramp_size = 16;
/* call *<offset>(%rip) */
emit_byte (acfg, '\x15');
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
/* This should be relative to the start of the trampoline */
- emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4 + 19);
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset+1) * sizeof (gpointer)) + 7);
emit_zero_bytes (acfg, 5);
+#elif defined(__native_client_codegen__)
+ guint8 buf [256];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+ guint8 *call_start;
+ size_t call_len;
+ int got_offset;
+
+ /* Emit this call in 'code' so we can find out how long it is. */
+ amd64_call_reg (code, AMD64_R11);
+ call_start = mono_arch_nacl_skip_nops (buf_aligned);
+ call_len = code - call_start;
+
+ /* The tramp_size is twice the NaCl alignment because it starts with */
+ /* a call which needs to be aligned to the end of the boundary. */
+ *tramp_size = kNaClAlignment*2;
+ {
+ /* Emit nops to align call site below which is 7 bytes plus */
+ /* the length of the call sequence emitted above. */
+ /* Note: this requires the specific trampoline starts on a */
+ /* kNaClAlignment aligned address, which it does because */
+ /* it's its own function that is aligned. */
+ guint8 nop_buf[256];
+ guint8 *nopbuf_aligned = ALIGN_TO (nop_buf, kNaClAlignment);
+ guint8 *nopbuf_end = mono_arch_nacl_pad (nopbuf_aligned, kNaClAlignment - 7 - (call_len));
+ emit_bytes (acfg, nopbuf_aligned, nopbuf_end - nopbuf_aligned);
+ }
+ /* The trampoline is stored at the offset'th pointer, the -4 is */
+ /* present because RIP relative addressing starts at the end of */
+ /* the current instruction, while the label "." is relative to */
+ /* the beginning of the current asm location, which in this case */
+ /* is not the mov instruction, but the offset itself, due to the */
+ /* way the bytes and ints are emitted here. */
+ got_offset = (offset * sizeof(gpointer)) - 4;
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
+
+ /* naclcall %r11 */
+ emit_bytes (acfg, call_start, call_len);
+
+ /* The arg is stored at the offset+1 pointer, relative to beginning */
+ /* of trampoline: 7 for mov, plus the call length, and 1 for push. */
+ got_offset = ((offset + 1) * sizeof(gpointer)) + 7 + call_len + 1;
+
+ /* We can't emit this data directly, hide in a "push imm32" */
+ emit_byte (acfg, '\x68'); /* push */
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
+ emit_alignment (acfg, kNaClAlignment);
+#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to generic trampoline */
x86_jump_reg (code, X86_ECX);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 17;
+ *tramp_size = NACL_SIZE(17, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
* CALL_TARGET is the symbol pointing to the native code of METHOD.
*/
static void
-arch_emit_unbox_trampoline (MonoAotCompile *acfg, MonoMethod *method, MonoGenericSharingContext *gsctx, const char *call_target)
+arch_emit_unbox_trampoline (MonoAotCompile *acfg, MonoMethod *method, const char *call_target)
{
#if defined(TARGET_AMD64)
guint8 buf [32];
guint8 *code;
int this_reg;
- this_reg = mono_arch_get_this_arg_reg (mono_method_signature (method), gsctx, NULL);
+ this_reg = mono_arch_get_this_arg_reg (NULL);
code = buf;
amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
guint8 *code;
int this_pos = 4;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 8;
-
code = buf;
x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
- int this_pos = 0;
code = buf;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 1;
-
- ARM_ADD_REG_IMM8 (code, this_pos, this_pos, sizeof (MonoObject));
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (MonoObject));
emit_bytes (acfg, buf, code - buf);
/* jump to method */
#elif defined(TARGET_POWERPC)
int this_pos = 3;
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
- this_pos = 4;
-
g_assert (!acfg->use_bin_writer);
fprintf (acfg->fp, "\n\taddi %d, %d, %d\n", this_pos, this_pos, (int)sizeof (MonoObject));
arch_emit_static_rgctx_trampoline (MonoAotCompile *acfg, int offset, int *tramp_size)
{
#if defined(TARGET_AMD64)
+#if defined(__default_codegen__)
/* This should be exactly 13 bytes long */
*tramp_size = 13;
emit_byte (acfg, '\xff');
emit_byte (acfg, '\x25');
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
+#elif defined(__native_client_codegen__)
+ guint8 buf [128];
+ guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
+ guint8 *code = buf_aligned;
+
+ /* mov <OFFSET>(%rip), %r10d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x15');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
+
+ /* mov <OFFSET>(%rip), %r11d */
+ emit_byte (acfg, '\x45');
+ emit_byte (acfg, '\x8b');
+ emit_byte (acfg, '\x1d');
+ emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
+
+ /* nacljmp *%r11 */
+ amd64_jump_reg (code, AMD64_R11);
+ emit_bytes (acfg, buf_aligned, code - buf_aligned);
+
+ emit_alignment (acfg, kNaClAlignment);
+ *tramp_size = kNaClAlignment;
+#endif /*__native_client_codegen__*/
+
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
+
emit_bytes (acfg, buf, code - buf);
- *tramp_size = 15;
+ *tramp_size = NACL_SIZE (15, kNaClAlignment);
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
{
#if defined(TARGET_AMD64)
guint8 *buf, *code;
+#if defined(__native_client_codegen__)
+ guint8 *buf_alloc;
+#endif
guint8 *labels [3];
+ guint8 mov_buf[3];
+ guint8 *mov_buf_ptr = mov_buf;
+ const int kSizeOfMove = 7;
+#if defined(__default_codegen__)
code = buf = g_malloc (256);
+#elif defined(__native_client_codegen__)
+ buf_alloc = g_malloc (256 + kNaClAlignment + kSizeOfMove);
+ buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
+ /* The RIP relative move below is emitted first */
+ buf += kSizeOfMove;
+ code = buf;
+#endif
/* FIXME: Optimize this, i.e. use binary search etc. */
/* Maybe move the body into a separate function (slower, but much smaller) */
- /* R10 is a free register */
+ /* MONO_ARCH_IMT_SCRATCH_REG is a free register */
labels [0] = code;
- amd64_alu_membase_imm (code, X86_CMP, AMD64_R10, 0, 0);
+ amd64_alu_membase_imm (code, X86_CMP, MONO_ARCH_IMT_SCRATCH_REG, 0, 0);
labels [1] = code;
- amd64_branch8 (code, X86_CC_Z, FALSE, 0);
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Check key */
- amd64_alu_membase_reg (code, X86_CMP, AMD64_R10, 0, MONO_ARCH_IMT_REG);
+ amd64_alu_membase_reg_size (code, X86_CMP, MONO_ARCH_IMT_SCRATCH_REG, 0, MONO_ARCH_IMT_REG, sizeof (gpointer));
labels [2] = code;
- amd64_branch8 (code, X86_CC_Z, FALSE, 0);
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Loop footer */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_R10, 2 * sizeof (gpointer));
+ amd64_alu_reg_imm (code, X86_ADD, MONO_ARCH_IMT_SCRATCH_REG, 2 * sizeof (gpointer));
amd64_jump_code (code, labels [0]);
/* Match */
mono_amd64_patch (labels [2], code);
- amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, sizeof (gpointer), 8);
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_membase (code, MONO_ARCH_IMT_SCRATCH_REG, MONO_ARCH_IMT_SCRATCH_REG, sizeof (gpointer), sizeof (gpointer));
+ amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
/* No match */
/* FIXME: */
mono_amd64_patch (labels [1], code);
x86_breakpoint (code);
- /* mov <OFFSET>(%rip), %r10 */
- emit_byte (acfg, '\x4d');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x15');
+ /* mov <OFFSET>(%rip), MONO_ARCH_IMT_SCRATCH_REG */
+ amd64_emit_rex (mov_buf_ptr, sizeof(gpointer), MONO_ARCH_IMT_SCRATCH_REG, 0, AMD64_RIP);
+ *(mov_buf_ptr)++ = (unsigned char)0x8b; /* mov opcode */
+ x86_address_byte (mov_buf_ptr, 0, MONO_ARCH_IMT_SCRATCH_REG & 0x7, 5);
+ emit_bytes (acfg, mov_buf, mov_buf_ptr - mov_buf);
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
emit_bytes (acfg, buf, code - buf);
- *tramp_size = code - buf + 7;
+ *tramp_size = code - buf + kSizeOfMove;
+#if defined(__native_client_codegen__)
+ /* The tramp will be padded to the next kNaClAlignment bundle. */
+ *tramp_size = ALIGN_TO ((*tramp_size), kNaClAlignment);
+#endif
+
+#if defined(__default_codegen__)
+ g_free (buf);
+#elif defined(__native_client_codegen__)
+ g_free (buf_alloc);
+#endif
+
#elif defined(TARGET_X86)
guint8 *buf, *code;
+#ifdef __native_client_codegen__
+ guint8 *buf_alloc;
+#endif
guint8 *labels [3];
+#if defined(__default_codegen__)
code = buf = g_malloc (256);
+#elif defined(__native_client_codegen__)
+ buf_alloc = g_malloc (256 + kNaClAlignment);
+ code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
+#endif
/* Allocate a temporary stack slot */
x86_push_reg (code, X86_EAX);
mono_x86_patch (labels [1], code);
x86_breakpoint (code);
+#ifdef __native_client_codegen__
+ {
+ /* emit nops to next 32 byte alignment */
+ int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
+ while (code < (buf + a)) x86_nop(code);
+ }
+#endif
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
+
+#if defined(__default_codegen__)
+ g_free (buf);
+#elif defined(__native_client_codegen__)
+ g_free (buf_alloc);
+#endif
+
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code, *code2, *labels [16];
sh->data = g_malloc (4096);
/* So offsets are > 0 */
+ sh->data [0] = 0;
sh->index ++;
}
* blob where the data was stored.
*/
static guint32
-add_to_blob (MonoAotCompile *acfg, guint8 *data, guint32 data_len)
+add_to_blob (MonoAotCompile *acfg, const guint8 *data, guint32 data_len)
{
if (acfg->blob.alloc_size == 0)
stream_init (&acfg->blob);
return add_stream_data (&acfg->blob, (char*)data, data_len);
}
+/*
+ * add_to_blob_aligned:
+ *
+ *   Add DATA (DATA_LEN bytes) to the binary blob, first padding with
+ * zero bytes so the data starts at an offset which is a multiple of
+ * ALIGN.  Returns the result of add_stream_data () for the data itself.
+ * NOTE(review): the pad length is computed as 4 - count and buf is only
+ * 4 bytes, so this is only correct for ALIGN values dividing 4 —
+ * confirm callers never pass a larger alignment.
+ */
+static guint32
+add_to_blob_aligned (MonoAotCompile *acfg, const guint8 *data, guint32 data_len, guint32 align)
+{
+ char buf [4] = {0};
+ guint32 count;
+
+ if (acfg->blob.alloc_size == 0)
+ stream_init (&acfg->blob);
+
+ count = acfg->blob.index % align;
+
+ /* we assume the stream data will be aligned */
+ if (count)
+ add_stream_data (&acfg->blob, buf, 4 - count);
+
+ return add_stream_data (&acfg->blob, (char*)data, data_len);
+}
+
/*
* emit_offset_table:
*
find_typespec_for_class (MonoAotCompile *acfg, MonoClass *klass)
{
int i;
- MonoClass *k = NULL;
+ int len = acfg->image->tables [MONO_TABLE_TYPESPEC].rows;
/* FIXME: Search referenced images as well */
- for (i = 0; i < acfg->image->tables [MONO_TABLE_TYPESPEC].rows; ++i) {
- k = mono_class_get_full (acfg->image, MONO_TOKEN_TYPE_SPEC | (i + 1), NULL);
- if (k == klass)
+ if (!acfg->typespec_classes) {
+ acfg->typespec_classes = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoClass*) * len);
+ for (i = 0; i < len; ++i) {
+ acfg->typespec_classes [i] = mono_class_get_full (acfg->image, MONO_TOKEN_TYPE_SPEC | (i + 1), NULL);
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (acfg->typespec_classes [i] == klass)
break;
}
- if (i < acfg->image->tables [MONO_TABLE_TYPESPEC].rows)
+ if (i < len)
return MONO_TOKEN_TYPE_SPEC | (i + 1);
else
return 0;
case MONO_WRAPPER_ALLOC: {
AllocatorWrapperInfo *info = mono_marshal_get_wrapper_info (method);
+ /* The GC name is saved once in MonoAotFileInfo */
g_assert (info->alloc_type != -1);
encode_value (info->alloc_type, p, &p);
break;
}
case MONO_WRAPPER_WRITE_BARRIER:
break;
- case MONO_WRAPPER_STELEMREF:
+ case MONO_WRAPPER_STELEMREF: {
+ MonoClass *klass = mono_marshal_get_wrapper_info (method);
+
+ /* Make sure this is the 'normal' stelemref wrapper, not the virtual one */
+ g_assert (!klass);
break;
+ }
case MONO_WRAPPER_UNKNOWN:
- if (strcmp (method->name, "FastMonitorEnter") == 0)
+ if (strcmp (method->name, "FastMonitorEnter") == 0) {
encode_value (MONO_AOT_WRAPPER_MONO_ENTER, p, &p);
- else if (strcmp (method->name, "FastMonitorExit") == 0)
+ } else if (strcmp (method->name, "FastMonitorExit") == 0) {
encode_value (MONO_AOT_WRAPPER_MONO_EXIT, p, &p);
- else
+ } else if (strcmp (method->name, "PtrToStructure") == 0) {
+ encode_value (MONO_AOT_WRAPPER_PTR_TO_STRUCTURE, p, &p);
+ encode_klass_ref (acfg, method->klass, p, &p);
+ } else if (strcmp (method->name, "StructureToPtr") == 0) {
+ encode_value (MONO_AOT_WRAPPER_STRUCTURE_TO_PTR, p, &p);
+ encode_klass_ref (acfg, method->klass, p, &p);
+ } else {
g_assert_not_reached ();
+ }
break;
case MONO_WRAPPER_SYNCHRONIZED:
case MONO_WRAPPER_MANAGED_TO_NATIVE:
*/
return g_strdup_printf ("%sp_%d", acfg->llvm_label_prefix, plt_offset);
#else
- return g_strdup_printf ("%s%sp_%d", acfg->llvm_label_prefix, acfg->temp_prefix, plt_offset);
+ return g_strdup_printf ("%sp_%d", acfg->temp_prefix, plt_offset);
#endif
}
res->plt_offset = acfg->plt_offset;
res->ji = new_ji;
res->symbol = get_plt_symbol (acfg, res->plt_offset, patch_info);
+ res->llvm_symbol = g_strdup_printf ("%s_llvm", res->symbol);
g_hash_table_insert (acfg->patch_to_plt_entry, new_ji, res);
/* Skip methods which can not be handled by get_runtime_invoke () */
sig = mono_method_signature (method);
+ if (!sig)
+ continue;
if ((sig->ret->type == MONO_TYPE_PTR) ||
(sig->ret->type == MONO_TYPE_TYPEDBYREF))
skip = TRUE;
if (method)
add_method (acfg, method);
#endif
+
+ /* Stelemref wrappers */
+ /* There is only a constant number of these, iterating over all types should handle them all */
+ for (i = 0; i < acfg->image->tables [MONO_TABLE_TYPEDEF].rows; ++i) {
+ MonoClass *klass;
+
+ token = MONO_TOKEN_TYPE_DEF | (i + 1);
+ klass = mono_class_get (acfg->image, token);
+ if (klass)
+ add_method (acfg, mono_marshal_get_virtual_stelemref (mono_array_class_get (klass, 1)));
+ }
}
/*
return FALSE;
}
+static void add_generic_class_with_depth (MonoAotCompile *acfg, MonoClass *klass, int depth);
+
+/*
+ * add_generic_class:
+ *
+ *   Add all methods of the generic class KLASS starting at recursion
+ * depth 0.  Does nothing unless compiling in full-aot mode or FORCE is
+ * set, to avoid a potentially huge code blowup.
+ */
+static void
+add_generic_class (MonoAotCompile *acfg, MonoClass *klass, gboolean force)
+{
+ /* This might lead to a huge code blowup so only do it if necessary */
+ if (!acfg->aot_opts.full_aot && !force)
+ return;
+
+ add_generic_class_with_depth (acfg, klass, 0);
+}
+
/*
* add_generic_class:
*
* Add all methods of a generic class.
*/
static void
-add_generic_class (MonoAotCompile *acfg, MonoClass *klass)
+add_generic_class_with_depth (MonoAotCompile *acfg, MonoClass *klass, int depth)
{
MonoMethod *method;
gpointer iter;
* FIXME: Instances which are referenced by these methods are not added,
* for example Array.Resize<int> for List<int>.Add ().
*/
- add_extra_method (acfg, method);
+ add_extra_method_with_depth (acfg, method, depth);
}
if (klass->delegate) {
}
g_assert (nclass);
nclass = mono_class_inflate_generic_class (nclass, mono_generic_class_get_context (klass->generic_class));
- add_generic_class (acfg, nclass);
+ add_generic_class (acfg, nclass, FALSE);
}
iter = NULL;
while ((method = mono_class_get_methods (array_class, &iter))) {
if (strstr (method->name, name_prefix)) {
MonoMethod *m = mono_aot_get_array_helper_from_wrapper (method);
- add_extra_method (acfg, m);
+ add_extra_method_with_depth (acfg, m, depth);
}
}
if (mono_class_is_assignable_from (mono_class_inflate_generic_class (icomparable, &ctx), tclass)) {
gcomparer = mono_class_from_name (mono_defaults.corlib, "System.Collections.Generic", "GenericComparer`1");
g_assert (gcomparer);
- add_generic_class (acfg, mono_class_inflate_generic_class (gcomparer, &ctx));
+ add_generic_class (acfg, mono_class_inflate_generic_class (gcomparer, &ctx), FALSE);
+ }
+ }
+
+ /* Add an instance of GenericEqualityComparer<T> which is created dynamically by EqualityComparer<T> */
+ if (klass->image == mono_defaults.corlib && !strcmp (klass->name_space, "System.Collections.Generic") && !strcmp (klass->name, "EqualityComparer`1")) {
+ MonoClass *tclass = mono_class_from_mono_type (klass->generic_class->context.class_inst->type_argv [0]);
+ MonoClass *iface, *gcomparer;
+ MonoGenericContext ctx;
+ MonoType *args [16];
+
+ memset (&ctx, 0, sizeof (ctx));
+
+ iface = mono_class_from_name (mono_defaults.corlib, "System", "IEquatable`1");
+ g_assert (iface);
+ args [0] = &tclass->byval_arg;
+ ctx.class_inst = mono_metadata_get_generic_inst (1, args);
+
+ if (mono_class_is_assignable_from (mono_class_inflate_generic_class (iface, &ctx), tclass)) {
+ gcomparer = mono_class_from_name (mono_defaults.corlib, "System.Collections.Generic", "GenericEqualityComparer`1");
+ g_assert (gcomparer);
+ add_generic_class (acfg, mono_class_inflate_generic_class (gcomparer, &ctx), FALSE);
}
}
}
static void
-add_instances_of (MonoAotCompile *acfg, MonoClass *klass, MonoType **insts, int ninsts)
+add_instances_of (MonoAotCompile *acfg, MonoClass *klass, MonoType **insts, int ninsts, gboolean force)
{
int i;
MonoGenericContext ctx;
for (i = 0; i < ninsts; ++i) {
args [0] = insts [i];
ctx.class_inst = mono_metadata_get_generic_inst (1, args);
- add_generic_class (acfg, mono_class_inflate_generic_class (klass, &ctx));
+ add_generic_class (acfg, mono_class_inflate_generic_class (klass, &ctx), force);
}
}
token = MONO_TOKEN_METHOD_SPEC | (i + 1);
method = mono_get_method (acfg->image, token, NULL);
+ if (!method)
+ continue;
+
if (method->klass->image != acfg->image)
continue;
if (!klass || klass->rank)
continue;
- add_generic_class (acfg, klass);
+ add_generic_class (acfg, klass, FALSE);
}
/* Add types of args/locals */
if (sig) {
for (j = 0; j < sig->param_count; ++j)
if (sig->params [j]->type == MONO_TYPE_GENERICINST)
- add_generic_class (acfg, mono_class_from_mono_type (sig->params [j]));
+ add_generic_class (acfg, mono_class_from_mono_type (sig->params [j]), FALSE);
}
header = mono_method_get_header (method);
if (header) {
for (j = 0; j < header->num_locals; ++j)
if (header->locals [j]->type == MONO_TYPE_GENERICINST)
- add_generic_class (acfg, mono_class_from_mono_type (header->locals [j]));
+ add_generic_class (acfg, mono_class_from_mono_type (header->locals [j]), FALSE);
}
}
/* Add GenericComparer<T> instances for primitive types for Enum.ToString () */
klass = mono_class_from_name (acfg->image, "System.Collections.Generic", "GenericComparer`1");
if (klass)
- add_instances_of (acfg, klass, insts, ninsts);
+ add_instances_of (acfg, klass, insts, ninsts, TRUE);
klass = mono_class_from_name (acfg->image, "System.Collections.Generic", "GenericEqualityComparer`1");
if (klass)
- add_instances_of (acfg, klass, insts, ninsts);
+ add_instances_of (acfg, klass, insts, ninsts, TRUE);
/* Add instances of the array generic interfaces for primitive types */
/* This will add instances of the InternalArray_ helper methods in Array too */
klass = mono_class_from_name (acfg->image, "System.Collections.Generic", "ICollection`1");
if (klass)
- add_instances_of (acfg, klass, insts, ninsts);
+ add_instances_of (acfg, klass, insts, ninsts, TRUE);
klass = mono_class_from_name (acfg->image, "System.Collections.Generic", "IList`1");
if (klass)
- add_instances_of (acfg, klass, insts, ninsts);
+ add_instances_of (acfg, klass, insts, ninsts, TRUE);
klass = mono_class_from_name (acfg->image, "System.Collections.Generic", "IEnumerable`1");
if (klass)
- add_instances_of (acfg, klass, insts, ninsts);
+ add_instances_of (acfg, klass, insts, ninsts, TRUE);
/*
* Add a managed-to-native wrapper of Array.GetGenericValueImpl<object>, which is
MonoMethodHeader *header;
gboolean skip, direct_call;
guint32 got_slot;
- char direct_call_target [128];
+ char direct_call_target [1024];
if (method) {
header = mono_method_get_header (method);
MonoCompile *callee_cfg = g_hash_table_lookup (acfg->method_to_cfg, patch_info->data.method);
//printf ("DIRECT: %s %s\n", method ? mono_method_full_name (method, TRUE) : "", mono_method_full_name (callee_cfg->method, TRUE));
direct_call = TRUE;
+ g_assert (strlen (callee_cfg->asm_symbol) < 1000);
sprintf (direct_call_target, "%s", callee_cfg->asm_symbol);
patch_info->type = MONO_PATCH_INFO_NONE;
acfg->stats.direct_calls ++;
/* Nullify the patch */
patch_info->type = MONO_PATCH_INFO_NONE;
+ plt_entry->jit_used = TRUE;
}
}
{
char *name1, *name2, *cached;
int i, j, len, count;
-
+
name1 = mono_method_full_name (method, TRUE);
len = strlen (name1);
name2 = malloc (strlen (prefix) + len + 16);
method = cfg->orig_method;
code = cfg->native_code;
- header = mono_method_get_header (method);
+ header = cfg->header;
method_index = get_method_index (acfg, method);
encode_value (get_image_index (acfg, patch_info->data.image), p, &p);
break;
case MONO_PATCH_INFO_MSCORLIB_GOT_ADDR:
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
break;
case MONO_PATCH_INFO_METHOD_REL:
encode_value ((gint)patch_info->data.offset, p, &p);
continue;
}
+ if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR) {
+ /* Stored in a GOT slot initialized at module load time */
+ patch_info->type = MONO_PATCH_INFO_NONE;
+ continue;
+ }
+
if (is_plt_patch (patch_info)) {
/* Calls are made through the PLT */
patch_info->type = MONO_PATCH_INFO_NONE;
method = cfg->orig_method;
code = cfg->native_code;
- header = mono_method_get_header (method);
+ header = cfg->header;
method_index = get_method_index (acfg, method);
seq_points = cfg->seq_point_info;
- buf_size = header->num_clauses * 256 + debug_info_size + 1024 + (seq_points ? (seq_points->len * 64) : 0);
+ buf_size = header->num_clauses * 256 + debug_info_size + 1024 + (seq_points ? (seq_points->len * 64) : 0) + cfg->gc_map_size;
p = buf = g_malloc (buf_size);
#ifdef MONO_ARCH_HAVE_XP_UNWIND
use_unwind_ops = cfg->unwind_ops != NULL;
#endif
- flags = (jinfo->has_generic_jit_info ? 1 : 0) | (use_unwind_ops ? 2 : 0) | (header->num_clauses ? 4 : 0) | (seq_points ? 8 : 0) | (cfg->compile_llvm ? 16 : 0) | (jinfo->has_try_block_holes ? 32 : 0);
+ flags = (jinfo->has_generic_jit_info ? 1 : 0) | (use_unwind_ops ? 2 : 0) | (header->num_clauses ? 4 : 0) | (seq_points ? 8 : 0) | (cfg->compile_llvm ? 16 : 0) | (jinfo->has_try_block_holes ? 32 : 0) | (cfg->gc_map ? 64 : 0);
encode_value (flags, p, &p);
/* Exception table */
if (cfg->compile_llvm) {
+ /*
+ * When using LLVM, we can't emit some data, such as PC offsets or the 'this'
+ * register/offset, since that information is only available to llc. Instead, we
+ * let llc save the data into the LSDA, and read it from there at runtime.
+ */
/* The assembly might be CIL stripped so emit the data ourselves */
if (header->num_clauses)
encode_value (header->num_clauses, p, &p);
} else {
encode_value (0, p, &p);
}
+
+ /* Emit a list of nesting clauses */
+ for (i = 0; i < header->num_clauses; ++i) {
+ gint32 cindex1 = k;
+ MonoExceptionClause *clause1 = &header->clauses [cindex1];
+ gint32 cindex2 = i;
+ MonoExceptionClause *clause2 = &header->clauses [cindex2];
+
+ if (cindex1 != cindex2 && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset)
+ encode_value (i, p, &p);
+ }
+ encode_value (-1, p, &p);
}
} else {
if (jinfo->num_clauses)
if (jinfo->has_generic_jit_info) {
MonoGenericJitInfo *gi = mono_jit_info_get_generic_jit_info (jinfo);
- encode_value (gi->has_this ? 1 : 0, p, &p);
- encode_value (gi->this_reg, p, &p);
- encode_value (gi->this_offset, p, &p);
+ if (!cfg->compile_llvm) {
+ encode_value (gi->has_this ? 1 : 0, p, &p);
+ encode_value (gi->this_reg, p, &p);
+ encode_value (gi->this_offset, p, &p);
+ }
/*
* Need to encode jinfo->method too, since it is not equal to 'method'
}
}
-
g_assert (debug_info_size < buf_size);
encode_value (debug_info_size, p, &p);
g_free (debug_info);
}
+ /* GC Map */
+ if (cfg->gc_map) {
+ encode_value (cfg->gc_map_size, p, &p);
+ /* The GC map requires 4 bytes of alignment */
+ while ((gsize)p % 4)
+ p ++;
+ memcpy (p, cfg->gc_map, cfg->gc_map_size);
+ p += cfg->gc_map_size;
+ }
+
acfg->stats.ex_info_size += p - buf;
g_assert (p - buf < buf_size);
/* Emit info */
- cfg->ex_info_offset = add_to_blob (acfg, buf, p - buf);
+ /* The GC Map requires 4 byte alignment */
+ cfg->ex_info_offset = add_to_blob_aligned (acfg, buf, p - buf, cfg->gc_map ? 4 : 1);
g_free (buf);
}
gboolean no_special_static, cant_encode;
gpointer iter = NULL;
+ if (!klass) {
+ buf_size = 16;
+
+ p = buf = g_malloc (buf_size);
+
+ /* Mark as unusable */
+ encode_value (-1, p, &p);
+
+ res = add_to_blob (acfg, buf, p - buf);
+ g_free (buf);
+
+ return res;
+ }
+
buf_size = 10240 + (klass->vtable_size * 16);
p = buf = g_malloc (buf_size);
return res;
}
+/*
+ * get_plt_entry_debug_sym:
+ *
+ *   Return a newly allocated debug symbol name for the PLT entry described by
+ * the patch JI, or NULL if the patch type has no symbolic representation.
+ * CACHE is passed through to get_debug_sym () to unique method symbol names.
+ * The caller owns (and frees) the returned string.
+ */
+static char*
+get_plt_entry_debug_sym (MonoAotCompile *acfg, MonoJumpInfo *ji, GHashTable *cache)
+{
+ /* NULL-initialize so unhandled patch types return NULL instead of an
+  * uninitialized pointer, which callers would then try to emit as a symbol. */
+ char *debug_sym = NULL;
+
+ switch (ji->type) {
+ case MONO_PATCH_INFO_METHOD:
+ debug_sym = get_debug_sym (ji->data.method, "plt_", cache);
+ break;
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ debug_sym = g_strdup_printf ("plt__jit_icall_%s", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_CLASS_INIT:
+ debug_sym = g_strdup_printf ("plt__class_init_%s", mono_type_get_name (&ji->data.klass->byval_arg));
+ /* Type names can contain characters which are invalid in assembler symbols */
+ sanitize_symbol (debug_sym);
+ break;
+ case MONO_PATCH_INFO_RGCTX_FETCH:
+ debug_sym = g_strdup_printf ("plt__rgctx_fetch_%d", acfg->label_generator ++);
+ break;
+ case MONO_PATCH_INFO_ICALL_ADDR: {
+ char *s = get_debug_sym (ji->data.method, "", cache);
+
+ debug_sym = g_strdup_printf ("plt__icall_native_%s", s);
+ g_free (s);
+ break;
+ }
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR:
+ debug_sym = g_strdup_printf ("plt__jit_icall_native_%s", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
+ debug_sym = g_strdup_printf ("plt__generic_class_init");
+ break;
+ default:
+ /* No symbolic name for this patch type */
+ break;
+ }
+
+ return debug_sym;
+}
+
/*
* Calls made from AOTed code are routed through a table of jumps similar to the
- * ELF PLT (Program Linkage Table). The differences are the following:
- * - the ELF PLT entries make an indirect jump though the GOT so they expect the
- * GOT pointer to be in EBX. We want to avoid this, so our table contains direct
- * jumps. This means the jumps need to be patched when the address of the callee is
- * known. Initially the PLT entries jump to code which transfers control to the
- * AOT runtime through the first PLT entry.
+ * ELF PLT (Program Linkage Table). Initially the PLT entries jump to code which transfers
+ * control to the AOT runtime through a trampoline.
*/
static void
emit_plt (MonoAotCompile *acfg)
sprintf (symbol, "plt");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, NACL_SIZE(16, kNaClAlignment));
emit_label (acfg, symbol);
emit_label (acfg, acfg->plt_symbol);
for (i = 0; i < acfg->plt_offset; ++i) {
- char label [128];
char *debug_sym = NULL;
MonoPltEntry *plt_entry = NULL;
MonoJumpInfo *ji;
- if (i == 0) {
+ if (i == 0)
/*
- * The first plt entry is used to transfer code to the AOT loader.
+ * The first plt entry is unused.
*/
- arch_emit_plt_entry (acfg, i);
continue;
- }
plt_entry = g_hash_table_lookup (acfg->plt_offset_to_entry, GUINT_TO_POINTER (i));
ji = plt_entry->ji;
- sprintf (label, "%s", plt_entry->symbol);
if (acfg->llvm) {
/*
*/
if (ji && is_direct_callable (acfg, NULL, ji) && !acfg->use_bin_writer) {
MonoCompile *callee_cfg = g_hash_table_lookup (acfg->method_to_cfg, ji->data.method);
- fprintf (acfg->fp, "\n.set %s, %s\n", label, callee_cfg->asm_symbol);
+
+ if (acfg->thumb_mixed && !callee_cfg->compile_llvm) {
+ /* LLVM calls the PLT entries using bl, so emit a stub */
+ emit_label (acfg, plt_entry->llvm_symbol);
+ fprintf (acfg->fp, ".thumb_func\n");
+ fprintf (acfg->fp, "bx pc\n");
+ fprintf (acfg->fp, "nop\n");
+ fprintf (acfg->fp, ".arm\n");
+ fprintf (acfg->fp, "b %s\n", callee_cfg->asm_symbol);
+ } else {
+ fprintf (acfg->fp, "\n.set %s, %s\n", plt_entry->llvm_symbol, callee_cfg->asm_symbol);
+ }
continue;
}
}
- emit_label (acfg, label);
+ if (acfg->aot_opts.write_symbols)
+ plt_entry->debug_sym = get_plt_entry_debug_sym (acfg, ji, cache);
+ debug_sym = plt_entry->debug_sym;
- if (acfg->aot_opts.write_symbols) {
- switch (ji->type) {
- case MONO_PATCH_INFO_METHOD:
- debug_sym = get_debug_sym (ji->data.method, "plt_", cache);
- break;
- case MONO_PATCH_INFO_INTERNAL_METHOD:
- debug_sym = g_strdup_printf ("plt__jit_icall_%s", ji->data.name);
- break;
- case MONO_PATCH_INFO_CLASS_INIT:
- debug_sym = g_strdup_printf ("plt__class_init_%s", mono_type_get_name (&ji->data.klass->byval_arg));
- sanitize_symbol (debug_sym);
- break;
- case MONO_PATCH_INFO_RGCTX_FETCH:
- debug_sym = g_strdup_printf ("plt__rgctx_fetch_%d", acfg->label_generator ++);
- break;
- case MONO_PATCH_INFO_ICALL_ADDR: {
- char *s = get_debug_sym (ji->data.method, "", cache);
-
- debug_sym = g_strdup_printf ("plt__icall_native_%s", s);
- g_free (s);
- break;
- }
- case MONO_PATCH_INFO_JIT_ICALL_ADDR:
- debug_sym = g_strdup_printf ("plt__jit_icall_native_%s", ji->data.name);
- break;
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
- debug_sym = g_strdup_printf ("plt__generic_class_init");
- break;
- default:
- break;
+ if (acfg->thumb_mixed && !plt_entry->jit_used)
+ /* Emit only a thumb version */
+ continue;
+
+ if (!acfg->thumb_mixed)
+ emit_label (acfg, plt_entry->llvm_symbol);
+
+ if (debug_sym) {
+ emit_local_symbol (acfg, debug_sym, NULL, TRUE);
+ emit_label (acfg, debug_sym);
+ }
+
+ emit_label (acfg, plt_entry->symbol);
+
+ arch_emit_plt_entry (acfg, i);
+
+ if (debug_sym)
+ emit_symbol_size (acfg, debug_sym, ".");
+ }
+
+ if (acfg->thumb_mixed) {
+ /*
+ * Emit a separate set of PLT entries using thumb2 which is called by LLVM generated
+ * code.
+ */
+ for (i = 0; i < acfg->plt_offset; ++i) {
+ char *debug_sym = NULL;
+ MonoPltEntry *plt_entry = NULL;
+ MonoJumpInfo *ji;
+
+ if (i == 0)
+ continue;
+
+ plt_entry = g_hash_table_lookup (acfg->plt_offset_to_entry, GUINT_TO_POINTER (i));
+ ji = plt_entry->ji;
+
+ if (ji && is_direct_callable (acfg, NULL, ji) && !acfg->use_bin_writer)
+ continue;
+
+ /* Skip plt entries not actually called by LLVM code */
+ if (!plt_entry->llvm_used)
+ continue;
+
+ if (acfg->aot_opts.write_symbols) {
+ if (plt_entry->debug_sym)
+ debug_sym = g_strdup_printf ("%s_thumb", plt_entry->debug_sym);
}
if (debug_sym) {
emit_local_symbol (acfg, debug_sym, NULL, TRUE);
emit_label (acfg, debug_sym);
}
- }
- arch_emit_plt_entry (acfg, i);
+ emit_label (acfg, plt_entry->llvm_symbol);
- if (debug_sym) {
- emit_symbol_size (acfg, debug_sym, ".");
- g_free (debug_sym);
+ arch_emit_llvm_plt_entry (acfg, i);
+
+ if (debug_sym) {
+ emit_symbol_size (acfg, debug_sym, ".");
+ g_free (debug_sym);
+ }
}
}
emit_symbol_size (acfg, acfg->plt_symbol, ".");
sprintf (symbol, "plt_end");
- emit_global (acfg, symbol, TRUE);
emit_label (acfg, symbol);
g_hash_table_destroy (cache);
ji = info->ji;
unwind_ops = info->unwind_ops;
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (code, ji);
+#endif
+
/* Emit code */
sprintf (start_symbol, "%s", name);
emit_section_change (acfg, ".text", 0);
emit_global (acfg, start_symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, start_symbol);
sprintf (symbol, "%snamed_%s", acfg->temp_prefix, name);
g_assert_not_reached ();
}
- emit_global (acfg, symbol, TRUE);
- emit_alignment (acfg, 16);
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
emit_label (acfg, symbol);
acfg->trampoline_got_offset_base [ntype] = tramp_got_offset;
default:
g_assert_not_reached ();
}
+#ifdef __native_client_codegen__
+ /* align to avoid 32-byte boundary crossings */
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
if (!acfg->trampoline_size [ntype]) {
g_assert (tramp_size);
opts->print_skipped_methods = TRUE;
} else if (str_begins_with (arg, "stats")) {
opts->stats = TRUE;
+ } else if (str_begins_with (arg, "mtriple=")) {
+ opts->mtriple = g_strdup (arg + strlen ("mtriple="));
} else {
fprintf (stderr, "AOT : Unknown argument '%s'.\n", arg);
exit (1);
return TRUE;
}
-static void
-add_generic_class (MonoAotCompile *acfg, MonoClass *klass);
-
/*
* compile_method:
*
add_extra_method_with_depth (acfg, m, depth + 1);
}
}
- add_generic_class (acfg, m->klass);
+ add_generic_class_with_depth (acfg, m->klass, depth + 5);
}
if (m->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED && !strcmp (m->name, "ElementAddr"))
add_extra_method_with_depth (acfg, m, depth + 1);
MonoClass *klass = patch_info->data.klass;
if (klass->generic_class && !mono_generic_context_is_sharable (&klass->generic_class->context, FALSE))
- add_generic_class (acfg, klass);
+ add_generic_class_with_depth (acfg, klass, depth + 5);
break;
}
default:
switch (patch_info->type) {
case MONO_PATCH_INFO_GOT_OFFSET:
case MONO_PATCH_INFO_NONE:
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
break;
case MONO_PATCH_INFO_IMAGE:
/* The assembly is stored in GOT slot 0 */
char*
mono_aot_get_method_name (MonoCompile *cfg)
-{
- guint32 method_index = get_method_index (llvm_acfg, cfg->orig_method);
-
- return g_strdup_printf ("m_%x", method_index);
-}
-
-char*
-mono_aot_get_method_debug_name (MonoCompile *cfg)
{
return get_debug_sym (cfg->orig_method, "", llvm_acfg->method_label_hash);
}
return NULL;
plt_entry = get_plt_entry (llvm_acfg, ji);
+ plt_entry->llvm_used = TRUE;
- return g_strdup_printf (plt_entry->symbol);
+ return g_strdup_printf (plt_entry->llvm_symbol);
}
MonoJumpInfo*
char *command, *opts;
int i;
MonoJumpInfo *patch_info;
- const char *llc_extra_args;
/*
* When using LLVM, we let llvm emit the got since the LLVM IL needs to refer
* a lot of time, and doesn't seem to save much space.
* The following optimizations cannot be enabled:
* - 'tailcallelim'
+ * - 'jump-threading' changes our blockaddress references to int constants.
* The opt list below was produced by taking the output of:
* llvm-as < /dev/null | opt -O2 -disable-output -debug-pass=Arguments
* then removing tailcallelim + the global opts, and adding a second gvn.
*/
opts = g_strdup ("-instcombine -simplifycfg");
- opts = g_strdup ("-simplifycfg -domtree -domfrontier -scalarrepl -instcombine -simplifycfg -basiccg -prune-eh -inline -functionattrs -domtree -domfrontier -scalarrepl -simplify-libcalls -instcombine -jump-threading -simplifycfg -instcombine -simplifycfg -reassociate -domtree -loops -loopsimplify -domfrontier -loopsimplify -lcssa -loop-rotate -licm -lcssa -loop-unswitch -instcombine -scalar-evolution -loopsimplify -lcssa -iv-users -indvars -loop-deletion -loopsimplify -lcssa -loop-unroll -instcombine -memdep -gvn -memdep -memcpyopt -sccp -instcombine -jump-threading -domtree -memdep -dse -adce -gvn -simplifycfg -preverify -domtree -verify");
+ opts = g_strdup ("-simplifycfg -domtree -domfrontier -scalarrepl -instcombine -simplifycfg -basiccg -prune-eh -inline -functionattrs -domtree -domfrontier -scalarrepl -simplify-libcalls -instcombine -simplifycfg -instcombine -simplifycfg -reassociate -domtree -loops -loopsimplify -domfrontier -loopsimplify -lcssa -loop-rotate -licm -lcssa -loop-unswitch -instcombine -scalar-evolution -loopsimplify -lcssa -iv-users -indvars -loop-deletion -loopsimplify -lcssa -loop-unroll -instcombine -memdep -gvn -memdep -memcpyopt -sccp -instcombine -domtree -memdep -dse -adce -gvn -simplifycfg -preverify -domtree -verify");
#if 1
command = g_strdup_printf ("opt -f %s -o temp.opt.bc temp.bc", opts);
printf ("Executing opt: %s\n", command);
#endif
g_free (opts);
-#if !LLVM_CHECK_VERSION(2, 8)
- /* LLVM 2.8 removed the -f flag ??? */
- llc_extra_args = "-f";
-#else
- llc_extra_args = "";
-#endif
- command = g_strdup_printf ("llc %s %s -relocation-model=pic -unwind-tables -o %s temp.opt.bc", LLC_TARGET_ARGS, llc_extra_args, acfg->tmpfname);
+ if (!acfg->llc_args)
+ acfg->llc_args = g_string_new ("");
+
+ /* Verbose asm slows down llc greatly */
+ g_string_append (acfg->llc_args, " -asm-verbose=false");
+
+ if (acfg->aot_opts.mtriple)
+ g_string_append_printf (acfg->llc_args, " -mtriple=%s", acfg->aot_opts.mtriple);
+
+ unlink (acfg->tmpfname);
+
+ command = g_strdup_printf ("llc %s -relocation-model=pic -unwind-tables -disable-gnu-eh-frame -enable-mono-eh-frame -o %s temp.opt.bc", acfg->llc_args->str, acfg->tmpfname);
printf ("Executing llc: %s\n", command);
*/
sprintf (symbol, "methods");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, TRUE);
emit_alignment (acfg, 8);
if (acfg->llvm) {
for (i = 0; i < acfg->nmethods; ++i) {
* Emit some padding so the local symbol for the first method doesn't have the
* same address as 'methods'.
*/
+#if defined(__default_codegen__)
emit_zero_bytes (acfg, 16);
+#elif defined(__native_client_codegen__)
+ {
+ const int kPaddingSize = 16;
+ guint8 pad_buffer[kPaddingSize];
+ mono_arch_nacl_pad (pad_buffer, kPaddingSize);
+ emit_bytes (acfg, pad_buffer, kPaddingSize);
+ }
+#endif
+
for (l = acfg->method_order; l != NULL; l = l->next) {
MonoCompile *cfg;
}
emit_section_change (acfg, ".text", 0);
+#ifdef __native_client_codegen__
+ emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
+#endif
emit_global (acfg, symbol, TRUE);
emit_label (acfg, symbol);
sprintf (call_target, "%s", cfg->asm_symbol);
- arch_emit_unbox_trampoline (acfg, cfg->orig_method, cfg->generic_sharing_context, call_target);
+ arch_emit_unbox_trampoline (acfg, cfg->orig_method, call_target);
}
if (cfg->compile_llvm)
sprintf (symbol, "methods_end");
emit_section_change (acfg, ".text", 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "code_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "method_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
{
MonoMethodSignature *sig;
MonoClass *klass;
- int i;
+ int i, hindex;
int hashes_count;
guint32 *hashes_start, *hashes;
guint32 a, b, c;
+ MonoGenericInst *ginst = NULL;
/* Similar to the hash in mono_method_get_imt_slot () */
sig = mono_method_signature (method);
- hashes_count = sig->param_count + 5;
- hashes_start = malloc (hashes_count * sizeof (guint32));
+ if (method->is_inflated)
+ ginst = ((MonoMethodInflated*)method)->context.method_inst;
+
+ hashes_count = sig->param_count + 5 + (ginst ? ginst->type_argc : 0);
+ hashes_start = g_malloc0 (hashes_count * sizeof (guint32));
hashes = hashes_start;
/* Some wrappers are assigned to random classes */
hashes [2] = mono_metadata_str_hash (method->name);
hashes [3] = method->wrapper_type;
hashes [4] = mono_aot_type_hash (sig->ret);
+ hindex = 5;
for (i = 0; i < sig->param_count; i++) {
- hashes [5 + i] = mono_aot_type_hash (sig->params [i]);
+ hashes [hindex ++] = mono_aot_type_hash (sig->params [i]);
}
-
+ if (ginst) {
+ for (i = 0; i < ginst->type_argc; ++i)
+ hashes [hindex ++] = mono_aot_type_hash (ginst->type_argv [i]);
+ }
+ g_assert (hindex == hashes_count);
+
/* Setup internal state */
a = b = c = 0xdeadbeef + (((guint32)hashes_count)<<2);
if (!cfg)
continue;
- buf_size = 512;
+ buf_size = 10240;
p = buf = g_malloc (buf_size);
nmethods ++;
name = NULL;
if (method->wrapper_type) {
+ gboolean encode_ref = FALSE;
+
/*
* We encode some wrappers using their name, since encoding them
- * directly would be difficult. This also avoids creating the wrapper
- * methods at runtime, since they are not needed anyway.
+ * directly would be difficult. This works because at runtime, we only need to
+ * check whenever a method ref matches an existing MonoMethod. The downside is
+ * that the method names are large, so we use the binary encoding if possible.
*/
switch (method->wrapper_type) {
case MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK:
case MONO_WRAPPER_SYNCHRONIZED:
- /* encode_method_ref () can handle these */
+ encode_ref = TRUE;
+ break;
+ case MONO_WRAPPER_MANAGED_TO_NATIVE:
+ /* Skip JIT icall wrappers */
+ if (!strstr (method->name, "__icall_wrapper"))
+ encode_ref = TRUE;
break;
+ case MONO_WRAPPER_UNKNOWN:
+ if (!strcmp (method->name, "PtrToStructure") || !strcmp (method->name, "StructureToPtr"))
+ encode_ref = TRUE;
+ break;
case MONO_WRAPPER_RUNTIME_INVOKE:
if (mono_marshal_method_from_wrapper (method) != method && !strstr (method->name, "virtual"))
/* Direct wrapper, encode normally */
- break;
- /* Fall through */
+ encode_ref = TRUE;
+ break;
default:
- name = mono_aot_wrapper_name (method);
break;
}
+
+ if (!encode_ref)
+ name = mono_aot_wrapper_name (method);
}
if (name) {
/* Emit the table */
sprintf (symbol, "extra_method_table");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
*/
sprintf (symbol, "extra_method_info_offsets");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "ex_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
emit_section_change (acfg, RODATA_SECT, 1);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
- emit_global (acfg, symbol, FALSE);
for (i = 0; i < acfg->unwind_ops->len; ++i) {
guint32 index = GPOINTER_TO_UINT (g_ptr_array_index (acfg->unwind_ops, i));
sprintf (symbol, "class_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
for (i = 0; i < acfg->image->tables [MONO_TABLE_TYPEDEF].rows; ++i) {
token = MONO_TOKEN_TYPE_DEF | (i + 1);
klass = mono_class_get (acfg->image, token);
+ if (!klass)
+ continue;
full_name = mono_type_get_name_full (mono_class_get_type (klass), MONO_TYPE_NAME_FORMAT_FULL_NAME);
hash = mono_metadata_str_hash (full_name) % table_size;
g_free (full_name);
/* Emit the table */
sprintf (symbol, "class_name_table");
emit_section_change (acfg, RODATA_SECT, 0);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
* So we emit it at once, and reference its elements by an index.
*/
- sprintf (symbol, "mono_image_table");
+ sprintf (symbol, "image_table");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
/* Emit got_info_offsets table */
sprintf (symbol, "got_info_offsets");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
sprintf (symbol, "got_end");
emit_label (acfg, symbol);
}
-
- sprintf (symbol, "mono_aot_got_addr");
- emit_section_change (acfg, ".data", 0);
- emit_global (acfg, symbol, FALSE);
- emit_alignment (acfg, 8);
- emit_label (acfg, symbol);
- emit_pointer (acfg, acfg->got_symbol);
}
typedef struct GlobalsTableEntry {
{
char *build_info;
- emit_string_symbol (acfg, "mono_assembly_guid" , acfg->image->guid);
-
- emit_string_symbol (acfg, "mono_aot_version", MONO_AOT_FILE_VERSION);
+ emit_local_string_symbol (acfg, "assembly_guid" , acfg->image->guid);
if (acfg->aot_opts.bind_to_runtime_version) {
build_info = mono_get_runtime_build_info ();
- emit_string_symbol (acfg, "mono_runtime_version", build_info);
+ emit_local_string_symbol (acfg, "runtime_version", build_info);
g_free (build_info);
} else {
- emit_string_symbol (acfg, "mono_runtime_version", "");
+ emit_local_string_symbol (acfg, "runtime_version", "");
}
/*
* Emit a global symbol which can be passed by an embedding app to
* mono_aot_register_module ().
*/
-#if defined(__MACH__)
+#if defined(__MACH__) && !defined(__native_client_codegen__)
sprintf (symbol, "_mono_aot_module_%s_info", acfg->image->assembly->aname.name);
#else
sprintf (symbol, "mono_aot_module_%s_info", acfg->image->assembly->aname.name);
sprintf (symbol, "mem_end");
emit_section_change (acfg, ".text", 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
}
{
char symbol [128];
int i;
+ int gc_name_offset;
+ const char *gc_name;
+
+ /*
+ * The managed allocators are GC specific, so an AOT image created with one GC
+ * cannot be used with another.
+ */
+ gc_name = mono_gc_get_gc_name ();
+ gc_name_offset = add_to_blob (acfg, (guint8*)gc_name, strlen (gc_name) + 1);
sprintf (symbol, "mono_aot_file_info");
emit_section_change (acfg, ".data", 0);
emit_label (acfg, symbol);
emit_global (acfg, symbol, FALSE);
- /* The data emitted here must match MonoAotFileInfo in aot-runtime.c. */
+ /* The data emitted here must match MonoAotFileInfo. */
+
+ emit_int32 (acfg, MONO_AOT_FILE_VERSION);
+ emit_int32 (acfg, 0);
+
+ /*
+ * We emit pointers to our data structures instead of emitting global symbols which
+ * point to them, to reduce the number of globals, and because using globals leads to
+ * various problems (e.g. arm/thumb interworking).
+ */
+ emit_pointer (acfg, acfg->got_symbol);
+ emit_pointer (acfg, "methods");
+ if (acfg->llvm) {
+ /*
+ * Emit a reference to the mono_eh_frame table created by our modified LLVM compiler.
+ */
+ emit_pointer (acfg, "mono_eh_frame");
+ } else {
+ emit_pointer (acfg, NULL);
+ }
+ emit_pointer (acfg, "blob");
+ emit_pointer (acfg, "class_name_table");
+ emit_pointer (acfg, "class_info_offsets");
+ emit_pointer (acfg, "method_info_offsets");
+ emit_pointer (acfg, "ex_info_offsets");
+ emit_pointer (acfg, "code_offsets");
+ emit_pointer (acfg, "extra_method_info_offsets");
+ emit_pointer (acfg, "extra_method_table");
+ emit_pointer (acfg, "got_info_offsets");
+ emit_pointer (acfg, "methods_end");
+ emit_pointer (acfg, "unwind_info");
+ emit_pointer (acfg, "mem_end");
+ emit_pointer (acfg, "image_table");
+ emit_pointer (acfg, "plt");
+ emit_pointer (acfg, "plt_end");
+ emit_pointer (acfg, "assembly_guid");
+ emit_pointer (acfg, "runtime_version");
+ if (acfg->num_trampoline_got_entries) {
+ emit_pointer (acfg, "specific_trampolines");
+ emit_pointer (acfg, "static_rgctx_trampolines");
+ emit_pointer (acfg, "imt_thunks");
+ } else {
+ emit_pointer (acfg, NULL);
+ emit_pointer (acfg, NULL);
+ emit_pointer (acfg, NULL);
+ }
+ if (acfg->thumb_mixed) {
+ emit_pointer (acfg, "thumb_end");
+ } else {
+ emit_pointer (acfg, NULL);
+ }
+
emit_int32 (acfg, acfg->plt_got_offset_base);
emit_int32 (acfg, (int)(acfg->got_offset * sizeof (gpointer)));
emit_int32 (acfg, acfg->plt_offset);
emit_int32 (acfg, acfg->nmethods);
emit_int32 (acfg, acfg->flags);
emit_int32 (acfg, acfg->opts);
+ emit_int32 (acfg, gc_name_offset);
for (i = 0; i < MONO_AOT_TRAMP_NUM; ++i)
emit_int32 (acfg, acfg->num_trampolines [i]);
sprintf (symbol, "blob");
emit_section_change (acfg, RODATA_SECT, 1);
- emit_global (acfg, symbol, FALSE);
emit_alignment (acfg, 8);
emit_label (acfg, symbol);
#define LD_OPTIONS "-m elf64ppc"
#elif defined(sparc) && SIZEOF_VOID_P == 8
#define AS_OPTIONS "-xarch=v9"
+#elif defined(TARGET_X86) && defined(__APPLE__) && !defined(__native_client_codegen__)
+#define AS_OPTIONS "-arch i386 -W"
#else
#define AS_OPTIONS ""
#endif
+#ifdef __native_client_codegen__
+#if defined(TARGET_AMD64)
+#define AS_NAME "nacl64-as"
+#else
+#define AS_NAME "nacl-as"
+#endif
+#else
+#define AS_NAME "as"
+#endif
+
#ifndef LD_OPTIONS
#define LD_OPTIONS ""
#endif
-#ifdef ENABLE_LLVM
-#define EH_LD_OPTIONS "--eh-frame-hdr"
-#else
#define EH_LD_OPTIONS ""
-#endif
if (acfg->aot_opts.asm_only) {
printf ("Output file: '%s'.\n", acfg->tmpfname);
} else {
objfile = g_strdup_printf ("%s.o", acfg->tmpfname);
}
- command = g_strdup_printf ("%sas %s %s -o %s", tool_prefix, AS_OPTIONS, acfg->tmpfname, objfile);
+ command = g_strdup_printf ("%s%s %s %s -o %s %s", tool_prefix, AS_NAME, AS_OPTIONS, acfg->as_args ? acfg->as_args->str : "", objfile, acfg->tmpfname);
printf ("Executing the native assembler: %s\n", command);
if (system (command) != 0) {
g_free (command);
command = g_strdup_printf ("gcc -dynamiclib -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
#elif defined(HOST_WIN32)
command = g_strdup_printf ("gcc -shared --dll -mno-cygwin -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
+#elif defined(TARGET_X86) && defined(__APPLE__) && !defined(__native_client_codegen__)
+ command = g_strdup_printf ("gcc -m32 -dynamiclib -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
#else
command = g_strdup_printf ("%sld %s %s -shared -o %s %s.o", tool_prefix, EH_LD_OPTIONS, LD_OPTIONS, tmp_outfile_name, acfg->tmpfname);
#endif
fprintf (stderr, "The soft-debug AOT option requires the --debug option.\n");
return 1;
}
+ acfg->flags |= MONO_AOT_FILE_FLAG_DEBUG;
}
-#ifdef ENABLE_LLVM
- acfg->llvm = TRUE;
- acfg->aot_opts.asm_writer = TRUE;
- acfg->flags |= MONO_AOT_FILE_FLAG_WITH_LLVM;
-#endif
+ if (mono_use_llvm) {
+ acfg->llvm = TRUE;
+ acfg->aot_opts.asm_writer = TRUE;
+ acfg->flags |= MONO_AOT_FILE_FLAG_WITH_LLVM;
+ }
if (acfg->aot_opts.full_aot)
acfg->flags |= MONO_AOT_FILE_FLAG_FULL_AOT;
#endif
acfg->num_trampolines [MONO_AOT_TRAMP_IMT_THUNK] = acfg->aot_opts.full_aot ? acfg->aot_opts.nimt_trampolines : 0;
- acfg->got_symbol_base = g_strdup_printf ("mono_aot_%s_got", acfg->image->assembly->aname.name);
- acfg->plt_symbol = g_strdup_printf ("mono_aot_%s_plt", acfg->image->assembly->aname.name);
+ acfg->temp_prefix = img_writer_get_temp_label_prefix (NULL);
+
+ arch_init (acfg);
+
+ acfg->got_symbol_base = g_strdup_printf ("%smono_aot_%s_got", acfg->llvm_label_prefix, acfg->image->assembly->aname.name);
+ acfg->plt_symbol = g_strdup_printf ("%smono_aot_%s_plt", acfg->llvm_label_prefix, acfg->image->assembly->aname.name);
/* Get rid of characters which cannot occur in symbols */
for (p = acfg->got_symbol_base; *p; ++p) {
*p = '_';
}
- acfg->temp_prefix = img_writer_get_temp_label_prefix (NULL);
-
- /*
- * The prefix LLVM likes to put in front of symbol names on darwin.
- * The mach-os specs require this for globals, but LLVM puts them in front of all
- * symbols. We need to handle this, since we need to refer to LLVM generated
- * symbols.
- */
- acfg->llvm_label_prefix = "";
- if (acfg->llvm)
- acfg->llvm_label_prefix = LLVM_LABEL_PREFIX;
-
acfg->method_index = 1;
collect_methods (acfg);
acfg->plt_offset = 1;
#ifdef ENABLE_LLVM
- llvm_acfg = acfg;
- mono_llvm_create_aot_module (acfg->got_symbol_base);
+ if (acfg->llvm) {
+ llvm_acfg = acfg;
+ mono_llvm_create_aot_module (acfg->got_symbol_base);
+ }
#endif
/* GOT offset 0 is reserved for the address of the current assembly */
ji = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
ji->type = MONO_PATCH_INFO_MSCORLIB_GOT_ADDR;
get_got_offset (acfg, ji);
+
+ /* This is very common */
+ ji = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
+ ji->type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
+ get_got_offset (acfg, ji);
}
TV_GETTIME (atv);
} else {
acfg->tmpfname = g_strdup ("temp.s");
}
- }
- emit_llvm_file (acfg);
+ emit_llvm_file (acfg);
+ }
#endif
if (!acfg->aot_opts.asm_only && !acfg->aot_opts.asm_writer && bin_writer_supported ()) {
MonoCompile *cfg = acfg->cfgs [i];
int method_index = get_method_index (acfg, cfg->orig_method);
- cfg->asm_symbol = g_strdup_printf ("%s%sm_%x", acfg->temp_prefix, LLVM_LABEL_PREFIX, method_index);
+ if (COMPILE_LLVM (cfg))
+ cfg->asm_symbol = g_strdup_printf ("%s%s", acfg->llvm_label_prefix, cfg->llvm_method_name);
+ else
+ cfg->asm_symbol = g_strdup_printf ("%s%sm_%x", acfg->temp_prefix, acfg->llvm_label_prefix, method_index);
}
}
if (acfg->dwarf)
mono_dwarf_writer_emit_base_info (acfg->dwarf, mono_unwind_get_cie_program ());
+ if (acfg->thumb_mixed) {
+ char symbol [256];
+ /*
+ * This global symbol marks the end of THUMB code, and the beginning of ARM
+ * code generated by our JIT.
+ */
+ sprintf (symbol, "thumb_end");
+ emit_section_change (acfg, ".text", 0);
+ emit_label (acfg, symbol);
+ fprintf (acfg->fp, ".skip 16\n");
+
+ fprintf (acfg->fp, ".arm\n");
+ }
+
emit_code (acfg);
emit_info (acfg);