MonoAotFileFlags flags;
MonoDynamicStream blob;
gboolean blob_closed;
- MonoClass **typespec_classes;
+ GHashTable *typespec_classes;
GString *llc_args;
GString *as_args;
char *assembly_name_sym;
/* Wrappers around the image writer functions */
+#define MAX_SYMBOL_SIZE 256
+
+static inline const char *
+mangle_symbol (const char * symbol, char * mangled_symbol, gsize length)
+{
+ gsize needed_size = length;
+
+ g_assert (NULL != symbol);
+ g_assert (NULL != mangled_symbol);
+ g_assert (0 != length);
+
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+ if (symbol && '_' != symbol [0]) {
+ needed_size = g_snprintf (mangled_symbol, length, "_%s", symbol);
+ } else {
+ needed_size = g_snprintf (mangled_symbol, length, "%s", symbol);
+ }
+#else
+ needed_size = g_snprintf (mangled_symbol, length, "%s", symbol);
+#endif
+
+ g_assert (needed_size < length); /* g_snprintf never returns a negative value, and needed_size is unsigned (gsize), so a 0 <= check would be tautological */
+ return mangled_symbol;
+}
+
+static inline char *
+mangle_symbol_alloc (const char * symbol)
+{
+ g_assert (NULL != symbol);
+
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+ if (symbol && '_' != symbol [0]) {
+ return g_strdup_printf ("_%s", symbol);
+ }
+ else {
+ return g_strdup_printf ("%s", symbol);
+ }
+#else
+ return g_strdup_printf ("%s", symbol);
+#endif
+}
+
static inline void
emit_section_change (MonoAotCompile *acfg, const char *section_name, int subsection_index)
{
mono_img_writer_emit_section_change (acfg->w, section_name, subsection_index);
}
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+
+static inline void
+emit_local_symbol (MonoAotCompile *acfg, const char *name, const char *end_label, gboolean func)
+{
+ const char * mangled_symbol_name = name;
+ char * mangled_symbol_name_alloc = NULL;
+
+ if (func) { /* test truthiness, not equality with TRUE: a nonzero gboolean other than 1 must still trigger mangling */
+ mangled_symbol_name_alloc = mangle_symbol_alloc (name);
+ mangled_symbol_name = mangled_symbol_name_alloc;
+ }
+
+ if (name != mangled_symbol_name && 0 != g_strcasecmp (name, mangled_symbol_name)) {
+ mono_img_writer_emit_label (acfg->w, mangled_symbol_name);
+ }
+ mono_img_writer_emit_local_symbol (acfg->w, mangled_symbol_name, end_label, func);
+
+ if (NULL != mangled_symbol_name_alloc) {
+ g_free (mangled_symbol_name_alloc);
+ }
+}
+
+#else
+
static inline void
emit_local_symbol (MonoAotCompile *acfg, const char *name, const char *end_label, gboolean func)
{
- mono_img_writer_emit_local_symbol (acfg->w, name, end_label, func);
+ mono_img_writer_emit_local_symbol (acfg->w, name, end_label, func);
}
+#endif
+
static inline void
emit_label (MonoAotCompile *acfg, const char *name)
{
mono_img_writer_emit_byte (acfg->w, val);
}
-#ifdef __native_client_codegen__
-static inline void
-emit_nacl_call_alignment (MonoAotCompile *acfg)
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+
+static G_GNUC_UNUSED void
+emit_global_inner (MonoAotCompile *acfg, const char *name, gboolean func)
{
- mono_img_writer_emit_nacl_call_alignment (acfg->w);
+ const char * mangled_symbol_name = name;
+ char * mangled_symbol_name_alloc = NULL;
+
+ mangled_symbol_name_alloc = mangle_symbol_alloc (name);
+ mangled_symbol_name = mangled_symbol_name_alloc;
+
+ if (0 != g_strcasecmp (name, mangled_symbol_name)) {
+ mono_img_writer_emit_label (acfg->w, mangled_symbol_name);
+ }
+ mono_img_writer_emit_global (acfg->w, mangled_symbol_name, func);
+
+ if (NULL != mangled_symbol_name_alloc) {
+ g_free (mangled_symbol_name_alloc);
+ }
}
-#endif
+
+#else
static G_GNUC_UNUSED void
emit_global_inner (MonoAotCompile *acfg, const char *name, gboolean func)
mono_img_writer_emit_global (acfg->w, name, func);
}
+#endif
+
static void
emit_global (MonoAotCompile *acfg, const char *name, gboolean func)
{
g_ptr_array_add (acfg->globals, g_strdup (name));
mono_img_writer_emit_local_symbol (acfg->w, name, NULL, func);
} else {
- mono_img_writer_emit_global (acfg->w, name, func);
+ emit_global_inner (acfg, name, func);
}
}
static void
emit_info_symbol (MonoAotCompile *acfg, const char *name)
{
- char symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
if (acfg->llvm) {
emit_label (acfg, name);
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
-#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && defined(__native_client_codegen__)
-#undef AOT_FUNC_ALIGNMENT
-#define AOT_FUNC_ALIGNMENT 32
-#endif
#if defined(TARGET_POWERPC64) && !defined(__mono_ilp32__)
#define PPC_LD_OP "ld"
arch_emit_objc_selector_ref (MonoAotCompile *acfg, guint8 *code, int index, int *code_size)
{
#if defined(TARGET_ARM)
- char symbol1 [256];
- char symbol2 [256];
+ char symbol1 [MAX_SYMBOL_SIZE];
+ char symbol2 [MAX_SYMBOL_SIZE];
int lindex = acfg->objc_selector_index_2 ++;
/* Emit ldr.imm/b */
arch_emit_plt_entry (MonoAotCompile *acfg, const char *got_symbol, int offset, int info_offset)
{
#if defined(TARGET_X86)
-#if defined(__default_codegen__)
/* jmp *<offset>(%ebx) */
emit_byte (acfg, 0xff);
emit_byte (acfg, 0xa3);
emit_int32 (acfg, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, info_offset);
-#elif defined(__native_client_codegen__)
- const guint8 kSizeOfNaClJmp = 11;
- guint8 bytes[kSizeOfNaClJmp];
- guint8 *pbytes = &bytes[0];
-
- x86_jump_membase32 (pbytes, X86_EBX, offset);
- emit_bytes (acfg, bytes, kSizeOfNaClJmp);
- /* four bytes of data, used by mono_arch_patch_plt_entry */
- /* For Native Client, make this work with data embedded in push. */
- emit_byte (acfg, 0x68); /* hide data in a push */
- emit_int32 (acfg, info_offset);
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_AMD64)
-#if defined(__default_codegen__)
emit_unset_mode (acfg);
fprintf (acfg->fp, "jmp *%s+%d(%%rip)\n", got_symbol, offset);
/* Used by mono_aot_get_plt_info_offset */
emit_int32 (acfg, info_offset);
acfg->stats.plt_size += 10;
-#elif defined(__native_client_codegen__)
- guint8 buf [256];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, got_symbol, ".", offset - 4);
-
- amd64_jump_reg (code, AMD64_R11);
- /* This should be constant for the plt patch */
- g_assert ((size_t)(code-buf_aligned) == 10);
- emit_bytes (acfg, buf_aligned, code - buf_aligned);
-
- /* Hide data in a push imm32 so it passes validation */
- emit_byte (acfg, 0x68); /* push */
- emit_int32 (acfg, info_offset);
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [256];
guint8 *code;
* - all the trampolines should be of the same length.
*/
#if defined(TARGET_AMD64)
-#if defined(__default_codegen__)
/* This should be exactly 8 bytes long */
*tramp_size = 8;
/* call *<offset>(%rip) */
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
emit_zero_bytes (acfg, 1);
}
-#elif defined(__native_client_codegen__)
- guint8 buf [256];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
- guint8 *call_start;
- size_t call_len;
- int got_offset;
-
- /* Emit this call in 'code' so we can find out how long it is. */
- amd64_call_reg (code, AMD64_R11);
- call_start = mono_arch_nacl_skip_nops (buf_aligned);
- call_len = code - call_start;
-
- /* The tramp_size is twice the NaCl alignment because it starts with */
- /* a call which needs to be aligned to the end of the boundary. */
- *tramp_size = kNaClAlignment*2;
- {
- /* Emit nops to align call site below which is 7 bytes plus */
- /* the length of the call sequence emitted above. */
- /* Note: this requires the specific trampoline starts on a */
- /* kNaclAlignedment aligned address, which it does because */
- /* it's its own function that is aligned. */
- guint8 nop_buf[256];
- guint8 *nopbuf_aligned = ALIGN_TO (nop_buf, kNaClAlignment);
- guint8 *nopbuf_end = mono_arch_nacl_pad (nopbuf_aligned, kNaClAlignment - 7 - (call_len));
- emit_bytes (acfg, nopbuf_aligned, nopbuf_end - nopbuf_aligned);
- }
- /* The trampoline is stored at the offset'th pointer, the -4 is */
- /* present because RIP relative addressing starts at the end of */
- /* the current instruction, while the label "." is relative to */
- /* the beginning of the current asm location, which in this case */
- /* is not the mov instruction, but the offset itself, due to the */
- /* way the bytes and ints are emitted here. */
- got_offset = (offset * sizeof(gpointer)) - 4;
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
-
- /* naclcall %r11 */
- emit_bytes (acfg, call_start, call_len);
-
- /* The arg is stored at the offset+1 pointer, relative to beginning */
- /* of trampoline: 7 for mov, plus the call length, and 1 for push. */
- got_offset = ((offset + 1) * sizeof(gpointer)) + 7 + call_len + 1;
-
- /* We can't emit this data directly, hide in a "push imm32" */
- emit_byte (acfg, '\x68'); /* push */
- emit_symbol_diff (acfg, acfg->got_symbol, ".", got_offset);
- emit_alignment (acfg, kNaClAlignment);
-#endif /*__native_client_codegen__*/
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to generic trampoline */
x86_jump_reg (code, X86_ECX);
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE(17, kNaClAlignment);
+ *tramp_size = 17;
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
arch_emit_static_rgctx_trampoline (MonoAotCompile *acfg, int offset, int *tramp_size)
{
#if defined(TARGET_AMD64)
-#if defined(__default_codegen__)
/* This should be exactly 13 bytes long */
*tramp_size = 13;
emit_byte (acfg, '\x25');
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
}
-#elif defined(__native_client_codegen__)
- guint8 buf [128];
- guint8 *buf_aligned = ALIGN_TO(buf, kNaClAlignment);
- guint8 *code = buf_aligned;
-
- /* mov <OFFSET>(%rip), %r10d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x15');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4);
-
- /* mov <OFFSET>(%rip), %r11d */
- emit_byte (acfg, '\x45');
- emit_byte (acfg, '\x8b');
- emit_byte (acfg, '\x1d');
- emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4);
-
- /* nacljmp *%r11 */
- amd64_jump_reg (code, AMD64_R11);
- emit_bytes (acfg, buf_aligned, code - buf_aligned);
-
- emit_alignment (acfg, kNaClAlignment);
- *tramp_size = kNaClAlignment;
-#endif /*__native_client_codegen__*/
-
#elif defined(TARGET_ARM)
guint8 buf [128];
guint8 *code;
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
-
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE (15, kNaClAlignment);
+ *tramp_size = 15;
g_assert (code - buf == *tramp_size);
#else
g_assert_not_reached ();
{
#if defined(TARGET_AMD64)
guint8 *buf, *code;
-#if defined(__native_client_codegen__)
- guint8 *buf_alloc;
-#endif
guint8 *labels [16];
guint8 mov_buf[3];
guint8 *mov_buf_ptr = mov_buf;
const int kSizeOfMove = 7;
-#if defined(__default_codegen__)
+
code = buf = (guint8 *)g_malloc (256);
-#elif defined(__native_client_codegen__)
- buf_alloc = g_malloc (256 + kNaClAlignment + kSizeOfMove);
- buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
- /* The RIP relative move below is emitted first */
- buf += kSizeOfMove;
- code = buf;
-#endif
/* FIXME: Optimize this, i.e. use binary search etc. */
/* Maybe move the body into a separate function (slower, but much smaller) */
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf + kSizeOfMove;
-#if defined(__native_client_codegen__)
- /* The tramp will be padded to the next kNaClAlignment bundle. */
- *tramp_size = ALIGN_TO ((*tramp_size), kNaClAlignment);
-#endif
-#if defined(__default_codegen__)
g_free (buf);
-#elif defined(__native_client_codegen__)
- g_free (buf_alloc);
-#endif
#elif defined(TARGET_X86)
guint8 *buf, *code;
-#ifdef __native_client_codegen__
- guint8 *buf_alloc;
-#endif
guint8 *labels [16];
-#if defined(__default_codegen__)
code = buf = g_malloc (256);
-#elif defined(__native_client_codegen__)
- buf_alloc = g_malloc (256 + kNaClAlignment);
- code = buf = ((guint)buf_alloc + kNaClAlignment) & ~kNaClAlignmentMask;
-#endif
/* Allocate a temporary stack slot */
x86_push_reg (code, X86_EAX);
mono_x86_patch (labels [3], code);
x86_breakpoint (code);
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
emit_bytes (acfg, buf, code - buf);
*tramp_size = code - buf;
-#if defined(__default_codegen__)
g_free (buf);
-#elif defined(__native_client_codegen__)
- g_free (buf_alloc);
-#endif
#elif defined(TARGET_ARM)
guint8 buf [128];
/* Branch to the target address */
x86_jump_membase (code, X86_ECX, (offset + 1) * sizeof (gpointer));
-#ifdef __native_client_codegen__
- {
- /* emit nops to next 32 byte alignment */
- int a = (~kNaClAlignmentMask) & ((code - buf) + kNaClAlignment - 1);
- while (code < (buf + a)) x86_nop(code);
- }
-#endif
-
emit_bytes (acfg, buf, code - buf);
- *tramp_size = NACL_SIZE (15, kNaClAlignment);
+ *tramp_size = 15;
g_assert (code - buf == *tramp_size);
#elif defined(TARGET_ARM)
guint8 buf [128];
/* FIXME: Search referenced images as well */
if (!acfg->typespec_classes) {
- acfg->typespec_classes = (MonoClass **)mono_mempool_alloc0 (acfg->mempool, sizeof (MonoClass*) * len);
- for (i = 0; i < len; ++i) {
+ acfg->typespec_classes = g_hash_table_new (NULL, NULL);
+ for (i = 0; i < len; i++) {
MonoError error;
- acfg->typespec_classes [i] = mono_class_get_and_inflate_typespec_checked (acfg->image, MONO_TOKEN_TYPE_SPEC | (i + 1), NULL, &error);
+ int typespec = MONO_TOKEN_TYPE_SPEC | (i + 1);
+ MonoClass *klass_key = mono_class_get_and_inflate_typespec_checked (acfg->image, typespec, NULL, &error);
g_assert (mono_error_ok (&error)); /* FIXME error handling */
+ /* Preserve the original first-match semantics: the old linear scan returned the
+  * first typespec token resolving to klass, so do not let a later duplicate
+  * overwrite an earlier entry (token values are nonzero, so lookup works as a
+  * presence test). */
+ if (!g_hash_table_lookup (acfg->typespec_classes, klass_key))
+ 	g_hash_table_insert (acfg->typespec_classes, klass_key, GINT_TO_POINTER (typespec));
}
}
- for (i = 0; i < len; ++i) {
- if (acfg->typespec_classes [i] == klass)
- break;
- }
-
- if (i < len)
- return MONO_TOKEN_TYPE_SPEC | (i + 1);
- else
- return 0;
+ return GPOINTER_TO_INT (g_hash_table_lookup (acfg->typespec_classes, klass));
}
static void
/* Managed Allocators */
nallocators = mono_gc_get_managed_allocator_types ();
for (i = 0; i < nallocators; ++i) {
- m = mono_gc_get_managed_allocator_by_type (i, TRUE);
- if (m)
+ if ((m = mono_gc_get_managed_allocator_by_type (i, MANAGED_ALLOCATOR_REGULAR)))
add_method (acfg, m);
- }
- for (i = 0; i < nallocators; ++i) {
- m = mono_gc_get_managed_allocator_by_type (i, FALSE);
- if (m)
+ if ((m = mono_gc_get_managed_allocator_by_type (i, MANAGED_ALLOCATOR_SLOW_PATH)))
add_method (acfg, m);
}
}
prefix = "_";
#endif
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+ char adjustedPrefix [MAX_SYMBOL_SIZE];
+ prefix = mangle_symbol (prefix, adjustedPrefix, G_N_ELEMENTS (adjustedPrefix));
+#endif
+
len = strlen (name1);
name2 = (char *)malloc (strlen (prefix) + len + 16);
memcpy (name2, prefix, strlen (prefix));
/* Need to add a prefix to create unique symbols */
prefix = g_strdup_printf ("plt_%s_", acfg->assembly_name_sym);
} else {
+#if defined(TARGET_WIN32) && defined(TARGET_X86)
+ prefix = mangle_symbol_alloc ("plt_");
+#else
prefix = g_strdup ("plt_");
+#endif
}
switch (ji->type) {
emit_line (acfg);
emit_section_change (acfg, ".text", 0);
- emit_alignment_code (acfg, NACL_SIZE(16, kNaClAlignment));
+ emit_alignment_code (acfg, 16);
emit_info_symbol (acfg, "plt");
emit_label (acfg, acfg->plt_symbol);
static G_GNUC_UNUSED void
emit_trampoline_full (MonoAotCompile *acfg, int got_offset, MonoTrampInfo *info, gboolean emit_tinfo)
{
- char start_symbol [256];
- char end_symbol [256];
- char symbol [256];
+ char start_symbol [MAX_SYMBOL_SIZE];
+ char end_symbol [MAX_SYMBOL_SIZE];
+ char symbol [MAX_SYMBOL_SIZE];
guint32 buf_size, info_offset;
MonoJumpInfo *patch_info;
guint8 *buf, *p;
ji = info->ji;
unwind_ops = info->unwind_ops;
-#ifdef __native_client_codegen__
- mono_nacl_fix_patches (code, ji);
-#endif
-
/* Emit code */
sprintf (start_symbol, "%s%s", acfg->user_symbol_prefix, name);
/* Emit debug info */
if (unwind_ops) {
- char symbol2 [256];
+ char symbol2 [MAX_SYMBOL_SIZE];
sprintf (symbol, "%s", name);
sprintf (symbol2, "%snamed_%s", acfg->temp_prefix, name);
static void
emit_trampolines (MonoAotCompile *acfg)
{
- char symbol [256];
- char end_symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
+ char end_symbol [MAX_SYMBOL_SIZE];
int i, tramp_got_offset;
int ntype;
#ifdef MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES
default:
g_assert_not_reached ();
}
-#ifdef __native_client_codegen__
- /* align to avoid 32-byte boundary crossings */
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif
-
if (!acfg->trampoline_size [ntype]) {
g_assert (tramp_size);
acfg->trampoline_size [ntype] = tramp_size;
MonoClass *klass = patch_info->data.field->parent;
/* The .cctor needs to run at runtime. */
- if (klass->generic_class && !mono_generic_context_is_sharable (&klass->generic_class->context, FALSE) && mono_class_get_cctor (klass))
+ if (klass->generic_class && !mono_generic_context_is_sharable_full (&klass->generic_class->context, FALSE, FALSE) && mono_class_get_cctor (klass))
add_extra_method_with_depth (acfg, mono_class_get_cctor (klass), depth + 1);
break;
}
GPtrArray *methods = (GPtrArray *)user_data [2];
int i;
+ MonoError error;
MonoThread *thread = mono_thread_attach (domain);
- mono_thread_set_name_internal (thread->internal_thread, mono_string_new (mono_get_root_domain (), "AOT compiler"), TRUE);
+ mono_thread_set_name_internal (thread->internal_thread, mono_string_new (mono_get_root_domain (), "AOT compiler"), TRUE, &error);
+ mono_error_assert_ok (&error);
for (i = 0; i < methods->len; ++i)
compile_method (acfg, (MonoMethod *)g_ptr_array_index (methods, i));
gboolean
mono_aot_is_linkonce_method (MonoMethod *method)
{
+ return FALSE;
+#if 0
WrapperInfo *info;
// FIXME: Add more cases
if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)))
return TRUE;
return FALSE;
+#endif
}
static gboolean
{
int oindex, i, prev_index;
gboolean saved_unbox_info = FALSE;
- char symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
if (acfg->aot_opts.llvm_only)
return;
* Emit some padding so the local symbol for the first method doesn't have the
* same address as 'methods'.
*/
-#if defined(__default_codegen__)
emit_padding (acfg, 16);
-#elif defined(__native_client_codegen__)
- {
- const int kPaddingSize = 16;
- guint8 pad_buffer[kPaddingSize];
- mono_arch_nacl_pad (pad_buffer, kPaddingSize);
- emit_bytes (acfg, pad_buffer, kPaddingSize);
- }
-#endif
for (oindex = 0; oindex < acfg->method_order->len; ++oindex) {
MonoCompile *cfg;
sprintf (symbol, "ut_%d", get_method_index (acfg, method));
emit_section_change (acfg, ".text", 0);
-#ifdef __native_client_codegen__
- emit_alignment (acfg, AOT_FUNC_ALIGNMENT);
-#endif
if (acfg->thumb_mixed && cfg->compile_llvm) {
emit_set_thumb_mode (acfg);
* To work around linker issues, we emit a table of branches, and disassemble them at runtime.
* This is PIE code, and the linker can update it if needed.
*/
+
sprintf (symbol, "method_addresses");
emit_section_change (acfg, ".text", 1);
emit_alignment_code (acfg, 8);
if (acfg->cfgs [i]) {
if (acfg->aot_opts.llvm_only && acfg->cfgs [i]->compile_llvm)
/* Obtained by calling a generated function in the LLVM image */
- arch_emit_direct_call (acfg, "method_addresses", FALSE, FALSE, NULL, &call_size);
+ arch_emit_direct_call (acfg, symbol, FALSE, FALSE, NULL, &call_size);
else
arch_emit_direct_call (acfg, acfg->cfgs [i]->asm_symbol, FALSE, acfg->thumb_mixed && acfg->cfgs [i]->compile_llvm, NULL, &call_size);
} else {
- arch_emit_direct_call (acfg, "method_addresses", FALSE, FALSE, NULL, &call_size);
+ arch_emit_direct_call (acfg, symbol, FALSE, FALSE, NULL, &call_size);
}
#endif
}
static void
emit_got (MonoAotCompile *acfg)
{
- char symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
if (acfg->aot_opts.llvm_only)
return;
static void
emit_aot_file_info (MonoAotCompile *acfg, MonoAotFileInfo *info)
{
- char symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
int i, sindex;
const char **symbols;
init_aot_file_info (acfg, info);
if (acfg->aot_opts.static_link) {
- char symbol [256];
+ char symbol [MAX_SYMBOL_SIZE];
char *p;
/*
}
#endif
- rename (tmp_outfile_name, outfile_name);
+ if (0 != rename (tmp_outfile_name, outfile_name)) {
+ if (G_FILE_ERROR_EXIST == g_file_error_from_errno (errno)) {
+ /* Since we are rebuilding the module we need to be able to replace any old copies. Remove old file and retry rename operation. */
+ unlink (outfile_name);
+ rename (tmp_outfile_name, outfile_name);
+ }
+ }
#if defined(TARGET_MACH)
command = g_strdup_printf ("dsymutil \"%s\"", outfile_name);
g_hash_table_destroy (acfg->image_hash);
g_hash_table_destroy (acfg->unwind_info_offsets);
g_hash_table_destroy (acfg->method_label_hash);
+ if (acfg->typespec_classes)
+ g_hash_table_destroy (acfg->typespec_classes);
g_hash_table_destroy (acfg->export_names);
g_hash_table_destroy (acfg->plt_entry_debug_sym_cache);
g_hash_table_destroy (acfg->klass_blob_hash);
}
#if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
- acfg->opts |= MONO_OPT_GSHAREDVT;
- opts |= MONO_OPT_GSHAREDVT;
+ if (acfg->aot_opts.llvm_only || mono_aot_mode_is_full (&acfg->aot_opts)) {
+ acfg->opts |= MONO_OPT_GSHAREDVT;
+ opts |= MONO_OPT_GSHAREDVT;
+ }
#endif
if (opts & MONO_OPT_GSHAREDVT)