g_assert_not_reached ();
#endif
+#if 0
/*
* The first plt entry is used to transfer code to the AOT loader.
*/
emit_symbol_diff (acfg, "plt_jump_table", ".", -4);
emit_zero_bytes (acfg, 10);
#elif defined(__arm__)
- /* This is 8 bytes long, init_plt () depends on this */
- emit_unset_mode (acfg);
- fprintf (acfg->fp, "\tldr pc, [pc, #-4]\n");
- /* This is filled up during loading by the AOT loader */
- fprintf (acfg->fp, "\t.word 0\n");
+ if (1 || acfg->aot_opts.full_aot) {
+ /* Generate non-patchable code */
+ /* FIXME: Use this for the non-aot-only case as well */
+ emit_unset_mode (acfg);
+ fprintf (acfg->fp, "\tldr ip, [pc, #4]\n");
+ fprintf (acfg->fp, "\tadd ip, pc, ip\n");
+ fprintf (acfg->fp, "\tldr pc, [ip, #0]\n");
+ emit_symbol_diff (acfg, "plt_jump_table", ".", 4);
+
+ /* Default entry */
+ /* This is 12 bytes long, init_plt () depends on this */
+ emit_unset_mode (acfg);
+ fprintf (acfg->fp, "\tldr ip, [pc, #0]\n");
+ fprintf (acfg->fp, "\tb .Lp_0\n");
+ fprintf (acfg->fp, "\t.word %d\n", plt_info_offsets [i]);
+ } else {
+ /* This is 8 bytes long, init_plt () depends on this */
+ emit_unset_mode (acfg);
+ fprintf (acfg->fp, "\tldr pc, [pc, #-4]\n");
+ /* This is filled up during loading by the AOT loader */
+ fprintf (acfg->fp, "\t.word 0\n");
+ }
#else
g_assert_not_reached ();
+#endif
#endif
- for (i = 1; i < acfg->plt_offset; ++i) {
+ for (i = 0; i < acfg->plt_offset; ++i) {
char *label;
label = g_strdup_printf (".Lp_%d", i);
emit_label (acfg, label);
g_free (label);
+
+ /*
+ * The first plt entry is used to transfer code to the AOT loader.
+ */
+
#if defined(__i386__)
- /* Need to make sure this is 5 bytes long */
- emit_byte (acfg, '\xe9');
- label = g_strdup_printf (".Lpd_%d", i);
- emit_symbol_diff (acfg, label, ".", -4);
- g_free (label);
+ if (i == 0) {
+ /* It is filled up during loading by the AOT loader. */
+ emit_zero_bytes (acfg, 16);
+ } else {
+ /* Need to make sure this is 5 bytes long */
+ emit_byte (acfg, '\xe9');
+ label = g_strdup_printf (".Lpd_%d", i);
+ emit_symbol_diff (acfg, label, ".", -4);
+ g_free (label);
+ }
#elif defined(__x86_64__)
/*
* We can't emit jumps because they are 32 bits only so they can't be patched.
* - optimize SWITCH AOT implementation
* - implement IMT support
*/
- /* This is 8 bytes long, init_plt () depends on this */
- emit_unset_mode (acfg);
- fprintf (acfg->fp, "\tldr pc, [pc, #-4]\n");
- /* This is filled up during loading by the AOT loader */
- fprintf (acfg->fp, "\t.word 0\n");
+ if (1 || acfg->aot_opts.full_aot) {
+ emit_unset_mode (acfg);
+ fprintf (acfg->fp, "\tldr ip, [pc, #4]\n");
+ fprintf (acfg->fp, "\tadd ip, pc, ip\n");
+ fprintf (acfg->fp, "\tldr pc, [ip, #0]\n");
+ emit_symbol_diff (acfg, "plt_jump_table", ".", 0);
+ /* Used by mono_aot_get_plt_info_offset */
+ fprintf (acfg->fp, "\n\t.word %d\n", plt_info_offsets [i]);
+ } else {
+ /* This is 8 bytes long, init_plt () depends on this */
+ emit_unset_mode (acfg);
+ fprintf (acfg->fp, "\tldr pc, [pc, #-4]\n");
+ /* This is filled up during loading by the AOT loader */
+ fprintf (acfg->fp, "\t.word 0\n");
+ }
#else
g_assert_not_reached ();
#endif
#elif defined(__x86_64__)
/* Emitted along with the PLT entries since they will not be patched */
#elif defined(__arm__)
+ /* Emitted along with the PLT entries since they will not be patched */
+#if 0
/* This is 12 bytes long, init_plt () depends on this */
emit_unset_mode (acfg);
fprintf (acfg->fp, "\tldr ip, [pc, #0]\n");
fprintf (acfg->fp, "\tb .Lp_0\n");
fprintf (acfg->fp, "\t.word %d\n", plt_info_offsets [i]);
+#endif
#else
g_assert_not_reached ();
#endif
emit_section_change (acfg, ".bss", 0);
emit_label (acfg, symbol);
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(__arm__)
emit_zero_bytes (acfg, (int)(acfg->plt_offset * sizeof (gpointer)));
#endif
/* Initialize the first PLT entry */
make_writable (info->plt, info->plt_end - info->plt);
x86_jump_code (buf, tramp);
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) || defined(__arm__)
/*
* Initialize the entries in the plt_jump_table to point to the default targets.
*/
/* The first entry points to the AOT trampoline */
((gpointer*)info->plt_jump_table)[0] = tramp;
for (i = 1; i < n_entries; ++i)
+#ifdef __arm__
+ /* All the default entries point to the first entry */
+ ((gpointer*)info->plt_jump_table)[i] = info->plt;
+#else
/* Each PLT entry is 16 bytes long, the default entry begins at offset 6 */
((gpointer*)info->plt_jump_table)[i] = info->plt + (i * 16) + 6;
+#endif
#elif defined(__arm__)
/* Initialize the first PLT entry */
make_writable (info->plt, info->plt_end - info->plt);
return NULL;
}
+/*
+ * mono_aot_get_plt_info_offset:
+ *
+ * Return the PLT info offset belonging to the plt entry called by CODE.
+ * REGS points to the saved register state at the time of the call.
+ */
+guint32
+mono_aot_get_plt_info_offset (gssize *regs, guint8 *code)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	/* The plt entry loaded the offset into a fixed register */
+	return regs [MONO_ARCH_AOT_PLT_OFFSET_REG];
+#elif defined(__arm__)
+	guint8 *plt_entry = mono_aot_get_plt_entry (code);
+
+	g_assert (plt_entry);
+
+	/* The offset is stored as the 5th word of the plt entry */
+	return ((guint32*)plt_entry) [4];
+#else
+	g_assert_not_reached ();
+	/* The function returns guint32, so return 0, not the pointer constant NULL */
+	return 0;
+#endif
+}
+
static gpointer
load_named_code (MonoAotModule *amodule, const char *name)
{
#include "mini.h"
#include "mini-arm.h"
+static guint8* nullified_class_init_trampoline;
+
/*
* Return the instruction to jump from code to target, 0 if not
* reachable with a single instruction
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
-	guint32 ins = branch_for_target_reachable (code, addr);
+	/*
+	 * Redirect the plt entry at CODE to ADDR by patching the jump table slot
+	 * it loads its target from, instead of rewriting the entry's instructions.
+	 * The entry stores the pc-relative offset of its slot as its 4th word;
+	 * the +16 skew presumably compensates for the ARM pc-read-ahead in effect
+	 * when the offset was emitted -- TODO confirm against emit_plt () in the
+	 * AOT compiler.
+	 */
+	guint32 offset = ((guint32*)code)[3];
+	guint8 *jump_entry = code + offset + 16;
-	if (ins)
-		/* Patch the branch */
-		((guint32*)code) [0] = ins;
-	else
-		/* Patch the jump address */
-		((guint32*)code) [1] = (guint32)addr;
-	mono_arch_flush_icache ((guint8*)code, 4);
+	/* The slot is data (the jump table is emitted into .bss), so no icache flush */
+	*(guint8**)jump_entry = addr;
}
void
void
mono_arch_nullify_plt_entry (guint8 *code)
{
-	guint8 buf [4];
-	guint8 *p;
+	/*
+	 * Make the plt entry at CODE a no-op by pointing its jump table slot at
+	 * the nullified class init trampoline (a "mov pc, lr" stub).  In the
+	 * aot-only case the stub is looked up lazily from the AOT image; otherwise
+	 * it is created when the CLASS_INIT trampoline is generated.
+	 * NOTE(review): if neither path has run yet, nullified_class_init_trampoline
+	 * is still NULL here -- confirm callers guarantee the initialization order.
+	 */
+	if (mono_aot_only && !nullified_class_init_trampoline)
+		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");
-	p = buf;
-	ARM_MOV_REG_REG (p, ARMREG_PC, ARMREG_LR);
-
-	((guint32*)code) [0] = ((guint32*)buf) [0];
-	mono_arch_flush_icache ((guint8*)code, 4);
+	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
-
/* Stack size for trampoline function
*/
#define STACK (sizeof (MonoLMF))
/* Sanity check */
g_assert ((buf - code) <= GEN_TRAMP_SIZE);
+ *code_size = buf - code;
+
+ if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
+ guint32 code_len;
+
+ /* Initialize the nullified class init trampoline used in the AOT case */
+ nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
+ }
+
return code;
}
code = buf = mono_global_codeman_reserve (16);
- // FIXME:
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
mono_arch_flush_icache (buf, code - buf);