* mini-ops.h: Use TARGET_POWERPC define for consistency.
* patch-info.h: Add 'MSCORLIB_GOT_ADDR' patch type.
* aot-compiler.c aot-runtime.c: Put the mscorlib got address into the
second got slot of every aot image.
* aot-compiler.c aot-runtime.c mini-trampolines.c: Add support for
aot on platforms with function pointers.
* mini-ppc.h mini-ppp.c cpu-ppc.md exceptions-ppc.c tramp-ppc.c: Add
support for aot/full aot on ppc/ppc64.
svn path=/trunk/mono/; revision=136504
2009-06-19 Zoltan Varga <vargaz@gmail.com>
+ * mini-ops.h: Use TARGET_POWERPC define for consistency.
+
+ * patch-info.h: Add 'MSCORLIB_GOT_ADDR' patch type.
+
+ * aot-compiler.c aot-runtime.c: Put the mscorlib got address into the
+ second got slot of every aot image.
+
+ * aot-compiler.c aot-runtime.c mini-trampolines.c: Add support for
+ aot on platforms with function pointers.
+
+ * mini-ppc.h mini-ppp.c cpu-ppc.md exceptions-ppc.c tramp-ppc.c: Add
+ support for aot/full aot on ppc/ppc64.
+
* tramp-<ARCH>.c (mono_arch_patch_plt_entry): Add 'got' and 'regs'
arguments which are needed on ppc.
char *got_symbol;
GHashTable *method_label_hash;
const char *temp_prefix;
+ guint32 label_generator;
} MonoAotCompile;
#define mono_acfg_lock(acfg) EnterCriticalSection (&((acfg)->mutex))
#else
#define AOT_FUNC_ALIGNMENT 16
#endif
+
+#if defined(TARGET_POWERPC64)
+#define PPC_LD_OP "ld"
+#define PPC_LDX_OP "ldx"
+#else
+#define PPC_LD_OP "lwz"
+#define PPC_LDX_OP "lwzx"
+#endif
/*
* arch_emit_direct_call:
fprintf (acfg->fp, "bl %s\n", target);
}
*call_size = 4;
+#elif defined(TARGET_POWERPC)
+ if (acfg->use_bin_writer) {
+ g_assert_not_reached ();
+ } else {
+ img_writer_emit_unset_mode (acfg->w);
+ fprintf (acfg->fp, "bl %s\n", target);
+ *call_size = 4;
+ }
#else
g_assert_not_reached ();
#endif
}
+/*
+ * PPC32 design:
+ * - we use an approach similar to the x86 abi: reserve a register (r30) to hold
+ * the GOT pointer.
+ * - The full-aot trampolines need access to the GOT of mscorlib, so we store
+ * in in the 2. slot of every GOT, and require every method to place the GOT
+ * address in r30, even when it doesn't access the GOT otherwise. This way,
+ * the trampolines can compute the mscorlib GOT address by loading 4(r30).
+ */
+
+/*
+ * PPC64 design:
+ * PPC64 uses function descriptors which greatly complicate all code, since
+ * these are used very inconsistently in the runtime. Some functions like
+ * mono_compile_method () return ftn descriptors, while others like the
+ * trampoline creation functions do not.
+ * We assume that all GOT slots contain function descriptors, and create
+ * descriptors in aot-runtime.c when needed.
+ * The ppc64 abi uses r2 to hold the address of the TOC/GOT, which is loaded
+ * from function descriptors, we could do the same, but it would require
+ * rewriting all the ppc/aot code to handle function descriptors properly.
+ * So instead, we use the same approach as on PPC32.
+ * This is a horrible mess, but fixing it would probably lead to an even bigger
+ * one.
+ */
+
#ifdef MONO_ARCH_AOT_SUPPORTED
/*
* arch_emit_got_offset:
static void
arch_emit_got_offset (MonoAotCompile *acfg, guint8 *code, int *code_size)
{
+#if defined(TARGET_POWERPC64)
+ g_assert (!acfg->use_bin_writer);
+ img_writer_emit_unset_mode (acfg->w);
+ /*
+ * The ppc32 code doesn't seem to work on ppc64, the assembler complains about
+ * unsupported relocations. So we store the got address into the .Lgot_addr
+ * symbol which is in the text segment, compute its address, and load it.
+ */
+ fprintf (acfg->fp, ".L%d:\n", acfg->label_generator);
+ fprintf (acfg->fp, "lis 0, (.Lgot_addr + 4 - .L%d)@h\n", acfg->label_generator);
+ fprintf (acfg->fp, "ori 0, 0, (.Lgot_addr + 4 - .L%d)@l\n", acfg->label_generator);
+ fprintf (acfg->fp, "add 30, 30, 0\n");
+ fprintf (acfg->fp, "%s 30, 0(30)\n", PPC_LD_OP);
+ acfg->label_generator ++;
+ *code_size = 16;
+#elif defined(TARGET_POWERPC)
+ g_assert (!acfg->use_bin_writer);
+ img_writer_emit_unset_mode (acfg->w);
+ fprintf (acfg->fp, ".L%d:\n", acfg->label_generator);
+ fprintf (acfg->fp, "lis 0, (%s + 4 - .L%d)@h\n", acfg->got_symbol, acfg->label_generator);
+ fprintf (acfg->fp, "ori 0, 0, (%s + 4 - .L%d)@l\n", acfg->got_symbol, acfg->label_generator);
+ acfg->label_generator ++;
+ *code_size = 8;
+#else
guint32 offset = mono_arch_get_patch_offset (code);
emit_bytes (acfg, code, offset);
emit_symbol_diff (acfg, acfg->got_symbol, ".", offset);
*code_size = offset + 4;
+#endif
}
/*
/* Emit the offset */
#ifdef TARGET_AMD64
emit_symbol_diff (acfg, acfg->got_symbol, ".", (unsigned int) ((got_slot * sizeof (gpointer)) - 4));
+ *code_size = mono_arch_get_patch_offset (code) + 4;
#elif defined(TARGET_X86)
emit_int32 (acfg, (unsigned int) ((got_slot * sizeof (gpointer))));
+ *code_size = mono_arch_get_patch_offset (code) + 4;
#elif defined(TARGET_ARM)
emit_symbol_diff (acfg, acfg->got_symbol, ".", (unsigned int) ((got_slot * sizeof (gpointer))) - 12);
+ *code_size = mono_arch_get_patch_offset (code) + 4;
+#elif defined(TARGET_POWERPC)
+ {
+ guint8 buf [32];
+ guint8 *code;
+
+ code = buf;
+ ppc_load32 (code, ppc_r0, got_slot * sizeof (gpointer));
+ g_assert (code - buf == 8);
+ emit_bytes (acfg, buf, code - buf);
+ *code_size = code - buf;
+ }
#else
g_assert_not_reached ();
#endif
-
- *code_size = mono_arch_get_patch_offset (code) + 4;
}
#endif
* The plt_got_info_offset is computed automatically by
* mono_aot_get_plt_info_offset (), so no need to save it here.
*/
+#elif defined(TARGET_POWERPC)
+ guint32 offset = (acfg->plt_got_offset_base + index) * sizeof (gpointer);
+
+ /* The GOT address is guaranteed to be in r30 by OP_LOAD_GOTADDR */
+ g_assert (!acfg->use_bin_writer);
+ img_writer_emit_unset_mode (acfg->w);
+ fprintf (acfg->fp, "lis 11, %d@h\n", offset);
+ fprintf (acfg->fp, "ori 11, 11, %d@l\n", offset);
+ fprintf (acfg->fp, "add 11, 11, 30\n");
+ fprintf (acfg->fp, "%s 11, 0(11)\n", PPC_LD_OP);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ fprintf (acfg->fp, "%s 2, %d(11)\n", PPC_LD_OP, (int)sizeof (gpointer));
+ fprintf (acfg->fp, "%s 11, 0(11)\n", PPC_LD_OP);
+#endif
+ fprintf (acfg->fp, "mtctr 11\n");
+ fprintf (acfg->fp, "bctr\n");
+ emit_int32 (acfg, acfg->plt_got_info_offsets [index]);
#else
g_assert_not_reached ();
#endif
*/
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4 + 4);
//emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4 + 8);
+#elif defined(TARGET_POWERPC)
+ guint8 buf [128];
+ guint8 *code;
+
+ *tramp_size = 4;
+ code = buf;
+
+ g_assert (!acfg->use_bin_writer);
+
+ /*
+ * PPC has no ip relative addressing, so we need to compute the address
+ * of the mscorlib got. That is slow and complex, so instead, we store it
+ * in the second got slot of every aot image. The caller already computed
+ * the address of its got and placed it into r30.
+ */
+ img_writer_emit_unset_mode (acfg->w);
+ /* Load mscorlib got address */
+ fprintf (acfg->fp, "%s 0, %d(30)\n", PPC_LD_OP, (int)sizeof (gpointer));
+ /* Load generic trampoline address */
+ fprintf (acfg->fp, "lis 11, %d@h\n", (int)(offset * sizeof (gpointer)));
+ fprintf (acfg->fp, "ori 11, 11, %d@l\n", (int)(offset * sizeof (gpointer)));
+ fprintf (acfg->fp, "%s 11, 11, 0\n", PPC_LDX_OP);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ fprintf (acfg->fp, "%s 11, 0(11)\n", PPC_LD_OP);
+#endif
+ fprintf (acfg->fp, "mtctr 11\n");
+ /* Load trampoline argument */
+ /* On ppc, we pass it normally to the generic trampoline */
+ fprintf (acfg->fp, "lis 11, %d@h\n", (int)((offset + 1) * sizeof (gpointer)));
+ fprintf (acfg->fp, "ori 11, 11, %d@l\n", (int)((offset + 1) * sizeof (gpointer)));
+ fprintf (acfg->fp, "%s 0, 11, 0\n", PPC_LDX_OP);
+ /* Branch to generic trampoline */
+ fprintf (acfg->fp, "bctr\n");
+
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ *tramp_size = 10 * 4;
+#else
+ *tramp_size = 9 * 4;
+#endif
#else
g_assert_not_reached ();
#endif
} else {
fprintf (acfg->fp, "\n\tb %s\n", call_target);
}
+#elif defined(TARGET_POWERPC)
+ int this_pos = 3;
+
+ if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret))
+ this_pos = 4;
+
+ g_assert (!acfg->use_bin_writer);
+
+ fprintf (acfg->fp, "\n\taddi %d, %d, %d\n", this_pos, this_pos, (int)sizeof (MonoObject));
+ fprintf (acfg->fp, "\n\tb %s\n", call_target);
#else
g_assert_not_reached ();
#endif
emit_bytes (acfg, buf, code - buf);
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) - 4 + 8);
emit_symbol_diff (acfg, acfg->got_symbol, ".", ((offset + 1) * sizeof (gpointer)) - 4 + 4);
+#elif defined(TARGET_POWERPC)
+ guint8 buf [128];
+ guint8 *code;
+
+ *tramp_size = 4;
+ code = buf;
+
+ g_assert (!acfg->use_bin_writer);
+
+ /*
+ * PPC has no ip relative addressing, so we need to compute the address
+ * of the mscorlib got. That is slow and complex, so instead, we store it
+ * in the second got slot of every aot image. The caller already computed
+ * the address of its got and placed it into r30.
+ */
+ img_writer_emit_unset_mode (acfg->w);
+ /* Load mscorlib got address */
+ fprintf (acfg->fp, "%s 0, %d(30)\n", PPC_LD_OP, (int)sizeof (gpointer));
+ /* Load rgctx */
+ fprintf (acfg->fp, "lis 11, %d@h\n", (int)(offset * sizeof (gpointer)));
+ fprintf (acfg->fp, "ori 11, 11, %d@l\n", (int)(offset * sizeof (gpointer)));
+ fprintf (acfg->fp, "%s %d, 11, 0\n", PPC_LDX_OP, MONO_ARCH_RGCTX_REG);
+ /* Load target address */
+ fprintf (acfg->fp, "lis 11, %d@h\n", (int)((offset + 1) * sizeof (gpointer)));
+ fprintf (acfg->fp, "ori 11, 11, %d@l\n", (int)((offset + 1) * sizeof (gpointer)));
+ fprintf (acfg->fp, "%s 11, 11, 0\n", PPC_LDX_OP);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ fprintf (acfg->fp, "%s 2, %d(11)\n", PPC_LD_OP, (int)sizeof (gpointer));
+ fprintf (acfg->fp, "%s 11, 0(11)\n", PPC_LD_OP);
+#endif
+ fprintf (acfg->fp, "mtctr 11\n");
+ /* Branch to the target address */
+ fprintf (acfg->fp, "bctr\n");
+
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ *tramp_size = 11 * 4;
+#else
+ *tramp_size = 9 * 4;
+#endif
+
#else
g_assert_not_reached ();
#endif
emit_symbol_diff (acfg, acfg->got_symbol, ".", (offset * sizeof (gpointer)) + (code - (labels [0] + 8)) - 4);
*tramp_size = code - buf + 4;
+#elif defined(TARGET_POWERPC)
+ guint8 buf [128];
+ guint8 *code, *labels [16];
+
+ code = buf;
+
+ /* Load the mscorlib got address */
+ ppc_load_reg (code, ppc_r11, sizeof (gpointer), ppc_r30);
+ /* Load the parameter from the GOT */
+ ppc_load (code, ppc_r0, offset * sizeof (gpointer));
+ ppc_load_reg_indexed (code, ppc_r11, ppc_r11, ppc_r0);
+
+ /* Load and check key */
+ labels [1] = code;
+ ppc_load_reg (code, ppc_r0, 0, ppc_r11);
+ ppc_cmp (code, 0, sizeof (gpointer) == 8 ? 1 : 0, ppc_r0, MONO_ARCH_IMT_REG);
+ labels [2] = code;
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
+
+ /* End-of-loop check */
+ ppc_cmpi (code, 0, sizeof (gpointer) == 8 ? 1 : 0, ppc_r0, 0);
+ labels [3] = code;
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
+
+ /* Loop footer */
+ ppc_addi (code, ppc_r11, ppc_r11, 2 * sizeof (gpointer));
+ labels [4] = code;
+ ppc_b (code, 0);
+ mono_ppc_patch (labels [4], labels [1]);
+
+ /* Match */
+ mono_ppc_patch (labels [2], code);
+ ppc_load_reg (code, ppc_r11, sizeof (gpointer), ppc_r11);
+ /* r11 now contains the value of the vtable slot */
+ /* this is not a function descriptor on ppc64 */
+ ppc_load_reg (code, ppc_r11, 0, ppc_r11);
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+
+ /* Fail */
+ mono_ppc_patch (labels [3], code);
+ /* FIXME: */
+ ppc_break (code);
+
+ *tramp_size = code - buf;
+
+ emit_bytes (acfg, buf, code - buf);
#else
g_assert_not_reached ();
#endif
/* FIXME: locking */
g_hash_table_foreach (mono_get_jit_icall_info (), add_jit_icall_wrapper, acfg);
+ /* stelemref */
+ add_method (acfg, mono_marshal_get_stelemref ());
+
+#ifdef MONO_ARCH_HAVE_TLS_GET
/* Managed Allocators */
nallocators = mono_gc_get_managed_allocator_types ();
for (i = 0; i < nallocators; ++i) {
add_method (acfg, m);
}
- /* stelemref */
- add_method (acfg, mono_marshal_get_stelemref ());
-
/* Monitor Enter/Exit */
desc = mono_method_desc_new ("Monitor:Enter", FALSE);
orig_method = mono_method_desc_search_in_class (desc, mono_defaults.monitor_class);
method = mono_monitor_get_fast_path (orig_method);
if (method)
add_method (acfg, method);
+#endif
}
/*
arch_emit_got_offset (acfg, code + i, &code_size);
i += code_size - 1;
skip = TRUE;
+ patch_info->type = MONO_PATCH_INFO_NONE;
break;
}
default: {
case MONO_PATCH_INFO_IMAGE:
encode_value (get_image_index (acfg, patch_info->data.image), p, &p);
break;
+ case MONO_PATCH_INFO_MSCORLIB_GOT_ADDR:
+ break;
case MONO_PATCH_INFO_METHOD_REL:
encode_value ((gint)patch_info->data.offset, p, &p);
break;
/* Sort relocations */
patches = g_ptr_array_new ();
for (patch_info = ji; patch_info; patch_info = patch_info->next)
- g_ptr_array_add (patches, patch_info);
+ if (patch_info->type != MONO_PATCH_INFO_NONE)
+ g_ptr_array_add (patches, patch_info);
g_ptr_array_sort (patches, compare_patches);
buf_size = patches->len * 128 + 128;
emit_trampoline (acfg, "throw_pending_exception", code, code_size, acfg->got_offset, ji, NULL);
#endif
-#if defined(TARGET_AMD64) || defined(TARGET_ARM)
for (i = 0; i < 128; ++i) {
int offset;
sprintf (symbol, "rgctx_fetch_trampoline_%u", offset);
emit_trampoline (acfg, symbol, code, code_size, acfg->got_offset, ji, NULL);
}
-#endif
-#if defined(TARGET_AMD64) || defined(TARGET_ARM)
{
GSList *l;
l = l->next;
}
}
-#endif
#endif /* #ifdef MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES */
char symbol [256];
GList *l;
+#if defined(TARGET_POWERPC64)
+ sprintf (symbol, ".Lgot_addr");
+ emit_section_change (acfg, ".text", 0);
+ emit_alignment (acfg, 8);
+ emit_label (acfg, symbol);
+ emit_pointer (acfg, acfg->got_symbol);
+#endif
+
sprintf (symbol, "methods");
emit_section_change (acfg, ".text", 0);
emit_global (acfg, symbol, TRUE);
#if defined(TARGET_AMD64)
#define AS_OPTIONS "--64"
+#elif defined(TARGET_POWERPC64)
+#define AS_OPTIONS "-a64 -mppc64"
+#define LD_OPTIONS "-m elf64ppc"
#elif defined(sparc) && SIZEOF_VOID_P == 8
#define AS_OPTIONS "-xarch=v9"
#else
#define AS_OPTIONS ""
+#endif
+
+#ifndef LD_OPTIONS
+#define LD_OPTIONS ""
#endif
if (acfg->aot_opts.asm_only) {
#elif defined(PLATFORM_WIN32)
command = g_strdup_printf ("gcc -shared --dll -mno-cygwin -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
#else
- command = g_strdup_printf ("ld -shared -o %s %s.o", tmp_outfile_name, acfg->tmpfname);
+ command = g_strdup_printf ("ld %s -shared -o %s %s.o", LD_OPTIONS, tmp_outfile_name, acfg->tmpfname);
#endif
printf ("Executing the native linker: %s\n", command);
if (system (command) != 0) {
ji->data.image = acfg->image;
get_got_offset (acfg, ji);
+
+ /* Slot 1 is reserved for the mscorlib got addr */
+ ji = mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
+ ji->type = MONO_PATCH_INFO_MSCORLIB_GOT_ADDR;
+ get_got_offset (acfg, ji);
}
TV_GETTIME (atv);
#ifdef PLATFORM_WIN32
#define SHARED_EXT ".dll"
-#elif (defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__)) || defined(__MACH__)
+#elif ((defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__)) || defined(__MACH__)) && !defined(__linux__)
#define SHARED_EXT ".dylib"
#else
#define SHARED_EXT ".so"
assembly->image->aot_module = amodule;
+ if (mono_aot_only) {
+ if (mono_defaults.corlib) {
+ /* The second got slot contains the mscorlib got addr */
+ MonoAotModule *mscorlib_amodule = mono_defaults.corlib->aot_module;
+
+ amodule->got [1] = mscorlib_amodule->got;
+ } else {
+ amodule->got [1] = amodule->got;
+ }
+ }
+
/*
* Since we store methoddef and classdef tokens when referring to methods/classes in
* referenced assemblies, we depend on the exact versions of the referenced assemblies.
goto cleanup;
if (!method && !mono_aot_only && !no_aot_trampoline && (ji->type == MONO_PATCH_INFO_METHOD) && (mono_metadata_token_table (token) == MONO_TABLE_METHOD)) {
- ji->data.target = mono_create_jit_trampoline_from_token (image, token);
+ ji->data.target = mono_create_ftnptr (mono_domain_get (), mono_create_jit_trampoline_from_token (image, token));
ji->type = MONO_PATCH_INFO_ABS;
}
else {
if (!aot_module->got [got_slots [pindex]]) {
aot_module->got [got_slots [pindex]] = mono_resolve_patch_target (method, domain, code, ji, TRUE);
+ if (ji->type == MONO_PATCH_INFO_METHOD_JUMP)
+ aot_module->got [got_slots [pindex]] = mono_create_ftnptr (domain, aot_module->got [got_slots [pindex]]);
if (ji->type == MONO_PATCH_INFO_METHOD_JUMP)
register_jump_target_got_slot (domain, ji->data.method, &(aot_module->got [got_slots [pindex]]));
}
return index;
}
+/*
+ * mono_aot_get_method:
+ *
+ * Return a pointer to the AOTed native code for METHOD if it can be found,
+ * NULL otherwise.
+ * On platforms with function pointers, this doesn't return a function pointer.
+ */
gpointer
mono_aot_get_method (MonoDomain *domain, MonoMethod *method)
{
static void
init_plt (MonoAotModule *amodule)
{
+#ifndef MONO_CROSS_COMPILE
+
#ifdef MONO_ARCH_AOT_SUPPORTED
#ifdef __i386__
guint8 *buf = amodule->plt;
-#elif defined(__x86_64__) || defined(__arm__)
+#elif defined(__x86_64__) || defined(__arm__) || defined(__mono_ppc__)
int i;
+ gpointer plt_0;
#endif
gpointer tramp;
/* Initialize the first PLT entry */
make_writable (amodule->plt, amodule->plt_end - amodule->plt);
x86_jump_code (buf, tramp);
-#elif defined(__x86_64__) || defined(__arm__)
+#elif defined(__x86_64__) || defined(__arm__) || defined(__mono_ppc__)
/*
* Initialize the PLT entries in the GOT to point to the default targets.
*/
+ tramp = mono_create_ftnptr (mono_domain_get (), tramp);
+ plt_0 = mono_create_ftnptr (mono_domain_get (), amodule->plt);
/* The first entry points to the AOT trampoline */
((gpointer*)amodule->got)[amodule->info.plt_got_offset_base] = tramp;
for (i = 1; i < amodule->info.plt_size; ++i)
/* All the default entries point to the first entry */
- ((gpointer*)amodule->got)[amodule->info.plt_got_offset_base + i] = amodule->plt;
+ ((gpointer*)amodule->got)[amodule->info.plt_got_offset_base + i] = plt_0;
#else
g_assert_not_reached ();
#endif
amodule->plt_inited = TRUE;
#endif
+
+#endif /* MONO_CROSS_COMPILE */
}
/*
mono_aot_get_plt_entry (guint8 *code)
{
MonoAotModule *aot_module = find_aot_module (code);
-#if defined(__arm__)
+#if defined(__arm__) || defined(__mono_ppc__)
guint32 ins;
#endif
if ((target >= (guint8*)(aot_module->plt)) && (target < (guint8*)(aot_module->plt_end)))
return target;
}
+#elif defined(__mono_ppc__)
+ /* Should be a bl */
+ ins = ((guint32*)(gpointer)code) [-1];
+
+ if ((ins >> 26 == 18) && ((ins & 1) == 1) && ((ins & 2) == 0)) {
+ gint32 disp = (((gint32)ins) >> 2) & 0xffffff;
+ guint8 *target = code - 4 + (disp * 4);
+
+ if ((target >= (guint8*)(aot_module->plt)) && (target < (guint8*)(aot_module->plt_end)))
+ return target;
+ }
#else
g_assert_not_reached ();
#endif
#elif defined(__arm__)
/* The offset is stored as the 4th word of the plt entry */
return ((guint32*)plt_entry) [3];
+#elif defined(__mono_ppc__)
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ return ((guint32*)plt_entry) [8];
+#else
+ return ((guint32*)plt_entry) [6];
+#endif
#else
g_assert_not_reached ();
return 0;
#endif
}
+static gpointer
+mono_create_ftnptr_malloc (guint8 *code)
+{
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ MonoPPCFunctionDescriptor *ftnptr = g_malloc0 (sizeof (MonoPPCFunctionDescriptor));
+
+ ftnptr->code = code;
+ ftnptr->toc = NULL;
+ ftnptr->env = NULL;
+
+ return ftnptr;
+#else
+ return code;
+#endif
+}
+
/*
* load_function:
*
target = mono_arm_throw_exception;
} else if (!strcmp (ji->data.name, "mono_arm_throw_exception_by_token")) {
target = mono_arm_throw_exception_by_token;
+#endif
+#ifdef __mono_ppc__
+ } else if (!strcmp (ji->data.name, "mono_ppc_throw_exception")) {
+ target = mono_ppc_throw_exception;
#endif
} else if (strstr (ji->data.name, "trampoline_func_") == ji->data.name) {
int tramp_type2 = atoi (ji->data.name + strlen ("trampoline_func_"));
res = sscanf (ji->data.name, "specific_trampoline_lazy_fetch_%u", &slot);
g_assert (res == 1);
target = mono_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
+ target = mono_create_ftnptr_malloc (target);
} else if (!strcmp (ji->data.name, "specific_trampoline_monitor_enter")) {
target = mono_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);
+ target = mono_create_ftnptr_malloc (target);
} else if (!strcmp (ji->data.name, "specific_trampoline_monitor_exit")) {
target = mono_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
+ target = mono_create_ftnptr_malloc (target);
} else if (!strcmp (ji->data.name, "specific_trampoline_generic_class_init")) {
target = mono_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);
+ target = mono_create_ftnptr_malloc (target);
} else if (!strcmp (ji->data.name, "mono_thread_get_and_clear_pending_exception")) {
target = mono_thread_get_and_clear_pending_exception;
} else {
/*
* Return the piece of code identified by NAME from the mscorlib AOT file.
+ * On ppc64, this returns a function descriptor.
*/
gpointer
mono_aot_get_named_code (const char *name)
amodule = image->aot_module;
g_assert (amodule);
- return load_function (amodule, name);
+ return mono_create_ftnptr_malloc (load_function (amodule, name));
}
/* Return a given kind of trampoline */
amodule->got [got_offset] = ctx;
amodule->got [got_offset + 1] = addr;
- return code;
+ /* The caller expects an ftnptr */
+ return mono_create_ftnptr (mono_domain_get (), code);
}
gpointer
}
code = load_function (amodule, symbol);
g_free (symbol);
- return code;
+
+ /* The caller expects an ftnptr */
+ return mono_create_ftnptr (mono_domain_get (), code);
}
gpointer
setlret: src1:i src2:i len:12
checkthis: src1:b len:4
voidcall: len:16 clob:c
-voidcall_reg: src1:i len:8 clob:c
+voidcall_reg: src1:i len:16 clob:c
voidcall_membase: src1:b len:12 clob:c
fcall: dest:g len:16 clob:c
-fcall_reg: dest:g src1:i len:8 clob:c
+fcall_reg: dest:g src1:i len:16 clob:c
fcall_membase: dest:g src1:b len:12 clob:c
lcall: dest:l len:16 clob:c
-lcall_reg: dest:l src1:i len:8 clob:c
+lcall_reg: dest:l src1:i len:16 clob:c
lcall_membase: dest:l src1:b len:12 clob:c
vcall: len:16 clob:c
-vcall_reg: src1:i len:8 clob:c
+vcall_reg: src1:i len:16 clob:c
vcall_membase: src1:b len:12 clob:c
-call_reg: dest:a src1:i len:8 clob:c
+call_reg: dest:a src1:i len:16 clob:c
call_membase: dest:a src1:b len:12 clob:c
iconst: dest:i len:8
r4const: dest:f len:12
-r8const: dest:f len:12
+r8const: dest:f len:24
label: len:0
store_membase_reg: dest:b src1:i len:12
storei1_membase_reg: dest:b src1:i len:12
call_handler: len:12
endfilter: src1:i len:32
aot_const: dest:i len:8
+load_gotaddr: dest:i len:32
+got_entry: dest:i src1:b len:32
sqrt: dest:f src1:f len:4
adc: dest:i src1:i src2:i len:4
addcc: dest:i src1:i src2:i len:4
iconst: dest:i len:20
i8const: dest:i len:20
r4const: dest:f len:12
-r8const: dest:f len:12
+r8const: dest:f len:24
label: len:0
store_membase_reg: dest:b src1:i len:12
storei1_membase_reg: dest:b src1:i len:12
call_handler: len:12
endfilter: src1:i len:20
aot_const: dest:i len:8
+load_gotaddr: dest:i len:32
+got_entry: dest:i src1:b len:32
sqrt: dest:f src1:f len:4
adc: dest:i src1:i src2:i len:4
addcc: dest:i src1:i src2:i len:4
* The first argument in r3 is the pointer to the context.
*/
gpointer
-mono_arch_get_restore_context (void)
+mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *start, *code;
int size = MONO_PPC_32_64_CASE (128, 172) + PPC_FTNPTR_SIZE;
code = start = mono_global_codeman_reserve (size);
- code = mono_ppc_create_pre_code_ftnptr (code);
+ *ji = NULL;
+ if (!aot)
+ code = mono_ppc_create_pre_code_ftnptr (code);
restore_regs_from_context (ppc_r3, ppc_r4, ppc_r5);
/* restore also the stack pointer */
ppc_load_reg (code, ppc_sp, G_STRUCT_OFFSET (MonoContext, sc_sp), ppc_r3);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, code - start);
+
+ *code_size = code - start;
+
return start;
}
* @exc object in this case).
*/
gpointer
-mono_arch_get_call_filter (void)
+mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *start, *code;
int alloc_size, pos, i;
int size = MONO_PPC_32_64_CASE (320, 500) + PPC_FTNPTR_SIZE;
+ *ji = NULL;
+
/* call_filter (MonoContext *ctx, unsigned long eip, gpointer exc) */
code = start = mono_global_codeman_reserve (size);
- code = mono_ppc_create_pre_code_ftnptr (code);
+ if (!aot)
+ code = mono_ppc_create_pre_code_ftnptr (code);
/* store ret addr */
ppc_mflr (code, ppc_r0);
g_assert ((code - start) < size);
mono_arch_flush_icache (start, code - start);
+
+ *code_size = code - start;
+
return start;
}
-static void
-throw_exception (MonoObject *exc, unsigned long eip, unsigned long esp, gulong *int_regs, gdouble *fp_regs, gboolean rethrow)
+void
+mono_ppc_throw_exception (MonoObject *exc, unsigned long eip, unsigned long esp, gulong *int_regs, gdouble *fp_regs, gboolean rethrow)
{
static void (*restore_context) (MonoContext *);
MonoContext ctx;
* void (*func) (guint32 ex_token, gpointer ip)
*
*/
-static gpointer
-mono_arch_get_throw_exception_generic (int size, int corlib, gboolean rethrow)
+static gpointer
+mono_arch_get_throw_exception_generic (int size, guint32 *code_size, MonoJumpInfo **ji, int corlib, gboolean rethrow, gboolean aot)
{
guint8 *start, *code;
int alloc_size, pos;
- start = mono_global_codeman_reserve (size);
+ *ji = NULL;
- code = mono_ppc_create_pre_code_ftnptr (start);
+ code = start = mono_global_codeman_reserve (size);
+ if (!aot)
+ code = mono_ppc_create_pre_code_ftnptr (code);
/* store ret addr */
if (corlib)
//ppc_break (code);
if (corlib) {
ppc_mr (code, ppc_r4, ppc_r3);
- ppc_load (code, ppc_r3, (gulong)mono_defaults.corlib);
- ppc_load_func (code, ppc_r0, mono_exception_from_token);
- ppc_mtctr (code, ppc_r0);
- ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (start, code, ji, MONO_PATCH_INFO_IMAGE, mono_defaults.corlib);
+ ppc_mr (code, ppc_r3, ppc_r11);
+ code = mono_arch_emit_load_aotconst (start, code, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (code, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_load_reg (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+ } else {
+ ppc_load (code, ppc_r3, (gulong)mono_defaults.corlib);
+ ppc_load_func (code, ppc_r0, mono_exception_from_token);
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+ }
}
/* call throw_exception (exc, ip, sp, int_regs, fp_regs) */
ppc_addi (code, ppc_r6, ppc_sp, pos);
ppc_li (code, ppc_r8, rethrow);
- ppc_load_func (code, ppc_r0, throw_exception);
- ppc_mtctr (code, ppc_r0);
- ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+ if (aot) {
+ // This can be called from runtime code, which can't guarantee that
+ // r30 contains the got address.
+ // So emit the got address loading code too
+ code = mono_arch_emit_load_got_addr (start, code, NULL, ji);
+ code = mono_arch_emit_load_aotconst (start, code, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_ppc_throw_exception");
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (code, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_load_reg (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+ } else {
+ ppc_load_func (code, ppc_r0, mono_ppc_throw_exception);
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctrl (code, PPC_BR_ALWAYS, 0);
+ }
/* we should never reach this breakpoint */
ppc_break (code);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, code - start);
+
+ *code_size = code - start;
+
return start;
}
*
*/
gpointer
-mono_arch_get_rethrow_exception (void)
+mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
int size = MONO_PPC_32_64_CASE (132, 224) + PPC_FTNPTR_SIZE;
- return mono_arch_get_throw_exception_generic (size, FALSE, TRUE);
+ if (aot)
+ size += 64;
+ return mono_arch_get_throw_exception_generic (size, code_size, ji, FALSE, TRUE, aot);
}
+
/**
* arch_get_throw_exception:
*
* x86_call_code (code, arch_get_throw_exception ());
*
*/
-gpointer
-mono_arch_get_throw_exception (void)
+gpointer
+mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
int size = MONO_PPC_32_64_CASE (132, 224) + PPC_FTNPTR_SIZE;
- return mono_arch_get_throw_exception_generic (size, FALSE, FALSE);
+ if (aot)
+ size += 64;
+ return mono_arch_get_throw_exception_generic (size, code_size, ji, FALSE, FALSE, aot);
}
/**
* x86_call_code (code, arch_get_throw_exception_by_name ());
*
*/
-gpointer
-mono_arch_get_throw_exception_by_name (void)
+gpointer
+mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *start, *code;
int size = 64;
+ *ji = NULL;
+
/* Not used on PPC */
start = code = mono_global_codeman_reserve (size);
ppc_break (code);
mono_arch_flush_icache (start, code - start);
+ *code_size = code - start;
return start;
}
* signature: void (*func) (guint32 ex_token, guint32 offset);
* On PPC, we pass the ip instead of the offset
*/
-gpointer
-mono_arch_get_throw_corlib_exception (void)
+gpointer
+mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
int size = MONO_PPC_32_64_CASE (168, 304) + PPC_FTNPTR_SIZE;
- return mono_arch_get_throw_exception_generic (size, TRUE, FALSE);
+ if (aot)
+ size += 64;
+ return mono_arch_get_throw_exception_generic (size, code_size, ji, TRUE, FALSE, aot);
}
/* mono_arch_find_jit_info:
void
mono_arch_sigctx_to_monoctx (void *ctx, MonoContext *mctx)
{
+#ifdef MONO_CROSS_COMPILE
+ g_assert_not_reached ();
+#else
os_ucontext *uc = ctx;
mctx->sc_ir = UCONTEXT_REG_NIP(uc);
mctx->sc_sp = UCONTEXT_REG_Rn(uc, 1);
memcpy (&mctx->regs, &UCONTEXT_REG_Rn(uc, 13), sizeof (gulong) * MONO_SAVED_GREGS);
memcpy (&mctx->fregs, &UCONTEXT_REG_FPRn(uc, 14), sizeof (double) * MONO_SAVED_FREGS);
+#endif
}
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *ctx)
{
+#ifdef MONO_CROSS_COMPILE
+ g_assert_not_reached ();
+#else
os_ucontext *uc = ctx;
UCONTEXT_REG_NIP(uc) = mctx->sc_ir;
UCONTEXT_REG_Rn(uc, 1) = mctx->sc_sp;
memcpy (&UCONTEXT_REG_Rn(uc, 13), &mctx->regs, sizeof (gulong) * MONO_SAVED_GREGS);
memcpy (&UCONTEXT_REG_FPRn(uc, 14), &mctx->fregs, sizeof (double) * MONO_SAVED_FREGS);
+#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
+#ifdef MONO_CROSS_COMPILE
+ g_assert_not_reached ();
+#else
os_ucontext *uc = sigctx;
return (gpointer)UCONTEXT_REG_NIP(uc);
+#endif
}
static void
void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
+#ifdef MONO_CROSS_COMPILE
+ g_assert_not_reached ();
+#else
#ifdef MONO_ARCH_USE_SIGACTION
os_ucontext *uc = (ucontext_t*)sigctx;
os_ucontext *uc_copy;
UCONTEXT_REG_Rn(uc, PPC_FIRST_ARG_REG + 1) = 0;
UCONTEXT_REG_Rn(uc, PPC_FIRST_ARG_REG + 2) = 0;
#endif
+
+#endif /* !MONO_CROSS_COMPILE */
}
gboolean
* The got_var contains the address of the Global Offset Table when AOT
* compiling.
*/
-inline static MonoInst *
+MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+#ifdef MONO_CROSS_COMPILE
+ MonoMethod *managed_alloc = NULL;
+#else
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
+#endif
gboolean pass_lw;
if (managed_alloc) {
if (strcmp (method->name, "InternalAllocateStr") == 0) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
+#ifdef MONO_CROSS_COMPILE
+ MonoMethod *managed_alloc = NULL;
+#else
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
+#endif
if (!managed_alloc)
return NULL;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
break;
case CEE_LDC_R4: {
float *f;
+ gboolean use_aotconst = FALSE;
+
+#ifdef TARGET_POWERPC
+ /* FIXME: Clean this up */
+ if (cfg->compile_aot)
+ use_aotconst = TRUE;
+#endif
+
/* FIXME: we should really allocate this only late in the compilation process */
f = mono_domain_alloc (cfg->domain, sizeof (float));
CHECK_OPSIZE (5);
CHECK_STACK_OVF (1);
- MONO_INST_NEW (cfg, ins, OP_R4CONST);
- ins->type = STACK_R8;
- ins->dreg = alloc_dreg (cfg, STACK_R8);
+
+ if (use_aotconst) {
+ MonoInst *cons;
+ int dreg;
+
+ EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
+
+ dreg = alloc_freg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
+ ins->type = STACK_R8;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_R4CONST);
+ ins->type = STACK_R8;
+ ins->dreg = alloc_dreg (cfg, STACK_R8);
+ ins->inst_p0 = f;
+ MONO_ADD_INS (bblock, ins);
+ }
++ip;
readr4 (ip, f);
- ins->inst_p0 = f;
- MONO_ADD_INS (bblock, ins);
-
ip += 4;
*sp++ = ins;
break;
}
case CEE_LDC_R8: {
double *d;
+ gboolean use_aotconst = FALSE;
+
+#ifdef TARGET_POWERPC
+ /* FIXME: Clean this up */
+ if (cfg->compile_aot)
+ use_aotconst = TRUE;
+#endif
+
/* FIXME: we should really allocate this only late in the compilation process */
d = mono_domain_alloc (cfg->domain, sizeof (double));
CHECK_OPSIZE (9);
CHECK_STACK_OVF (1);
- MONO_INST_NEW (cfg, ins, OP_R8CONST);
- ins->type = STACK_R8;
- ins->dreg = alloc_dreg (cfg, STACK_R8);
+
+ if (use_aotconst) {
+ MonoInst *cons;
+ int dreg;
+
+ EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
+
+ dreg = alloc_freg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
+ ins->type = STACK_R8;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_R8CONST);
+ ins->type = STACK_R8;
+ ins->dreg = alloc_dreg (cfg, STACK_R8);
+ ins->inst_p0 = d;
+ MONO_ADD_INS (bblock, ins);
+ }
++ip;
readr8 (ip, d);
- ins->inst_p0 = d;
- MONO_ADD_INS (bblock, ins);
-
ip += 8;
- *sp++ = ins;
+ *sp++ = ins;
break;
}
case CEE_DUP: {
*/
ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
NULLIFY_INS (addr);
+ } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
+ NULLIFY_INS (addr);
} else {
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
}
MONO_ADD_INS (cfg->cbb, store);
}
+#ifdef TARGET_POWERPC
+ if (cfg->compile_aot)
+ /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
+ mono_get_got_var (cfg);
+#endif
+
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
#include "mini-x86.h"
#elif defined(TARGET_AMD64)
#include "mini-amd64.h"
-#elif defined(__mono_ppc__)
+#elif defined(TARGET_POWERPC)
#include "mini-ppc.h"
#elif defined(__sparc__) || defined(sparc)
#include "mini-sparc.h"
MINI_OP(OP_AMD64_SAVE_SP_TO_LMF, "amd64_save_sp_to_lmf", NONE, NONE, NONE)
#endif
-#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__)
+#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(TARGET_POWERPC)
MINI_OP(OP_PPC_SUBFIC, "ppc_subfic", IREG, IREG, NONE)
MINI_OP(OP_PPC_SUBFZE, "ppc_subfze", IREG, IREG, NONE)
MINI_OP(OP_CHECK_FINITE, "ppc_check_finite", NONE, IREG, NONE)
#include <mono/metadata/debug-helpers.h>
#include "mini-ppc.h"
-#ifdef __mono_ppc64__
+#ifdef TARGET_POWERPC64
#include "cpu-ppc64.h"
#else
#include "cpu-ppc.h"
} while (0)
#define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
- MonoInst *inst; \
+ MonoInst *inst; \
MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
inst->type = STACK_R8; \
inst->dreg = (dr); \
#define MAX_ARCH_DELEGATE_PARAMS 7
-gpointer
-mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+static gpointer
+get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len, gboolean aot)
{
guint8 *code, *start;
- /* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- return NULL;
-
if (has_target) {
- static guint8* cached = NULL;
- int size = MONO_PPC_32_64_CASE (16, 20) + PPC_FTNPTR_SIZE;
- mono_mini_arch_lock ();
- if (cached) {
- mono_mini_arch_unlock ();
- return cached;
- }
+ int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE;
start = code = mono_global_codeman_reserve (size);
- code = mono_ppc_create_pre_code_ftnptr (code);
+ if (!aot)
+ code = mono_ppc_create_pre_code_ftnptr (code);
/* Replace the this argument with the target */
ppc_load_reg (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
- cached = start;
- mono_mini_arch_unlock ();
- return cached;
} else {
- static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int size, i;
- if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
- return NULL;
- for (i = 0; i < sig->param_count; ++i)
- if (!mono_is_regsize_var (sig->params [i]))
- return NULL;
-
- mono_mini_arch_lock ();
- code = cache [sig->param_count];
- if (code) {
- mono_mini_arch_unlock ();
- return code;
- }
-
- size = MONO_PPC_32_64_CASE (12, 16) + sig->param_count * 4 + PPC_FTNPTR_SIZE;
+ size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE;
start = code = mono_global_codeman_reserve (size);
- code = mono_ppc_create_pre_code_ftnptr (code);
+ if (!aot)
+ code = mono_ppc_create_pre_code_ftnptr (code);
ppc_load_reg (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
#endif
ppc_mtctr (code, ppc_r0);
/* slide down the arguments */
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = 0; i < param_count; ++i) {
ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
}
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
+ }
+
+ if (code_len)
+ *code_len = code - start;
+
+ return start;
+}
+
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
+{
+ GSList *res = NULL;
+ guint8 *code;
+ guint32 code_len;
+ int i;
+
+ code = get_delegate_invoke_impl (TRUE, 0, &code_len, TRUE);
+ res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
+
+ for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ code = get_delegate_invoke_impl (FALSE, i, &code_len, TRUE);
+ res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
+ }
+
+ return res;
+}
+
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+ guint8 *code, *start;
+
+ /* FIXME: Support more cases */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return NULL;
+
+ if (has_target) {
+ static guint8* cached = NULL;
+
+ if (cached)
+ return cached;
+
+ if (mono_aot_only)
+ start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+ else
+ start = get_delegate_invoke_impl (TRUE, 0, NULL, FALSE);
+
+ mono_memory_barrier ();
+
+ cached = start;
+ } else {
+ static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+ int i;
+
+ if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+ return NULL;
+ for (i = 0; i < sig->param_count; ++i)
+ if (!mono_is_regsize_var (sig->params [i]))
+ return NULL;
+
+
+ code = cache [sig->param_count];
+ if (code)
+ return code;
+
+ if (mono_aot_only) {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+ start = mono_aot_get_named_code (name);
+ g_free (name);
+ } else {
+ start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL, FALSE);
+ }
+
+ mono_memory_barrier ();
+
cache [sig->param_count] = start;
- mono_mini_arch_unlock ();
- return start;
}
- return NULL;
+ return start;
}
gpointer
void
mono_arch_flush_icache (guint8 *code, gint size)
{
+#ifdef MONO_CROSS_COMPILE
+#else
register guint8 *p;
guint8 *endp, *start;
static int cachelinesize = 0;
asm ("sync");
asm ("isync");
#endif
+#endif
}
void
case MONO_TYPE_U8:
case MONO_TYPE_I8:
cinfo->args [n].size = 8;
- add_general (&gr, &stack_size, cinfo->args + n, sizeof (gpointer) == 8);
+ add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8);
n++;
break;
case MONO_TYPE_R4:
cinfo->args [n].reg = fr;
fr ++;
FP_ALSO_IN_REG (gr ++);
- ALWAYS_ON_STACK (stack_size += sizeof (gpointer));
+ ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER);
} else {
cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4);
cinfo->args [n].regtype = RegTypeBase;
cinfo->args [n].reg = ppc_sp; /* in the caller*/
- stack_size += sizeof (gpointer);
+ stack_size += SIZEOF_REGISTER;
}
n++;
break;
cinfo->args [n].regtype = RegTypeFP;
cinfo->args [n].reg = fr;
fr ++;
- FP_ALSO_IN_REG (gr += sizeof (double) / sizeof (gpointer));
+ FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER);
ALWAYS_ON_STACK (stack_size += 8);
} else {
cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
{
int sig_reg = mono_alloc_ireg (cfg);
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+
MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG,
ppc_r1, cinfo->sig_cookie.offset, sig_reg);
goto loop_start; /* make it handle the possibly big ins->inst_offset */
case OP_R8CONST:
case OP_R4CONST:
+ if (cfg->compile_aot) {
+ /* Keep these in the aot case */
+ break;
+ }
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = (gulong)ins->inst_p0;
temp->dreg = mono_alloc_ireg (cfg);
ppc_patch_full (code, target, FALSE);
}
+void
+mono_ppc_patch (guchar *code, const guchar *target)
+{
+ ppc_patch (code, target);
+}
+
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CASE_PPC64 (OP_I8CONST)
ppc_load (code, ins->dreg, ins->inst_c0);
break;
+ case OP_LOAD_GOTADDR:
+ /* The PLT implementation depends on this */
+ g_assert (ins->dreg == ppc_r30);
+
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
+ break;
+ case OP_GOT_ENTRY:
+ // FIXME: Fix max instruction length
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
+ /* arch_emit_got_access () patches this */
+ ppc_load32 (code, ppc_r0, 0);
+ ppc_load_reg_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
+ break;
case OP_AOTCONST:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
ppc_load_sequence (code, ins->dreg, 0);
}
ppc_mr (code, ppc_sp, ppc_r11);
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
- ppc_b (code, 0);
+ if (cfg->compile_aot) {
+ /* arch_emit_got_access () patches this */
+ ppc_load32 (code, ppc_r0, 0);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg_indexed (code, ppc_r11, ppc_r30, ppc_r0);
+ ppc_load_reg (code, ppc_r0, 0, ppc_r11);
+#else
+ ppc_load_reg_indexed (code, ppc_r0, ppc_r30, ppc_r0);
+#endif
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ ppc_b (code, 0);
+ }
break;
}
case OP_CHECK_THIS:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
- if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
- if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
- if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
/* floating point opcodes */
case OP_R8CONST:
+ g_assert (cfg->compile_aot);
+
+ /* FIXME: Optimize this */
+ ppc_bl (code, 1);
+ ppc_mflr (code, ppc_r11);
+ ppc_b (code, 3);
+ *(double*)code = *(double*)ins->inst_p0;
+ code += 8;
+ ppc_lfd (code, ins->dreg, 8, ppc_r11);
+ break;
case OP_R4CONST:
g_assert_not_reached ();
+ break;
case OP_STORER8_MEMBASE_REG:
if (ppc_is_imm16 (ins->inst_offset)) {
ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
void
mono_arch_register_lowlevel_calls (void)
{
+ /* The signature doesn't matter */
+ mono_register_jit_icall (mono_ppc_throw_exception, "mono_ppc_throw_exception", mono_create_icall_signature ("void"), TRUE);
}
#ifdef __mono_ppc64__
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
+ gboolean compile_aot = !run_cctors;
for (patch_info = ji; patch_info; patch_info = patch_info->next) {
unsigned char *ip = patch_info->ip.i + code;
target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
+ if (compile_aot) {
+ switch (patch_info->type) {
+ case MONO_PATCH_INFO_BB:
+ case MONO_PATCH_INFO_LABEL:
+ break;
+ default:
+ /* No need to patch these */
+ continue;
+ }
+ }
+
switch (patch_info->type) {
case MONO_PATCH_INFO_IP:
patch_load_sequence (ip, ip);
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
ppc_load (code, ppc_r3, cfg->domain);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
- if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
} else {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
- if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
/* *(lmf_addr) = r11 */
ppc_store_reg (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
/* save method info */
- ppc_load (code, ppc_r0, method);
+ if (cfg->compile_aot)
+ // FIXME:
+ ppc_load (code, ppc_r0, 0);
+ else
+ ppc_load (code, ppc_r0, method);
ppc_store_reg (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
ppc_store_reg (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
/* save the current IP */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
+ if (cfg->compile_aot) {
+ ppc_bl (code, 1);
+ ppc_mflr (code, ppc_r0);
+ } else {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
#ifdef __mono_ppc64__
- ppc_load_sequence (code, ppc_r0, (gulong)0x0101010101010101L);
+ ppc_load_sequence (code, ppc_r0, (gulong)0x0101010101010101L);
#else
- ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
+ ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
#endif
+ }
ppc_store_reg (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
}
* not used as the vtable register in interface calls.
*/
ppc_store_reg (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
- ppc_load (code, ppc_r11, (gulong)(& (vtable->vtable [0])));
+ ppc_load (code, ppc_r11, (gsize)(& (vtable->vtable [0])));
}
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
- ppc_load (code, ppc_r0, (gulong)item->key);
+ ppc_load (code, ppc_r0, (gsize)item->key);
ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
}
item->jmp_code = code;
return (gpointer)ctx->regs [reg - ppc_r13];
}
+
+guint32
+mono_arch_get_patch_offset (guint8 *code)
+{
+ return 0;
+}
+
+/*
+ * mono_aot_emit_load_got_addr:
+ *
+ * Emit code to load the got address.
+ * On PPC, the result is placed into r30.
+ */
+guint8*
+mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
+{
+ ppc_bl (code, 1);
+ ppc_mflr (code, ppc_r30);
+ if (cfg)
+ mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ else
+ *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ /* arch_emit_got_address () patches this */
+#if defined(TARGET_POWERPC64)
+ ppc_nop (code);
+ ppc_nop (code);
+ ppc_nop (code);
+ ppc_nop (code);
+#else
+ ppc_load32 (code, ppc_r0, 0);
+ ppc_add (code, ppc_r30, ppc_r30, ppc_r0);
+#endif
+
+ return code;
+}
+
+/*
+ * mono_ppc_emit_load_aotconst:
+ *
+ * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
+ * TARGET from the mscorlib GOT in full-aot code.
+ * On PPC, the GOT address is assumed to be in r30, and the result is placed into
+ * r11.
+ */
+guint8*
+mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
+{
+ /* Load the mscorlib got address */
+ ppc_load_reg (code, ppc_r11, sizeof (gpointer), ppc_r30);
+ *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
+ /* arch_emit_got_access () patches this */
+ ppc_load32 (code, ppc_r0, 0);
+ ppc_load_reg_indexed (code, ppc_r11, ppc_r11, ppc_r0);
+
+ return code;
+}
int fp_conv_var_offset;
} MonoCompileArch;
+/*
+ * ILP32 uses a version of the ppc64 abi with sizeof(void*)==sizeof(long)==4.
+ * To support this, code which needs the size of a pointer needs to use
+ * sizeof (gpointer), while code which needs the size of a register/stack slot
+ * needs to use SIZEOF_REGISTER.
+ */
+
#ifdef __mono_ppc64__
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS
#define MONO_ARCH_HAVE_ATOMIC_ADD 1
#define PPC_USES_FUNCTION_DESCRIPTOR
+
+#ifndef __mono_ilp32__
#define MONO_ARCH_HAVE_TLS_GET 1
#define MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH 1
+#endif
+
#else /* must be __mono_ppc__ */
+
#if 0
/* enabling this for PPC32 causes hangs in the thread/delegate tests.
So disable for now. */
#define MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION 1
#define MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE 1
+#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_NEED_DIV_CHECK 1
+#define MONO_ARCH_AOT_SUPPORTED 1
+#define MONO_ARCH_NEED_GOT_VAR 1
#define PPC_NUM_REG_ARGS (PPC_LAST_ARG_REG-PPC_FIRST_ARG_REG+1)
#define PPC_NUM_REG_FPARGS (PPC_LAST_FPARG_REG-PPC_FIRST_FPARG_REG+1)
#define MONO_CONTEXT_GET_BP(ctx) ((gpointer)((ctx)->regs [ppc_r31-13]))
#define MONO_CONTEXT_GET_SP(ctx) ((gpointer)((ctx)->sc_sp))
-#ifdef __APPLE__
+#ifdef MONO_CROSS_COMPILE
+
+typedef struct {
+ unsigned long sp;
+ unsigned long unused1;
+ unsigned long lr;
+} MonoPPCStackFrame;
+
+#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) g_assert_not_reached ()
+
+#elif defined(__APPLE__)
typedef struct {
unsigned long sp;
#include "mini-ppc-os.h"
#endif
+void
+mono_ppc_patch (guchar *code, const guchar *target) MONO_INTERNAL;
+
+void
+mono_ppc_throw_exception (MonoObject *exc, unsigned long eip, unsigned long esp, gulong *int_regs, gdouble *fp_regs, gboolean rethrow) MONO_INTERNAL;
+
#ifdef __mono_ppc64__
#define MONO_PPC_32_64_CASE(c32,c64) c64
extern void mono_ppc_emitted (guint8 *code, ssize_t length, const char *format, ...);
#define MONO_PPC_32_64_CASE(c32,c64) c32
#endif
-extern gboolean mono_ppc_is_direct_call_sequence (guint32 *code);
+gboolean mono_ppc_is_direct_call_sequence (guint32 *code) MONO_INTERNAL;
+
+void mono_ppc_patch_plt_entry (guint8 *code, gpointer *got, gssize *regs, guint8 *addr) MONO_INTERNAL;
#endif /* __MONO_MINI_PPC_H__ */
/* Avoid loading metadata or creating a generic vtable if possible */
addr = mono_aot_get_method_from_vt_slot (mono_domain_get (), vt, displacement);
+ if (addr)
+ addr = mono_create_ftnptr (mono_domain_get (), addr);
if (addr && !vt->klass->valuetype) {
vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
if (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot)) {
return mono_magic_trampoline (regs, code, method, tramp);
}
+ addr = mono_create_ftnptr (mono_domain_get (), addr);
+
vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
g_assert (!vtable_slot);
#define JIT_RUNTIME_WORKS
#ifdef JIT_RUNTIME_WORKS
mono_install_runtime_cleanup ((MonoDomainFunc)mini_cleanup);
+#ifndef MONO_CROSS_COMPILE
mono_runtime_init (domain, mono_thread_start_cb, mono_thread_attach_cb);
+#endif
mono_thread_attach (domain);
#endif
#ifndef DISABLE_COM
cominterop_release_all_rcws ();
#endif
-
+
+#ifndef MONO_CROSS_COMPILE
/*
* mono_runtime_cleanup() and mono_domain_finalize () need to
* be called early since they need the execution engine still
* and mono_runtime_cleanup will wait for other threads to finish).
*/
mono_domain_finalize (domain, 2000);
+#endif
/* This accesses metadata so needs to be called before runtime shutdown */
print_jit_stats ();
+#ifndef MONO_CROSS_COMPILE
mono_runtime_cleanup (domain);
+#endif
mono_profiler_shutdown ();
void mono_print_ins (MonoInst *ins) MONO_INTERNAL;
gboolean mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method) MONO_INTERNAL;
gboolean mini_method_verify (MonoCompile *cfg, MonoMethod *method) MONO_INTERNAL;
+MonoInst *mono_get_got_var (MonoCompile *cfg) MONO_INTERNAL;
gboolean mini_class_is_system_array (MonoClass *klass) MONO_INTERNAL;
MonoMethodSignature *mono_get_element_address_signature (int arity) MONO_INTERNAL;
void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) MONO_INTERNAL;
GSList* mono_arch_get_delegate_invoke_impls (void) MONO_INTERNAL;
LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) MONO_INTERNAL;
+guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji) MONO_INTERNAL;
+guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target) MONO_INTERNAL;
MonoJitInfo *mono_arch_find_jit_info (MonoDomain *domain,
MonoJitTlsData *jit_tls,
PATCH_INFO(GENERIC_CLASS_INIT, "generic_class_init")
PATCH_INFO(MONITOR_ENTER, "monitor_enter")
PATCH_INFO(MONITOR_EXIT, "monitor_exit")
+PATCH_INFO(MSCORLIB_GOT_ADDR, "mscorlib_got_addr")
PATCH_INFO(NONE, "none")
static guint8* nullified_class_init_trampoline;
+/* Same as mono_create_ftnptr, but doesn't require a domain */
+static gpointer
+mono_ppc_create_ftnptr (guint8 *code)
+{
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ MonoPPCFunctionDescriptor *ftnptr = mono_global_codeman_reserve (sizeof (MonoPPCFunctionDescriptor));
+
+ ftnptr->code = code;
+ ftnptr->toc = NULL;
+ ftnptr->env = NULL;
+
+ return ftnptr;
+#else
+ return code;
+#endif
+}
+
/*
* Return the instruction to jump from code to target, 0 if not
* reachable with a single instruction
/* Jump-specific trampoline code fragment size */
#define JUMP_TRAMPOLINE_SIZE 64
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+#define PPC_TOC_REG ppc_r2
+#else
+#define PPC_TOC_REG -1
+#endif
+
/*
* Stack frame description when the generic trampoline is called.
* caller frame
* -------------------
* Saved FP registers 0-13
* -------------------
- * Saved general registers 0-12
+ * Saved general registers 0-30
* -------------------
* param area for 3 args to ppc_magic_trampoline
* -------------------
* -------------------
*/
guchar*
-mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
+mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
{
guint8 *buf, *code = NULL;
int i, offset;
gconstpointer tramp_handler;
- int size = MONO_PPC_32_64_CASE (516, 692);
+ int size = MONO_PPC_32_64_CASE (600, 800);
/* Now we'll create in 'buf' the PowerPC trampoline code. This
is the trampoline code common to all methods */
code = buf = mono_global_codeman_reserve (size);
+ *ji = NULL;
+ *out_unwind_ops = NULL;
+
ppc_store_reg_update (buf, ppc_r1, -STACK, ppc_r1);
/* start building the MonoLMF on the stack */
ppc_store_multiple_regs (buf, ppc_r13, offset, ppc_r1);
/* Now save the rest of the registers below the MonoLMF struct, first 14
- * fp regs and then the 13 gregs.
+ * fp regs and then the 31 gregs.
*/
offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double));
for (i = 0; i < 14; i++) {
ppc_stfd (buf, i, offset, ppc_r1);
offset += sizeof (double);
}
-#define GREGS_OFFSET (STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (13 * sizeof (gulong)))
+#define GREGS_OFFSET (STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (gulong)))
offset = GREGS_OFFSET;
- for (i = 0; i < 13; i++) {
+ for (i = 0; i < 31; i++) {
ppc_store_reg (buf, i, offset, ppc_r1);
offset += sizeof (gulong);
}
+
/* we got here through a jump to the ctr reg, we must save the lr
* in the parent frame (we do it here to reduce the size of the
* method-specific trampoline)
/* ok, now we can continue with the MonoLMF setup, mostly untouched
* from emit_prolog in mini-ppc.c
*/
- ppc_load_func (buf, ppc_r0, mono_get_lmf_addr);
- ppc_mtlr (buf, ppc_r0);
- ppc_blrl (buf);
+ if (aot) {
+ buf = mono_arch_emit_load_aotconst (code, buf, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (buf, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_load_reg (buf, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtlr (buf, ppc_r11);
+ ppc_blrl (buf);
+ } else {
+ ppc_load_func (buf, ppc_r0, mono_get_lmf_addr);
+ ppc_mtlr (buf, ppc_r0);
+ ppc_blrl (buf);
+ }
/* we build the MonoLMF structure on the stack - see mini-ppc.h
* The pointer to the struct is put in ppc_r11.
*/
/* Arg 3: MonoMethod *method. It was put in r5 already above */
/*ppc_mr (buf, ppc_r5, ppc_r5);*/
- tramp_handler = mono_get_trampoline_func (tramp_type);
- ppc_load_func (buf, ppc_r0, tramp_handler);
- ppc_mtlr (buf, ppc_r0);
- ppc_blrl (buf);
+ if (aot) {
+ buf = mono_arch_emit_load_aotconst (code, buf, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("trampoline_func_%d", tramp_type));
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (buf, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_load_reg (buf, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtlr (buf, ppc_r11);
+ ppc_blrl (buf);
+ } else {
+ tramp_handler = mono_get_trampoline_func (tramp_type);
+ ppc_load_func (buf, ppc_r0, tramp_handler);
+ ppc_mtlr (buf, ppc_r0);
+ ppc_blrl (buf);
+ }
/* OK, code address is now on r3. Move it to the counter reg
* so it will be ready for the final jump: this is safe since we
*/
if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (buf, ppc_r2, sizeof (gpointer), ppc_r3);
ppc_load_reg (buf, ppc_r3, 0, ppc_r3);
#endif
ppc_mtctr (buf, ppc_r3);
ppc_lfd (buf, i, offset, ppc_r1);
offset += sizeof (double);
}
- offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (13 * sizeof (gulong));
+ offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (gulong));
ppc_load_reg (buf, ppc_r0, offset, ppc_r1);
offset += 2 * sizeof (gulong);
for (i = 2; i < 13; i++) {
- if (i != 3 || tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
+ if (i != PPC_TOC_REG && (i != 3 || tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
ppc_load_reg (buf, i, offset, ppc_r1);
offset += sizeof (gulong);
}
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (code, buf - code);
+ *code_size = buf - code;
+
/* Sanity check */
g_assert ((buf - code) <= size);
if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
guint32 code_len;
- /* Initialize the nullified class init trampoline used in the AOT case */
- nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
+ /* Initialize the nullified class init trampoline */
+ nullified_class_init_trampoline = mono_ppc_create_ftnptr (mono_arch_get_nullified_class_init_trampoline (&code_len));
}
return code;
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
+{
+ guint32 code_size;
+ MonoJumpInfo *ji;
+
+ return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
+}
+
+gpointer
+mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
#ifdef MONO_ARCH_VTABLE_REG
guint8 *tramp;
int i;
gboolean mrgctx;
+ *ji = NULL;
+
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
tramp_size += 4;
else
tramp_size += 12;
+ if (aot)
+ tramp_size += 32;
code = buf = mono_global_codeman_reserve (tramp_size);
/* move the rgctx pointer to the VTABLE register */
ppc_mr (code, MONO_ARCH_VTABLE_REG, ppc_r3);
- tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot),
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
+ /* Branch to the trampoline */
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot),
MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
- /* jump to the actual trampoline */
- code = emit_trampoline_jump (code, tramp);
+ /* jump to the actual trampoline */
+ code = emit_trampoline_jump (code, tramp);
+ }
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
+ *code_size = code - buf;
+
return buf;
#else
g_assert_not_reached ();
gpointer
mono_arch_create_generic_class_init_trampoline (void)
+{
+ guint32 code_size;
+ MonoJumpInfo *ji;
+
+ return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
+}
+
+gpointer
+mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *tramp;
guint8 *code, *buf;
int tramp_size;
tramp_size = MONO_PPC_32_64_CASE (32, 44);
+ if (aot)
+ tramp_size += 32;
code = buf = mono_global_codeman_reserve (tramp_size);
+ *ji = NULL;
+
if (byte_offset < 0)
mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
ppc_patch (jump, code);
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
+ /* Branch to the trampoline */
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_load_reg (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
mono_get_root_domain (), NULL);
- /* jump to the actual trampoline */
- code = emit_trampoline_jump (code, tramp);
+ /* jump to the actual trampoline */
+ code = emit_trampoline_jump (code, tramp);
+ }
mono_arch_flush_icache (buf, code - buf);
+ *code_size = code - buf;
+
g_assert (code - buf <= tramp_size);
return buf;
guint32 tramp_size = 64;
code = buf = mono_global_codeman_reserve (tramp_size);
- code = mono_ppc_create_pre_code_ftnptr (code);
ppc_blr (code);
mono_arch_flush_icache (buf, code - buf);