static guint8* nullified_class_init_trampoline;
+/* Same as mono_create_ftnptr, but doesn't require a domain */
+static gpointer
+mono_ppc_create_ftnptr (guint8 *code)
+{
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ /* On ABIs using function descriptors, a callable pointer is a
+ * {code, toc, env} triple; reserve it from the global code manager
+ * so it lives as long as the trampoline code it describes. */
+ MonoPPCFunctionDescriptor *ftnptr = mono_global_codeman_reserve (sizeof (MonoPPCFunctionDescriptor));
+
+ ftnptr->code = code;
+ /* NOTE(review): toc/env left NULL — assumes the target code does not
+ * depend on a valid TOC register on entry; confirm for this ABI. */
+ ftnptr->toc = NULL;
+ ftnptr->env = NULL;
+
+ return ftnptr;
+#else
+ /* No descriptors: the code address itself is the function pointer. */
+ return code;
+#endif
+}
+
/*
* Return the instruction to jump from code to target, 0 if not
* reachable with a single instruction
/*
* get_unbox_trampoline:
- * @gsctx: the generic sharing context
* @m: method pointer
* @addr: pointer to native code for @m
*
* unboxing before calling the method
*/
gpointer
-mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
+mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
int this_pos = 3;
addr = mono_get_addr_from_ftnptr (addr);
- if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
- this_pos = 4;
-
mono_domain_lock (domain);
start = code = mono_domain_code_reserve (domain, size);
code = mono_ppc_create_pre_code_ftnptr (code);
ppc_addi (code, this_pos, this_pos, sizeof (MonoObject));
ppc_emit32 (code, short_branch);
} else {
- ppc_load (code, ppc_r0, addr);
+ ppc_load_ptr (code, ppc_r0, addr);
ppc_mtctr (code, ppc_r0);
ppc_addi (code, this_pos, this_pos, sizeof (MonoObject));
ppc_bcctr (code, 20, 0);
/* Compute size of code needed to emit mrgctx */
p = imm_buf;
- ppc_load (p, MONO_ARCH_RGCTX_REG, mrgctx);
+ ppc_load_ptr (p, MONO_ARCH_RGCTX_REG, mrgctx);
imm_size = p - imm_buf;
mono_domain_lock (domain);
mono_domain_unlock (domain);
if (short_branch) {
- ppc_load (code, MONO_ARCH_RGCTX_REG, mrgctx);
+ ppc_load_ptr (code, MONO_ARCH_RGCTX_REG, mrgctx);
ppc_emit32 (code, short_branch);
} else {
- ppc_load (code, ppc_r0, addr);
+ ppc_load_ptr (code, ppc_r0, addr);
ppc_mtctr (code, ppc_r0);
- ppc_load (code, MONO_ARCH_RGCTX_REG, mrgctx);
+ ppc_load_ptr (code, MONO_ARCH_RGCTX_REG, mrgctx);
ppc_bcctr (code, 20, 0);
}
mono_arch_flush_icache (start, code - start);
}
void
-mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
+mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
- g_assert_not_reached ();
+ guint32 ins1, ins2, offset;
+
+ /* Patch the jump table entry used by the plt entry */
+
+ /* Should be a lis+ori */
+ /* opcode 15 = addis (lis), opcode 24 = ori */
+ ins1 = ((guint32*)code)[0];
+ g_assert (ins1 >> 26 == 15);
+ ins2 = ((guint32*)code)[1];
+ g_assert (ins2 >> 26 == 24);
+ /* Reconstruct the 32-bit GOT offset from the two 16-bit immediates. */
+ offset = ((ins1 & 0xffff) << 16) | (ins2 & 0xffff);
+
+ /* Either got or regs is set */
+ if (!got)
+ /* NOTE(review): assumes r30 holds the GOT base in AOT-compiled
+ * code — confirm against the AOT method prologue. */
+ got = (gpointer*)(gsize) regs [30];
+ *(guint8**)((guint8*)got + offset) = addr;
}
void
-mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
+mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
+ /* Redirect the call site to the no-op (nullified) class init
+ * trampoline, so subsequent calls return immediately. */
mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
}
void
-mono_arch_nullify_plt_entry (guint8 *code)
+mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
- g_assert_not_reached ();
+ /* Lazily fetch the AOT no-op trampoline the first time it is needed;
+ * in the JIT case nullified_class_init_trampoline is initialized when
+ * the generic CLASS_INIT trampoline is created. */
+ if (mono_aot_only && !nullified_class_init_trampoline)
+ nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");
+
+ /* Point the PLT entry's jump table slot at the no-op trampoline. */
+ mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}
/* Stack size for trampoline function
* PPC_MINIMAL_STACK_SIZE + 16 (args + alignment to ppc_magic_trampoline)
* + MonoLMF + 14 fp regs + 13 gregs + alignment
- * #define STACK (PPC_MINIMAL_STACK_SIZE + 4 * sizeof (gulong) + sizeof (MonoLMF) + 14 * sizeof (double) + 13 * (sizeof (gulong)))
- * STACK would be 444 for 32 bit darwin
*/
-#ifdef __mono_ppc64__
-#define STACK (PPC_MINIMAL_STACK_SIZE + 4 * sizeof (gulong) + sizeof (MonoLMF) + 14 * sizeof (double) + 13 * sizeof (gulong))
-#else
-#define STACK (448)
-#endif
+#define STACK (((PPC_MINIMAL_STACK_SIZE + 4 * sizeof (mgreg_t) + sizeof (MonoLMF) + 14 * sizeof (double) + 31 * sizeof (mgreg_t)) + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~(MONO_ARCH_FRAME_ALIGNMENT - 1))
/* Method-specific trampoline code fragment size */
#define METHOD_TRAMPOLINE_SIZE 64
/* Jump-specific trampoline code fragment size */
#define JUMP_TRAMPOLINE_SIZE 64
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+#define PPC_TOC_REG ppc_r2
+#else
+#define PPC_TOC_REG -1
+#endif
+
/*
* Stack frame description when the generic trampoline is called.
* caller frame
* -------------------
* Saved FP registers 0-13
* -------------------
- * Saved general registers 0-12
+ * Saved general registers 0-30
* -------------------
* param area for 3 args to ppc_magic_trampoline
* -------------------
* -------------------
*/
guchar*
-mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
+mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
+
guint8 *buf, *code = NULL;
int i, offset;
gconstpointer tramp_handler;
- int size = MONO_PPC_32_64_CASE (516, 692);
+ int size = MONO_PPC_32_64_CASE (600, 800);
+ GSList *unwind_ops = NULL;
+ MonoJumpInfo *ji = NULL;
/* Now we'll create in 'buf' the PowerPC trampoline code. This
is the trampoline code common to all methods */
code = buf = mono_global_codeman_reserve (size);
- ppc_store_reg_update (buf, ppc_r1, -STACK, ppc_r1);
+ ppc_str_update (code, ppc_r1, -STACK, ppc_r1);
/* start building the MonoLMF on the stack */
offset = STACK - sizeof (double) * MONO_SAVED_FREGS;
for (i = 14; i < 32; i++) {
- ppc_stfd (buf, i, offset, ppc_r1);
+ ppc_stfd (code, i, offset, ppc_r1);
offset += sizeof (double);
}
/*
* now the integer registers.
*/
offset = STACK - sizeof (MonoLMF) + G_STRUCT_OFFSET (MonoLMF, iregs);
- ppc_store_multiple_regs (buf, ppc_r13, offset, ppc_r1);
+ ppc_str_multiple (code, ppc_r13, offset, ppc_r1);
/* Now save the rest of the registers below the MonoLMF struct, first 14
- * fp regs and then the 13 gregs.
+ * fp regs and then the 31 gregs.
*/
offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double));
for (i = 0; i < 14; i++) {
- ppc_stfd (buf, i, offset, ppc_r1);
+ ppc_stfd (code, i, offset, ppc_r1);
offset += sizeof (double);
}
-#define GREGS_OFFSET (STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (13 * sizeof (gulong)))
+#define GREGS_OFFSET (STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (mgreg_t)))
offset = GREGS_OFFSET;
- for (i = 0; i < 13; i++) {
- ppc_store_reg (buf, i, offset, ppc_r1);
- offset += sizeof (gulong);
+ for (i = 0; i < 31; i++) {
+ ppc_str (code, i, offset, ppc_r1);
+ offset += sizeof (mgreg_t);
}
+
/* we got here through a jump to the ctr reg, we must save the lr
* in the parent frame (we do it here to reduce the size of the
* method-specific trampoline)
*/
- ppc_mflr (buf, ppc_r0);
- ppc_store_reg (buf, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
+ ppc_mflr (code, ppc_r0);
+ ppc_str (code, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
/* ok, now we can continue with the MonoLMF setup, mostly untouched
* from emit_prolog in mini-ppc.c
*/
- ppc_load_func (buf, ppc_r0, mono_get_lmf_addr);
- ppc_mtlr (buf, ppc_r0);
- ppc_blrl (buf);
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtlr (code, ppc_r11);
+ ppc_blrl (code);
+ } else {
+ ppc_load_func (code, ppc_r0, mono_get_lmf_addr);
+ ppc_mtlr (code, ppc_r0);
+ ppc_blrl (code);
+ }
/* we build the MonoLMF structure on the stack - see mini-ppc.h
* The pointer to the struct is put in ppc_r11.
*/
- ppc_addi (buf, ppc_r11, ppc_sp, STACK - sizeof (MonoLMF));
- ppc_store_reg (buf, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_addi (code, ppc_r11, ppc_sp, STACK - sizeof (MonoLMF));
+ ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
/* new_lmf->previous_lmf = *lmf_addr */
- ppc_load_reg (buf, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
- ppc_store_reg (buf, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
+ ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
/* *(lmf_addr) = r11 */
- ppc_store_reg (buf, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
- /* save method info (it's stored on the stack, so get it first and put it
- * in r5 as it's the third argument to the function)
- */
- if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
- ppc_load_reg (buf, ppc_r5, GREGS_OFFSET + PPC_FIRST_ARG_REG * sizeof (gpointer), ppc_r1);
- else
- ppc_load_reg (buf, ppc_r5, GREGS_OFFSET, ppc_r1);
- if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
- ppc_store_reg (buf, ppc_r5, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
+ ppc_stptr (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
+ /* save method info (it's stored on the stack, so get it first). */
+ if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
+ ppc_ldr (code, ppc_r0, GREGS_OFFSET, ppc_r1);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
+ } else {
+ ppc_load (code, ppc_r0, 0);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
+ }
/* store the frame pointer of the calling method */
- ppc_addi (buf, ppc_r0, ppc_sp, STACK);
- ppc_store_reg (buf, ppc_r0, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
+ ppc_addi (code, ppc_r0, ppc_sp, STACK);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
/* save the IP (caller ip) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
- ppc_li (buf, ppc_r0, 0);
+ ppc_li (code, ppc_r0, 0);
} else {
- ppc_load_reg (buf, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
+ ppc_ldr (code, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
}
- ppc_store_reg (buf, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
/*
- * Now we're ready to call trampoline (gssize *regs, guint8 *code, gpointer value, guint8 *tramp)
+ * Now we're ready to call trampoline (mgreg_t *regs, guint8 *code, gpointer value, guint8 *tramp)
* Note that the last argument is unused.
*/
/* Arg 1: a pointer to the registers */
- ppc_addi (buf, ppc_r3, ppc_r1, GREGS_OFFSET);
+ ppc_addi (code, ppc_r3, ppc_r1, GREGS_OFFSET);
/* Arg 2: code (next address to the instruction that called us) */
if (tramp_type == MONO_TRAMPOLINE_JUMP)
- ppc_li (buf, ppc_r4, 0);
+ ppc_li (code, ppc_r4, 0);
else
- ppc_load_reg (buf, ppc_r4, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
+ ppc_ldr (code, ppc_r4, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
- /* Arg 3: MonoMethod *method. It was put in r5 already above */
- /*ppc_mr (buf, ppc_r5, ppc_r5);*/
+ /* Arg 3: trampoline argument */
+ if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
+ ppc_ldr (code, ppc_r5, GREGS_OFFSET + MONO_ARCH_VTABLE_REG * sizeof (mgreg_t), ppc_r1);
+ else
+ ppc_ldr (code, ppc_r5, GREGS_OFFSET, ppc_r1);
- tramp_handler = mono_get_trampoline_func (tramp_type);
- ppc_load_func (buf, ppc_r0, tramp_handler);
- ppc_mtlr (buf, ppc_r0);
- ppc_blrl (buf);
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("trampoline_func_%d", tramp_type));
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r11);
+ ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtlr (code, ppc_r11);
+ ppc_blrl (code);
+ } else {
+ tramp_handler = mono_get_trampoline_func (tramp_type);
+ ppc_load_func (code, ppc_r0, tramp_handler);
+ ppc_mtlr (code, ppc_r0);
+ ppc_blrl (code);
+ }
/* OK, code address is now on r3. Move it to the counter reg
* so it will be ready for the final jump: this is safe since we
*/
if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
- ppc_load_reg (buf, ppc_r3, 0, ppc_r3);
+ ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r3);
+ ppc_ldptr (code, ppc_r3, 0, ppc_r3);
#endif
- ppc_mtctr (buf, ppc_r3);
+ ppc_mtctr (code, ppc_r3);
}
/*
* the same state as before we executed.
* The pointer to MonoLMF is in ppc_r11.
*/
- ppc_addi (buf, ppc_r11, ppc_r1, STACK - sizeof (MonoLMF));
+ ppc_addi (code, ppc_r11, ppc_r1, STACK - sizeof (MonoLMF));
/* r5 = previous_lmf */
- ppc_load_reg (buf, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
+ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
/* r6 = lmf_addr */
- ppc_load_reg (buf, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
/* *(lmf_addr) = previous_lmf */
- ppc_store_reg (buf, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
+ ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
/* restore iregs */
- ppc_load_multiple_regs (buf, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
+ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
/* restore fregs */
for (i = 14; i < 32; i++)
- ppc_lfd (buf, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
+ ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
/* restore the volatile registers, we skip r1, of course */
offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double));
for (i = 0; i < 14; i++) {
- ppc_lfd (buf, i, offset, ppc_r1);
+ ppc_lfd (code, i, offset, ppc_r1);
offset += sizeof (double);
}
- offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (13 * sizeof (gulong));
- ppc_load_reg (buf, ppc_r0, offset, ppc_r1);
- offset += 2 * sizeof (gulong);
+ offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (mgreg_t));
+ ppc_ldr (code, ppc_r0, offset, ppc_r1);
+ offset += 2 * sizeof (mgreg_t);
for (i = 2; i < 13; i++) {
- if (i != 3 || tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
- ppc_load_reg (buf, i, offset, ppc_r1);
- offset += sizeof (gulong);
+ if (i != PPC_TOC_REG && (i != 3 || tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
+ ppc_ldr (code, i, offset, ppc_r1);
+ offset += sizeof (mgreg_t);
}
/* Non-standard function epilogue. Instead of doing a proper
* return, we just jump to the compiled code.
*/
/* Restore stack pointer and LR and jump to the code */
- ppc_load_reg (buf, ppc_r1, 0, ppc_r1);
- ppc_load_reg (buf, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
- ppc_mtlr (buf, ppc_r11);
+ ppc_ldr (code, ppc_r1, 0, ppc_r1);
+ ppc_ldr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
+ ppc_mtlr (code, ppc_r11);
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type))
- ppc_blr (buf);
+ ppc_blr (code);
else
- ppc_bcctr (buf, 20, 0);
+ ppc_bcctr (code, 20, 0);
/* Flush instruction cache, since we've generated code */
- mono_arch_flush_icache (code, buf - code);
+ mono_arch_flush_icache (buf, code - buf);
/* Sanity check */
- g_assert ((buf - code) <= size);
+ g_assert ((code - buf) <= size);
if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
- guint32 code_len;
-
- /* Initialize the nullified class init trampoline used in the AOT case */
- nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
+ /* Initialize the nullified class init trampoline */
+ nullified_class_init_trampoline = mono_ppc_create_ftnptr (mono_arch_get_nullified_class_init_trampoline (NULL));
}
- return code;
+ if (info)
+ *info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);
+
+ return buf;
}
#define TRAMPOLINE_SIZE (MONO_PPC_32_64_CASE (24, (5+5+1+1)*4))
mono_domain_unlock (domain);
if (short_branch) {
- ppc_load_sequence (buf, ppc_r0, (gulong) arg1);
- ppc_emit32 (buf, short_branch);
+ ppc_load_sequence (code, ppc_r0, (mgreg_t)(gsize) arg1);
+ ppc_emit32 (code, short_branch);
} else {
/* Prepare the jump to the generic trampoline code.*/
- ppc_load (buf, ppc_r0, (gulong) tramp);
- ppc_mtctr (buf, ppc_r0);
+ ppc_load_ptr (code, ppc_r0, tramp);
+ ppc_mtctr (code, ppc_r0);
/* And finally put 'arg1' in r0 and fly! */
- ppc_load (buf, ppc_r0, (gulong) arg1);
- ppc_bcctr (buf, 20, 0);
+ ppc_load_ptr (code, ppc_r0, arg1);
+ ppc_bcctr (code, 20, 0);
}
/* Flush instruction cache, since we've generated code */
- mono_arch_flush_icache (code, buf - code);
+ mono_arch_flush_icache (buf, code - buf);
- g_assert ((buf - code) <= TRAMPOLINE_SIZE);
+
+ g_assert ((code - buf) <= TRAMPOLINE_SIZE);
if (code_len)
- *code_len = buf - code;
+ *code_len = code - buf;
- return code;
+ return buf;
}
static guint8*
if (short_branch) {
ppc_emit32 (code, short_branch);
} else {
- ppc_load (code, ppc_r0, tramp);
+ ppc_load_ptr (code, ppc_r0, tramp);
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, 20, 0);
}
}
gpointer
-mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
+mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
#ifdef MONO_ARCH_VTABLE_REG
guint8 *tramp;
int depth, index;
int i;
gboolean mrgctx;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
- index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
+ index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
for (depth = 0; ; ++depth) {
int size = mono_class_rgctx_get_array_size (depth, mrgctx);
tramp_size += 4;
else
tramp_size += 12;
+ if (aot)
+ tramp_size += 32;
code = buf = mono_global_codeman_reserve (tramp_size);
ppc_mr (code, ppc_r4, PPC_FIRST_ARG_REG);
} else {
/* load rgctx ptr from vtable */
- ppc_load_reg (code, ppc_r4, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), PPC_FIRST_ARG_REG);
+ ppc_ldptr (code, ppc_r4, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), PPC_FIRST_ARG_REG);
/* is the rgctx ptr null? */
ppc_compare_reg_imm (code, 0, ppc_r4, 0);
/* if yes, jump to actual trampoline */
for (i = 0; i < depth; ++i) {
/* load ptr to next array */
if (mrgctx && i == 0)
- ppc_load_reg (code, ppc_r4, sizeof (MonoMethodRuntimeGenericContext), ppc_r4);
+ ppc_ldptr (code, ppc_r4, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, ppc_r4);
else
- ppc_load_reg (code, ppc_r4, 0, ppc_r4);
+ ppc_ldptr (code, ppc_r4, 0, ppc_r4);
/* is the ptr null? */
ppc_compare_reg_imm (code, 0, ppc_r4, 0);
/* if yes, jump to actual trampoline */
}
/* fetch slot */
- ppc_load_reg (code, ppc_r4, sizeof (gpointer) * (index + 1), ppc_r4);
+ ppc_ldptr (code, ppc_r4, sizeof (gpointer) * (index + 1), ppc_r4);
/* is the slot null? */
ppc_compare_reg_imm (code, 0, ppc_r4, 0);
/* if yes, jump to actual trampoline */
/* move the rgctx pointer to the VTABLE register */
ppc_mr (code, MONO_ARCH_VTABLE_REG, ppc_r3);
- tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot),
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
+ /* Branch to the trampoline */
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot),
MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
- /* jump to the actual trampoline */
- code = emit_trampoline_jump (code, tramp);
+ /* jump to the actual trampoline */
+ code = emit_trampoline_jump (code, tramp);
+ }
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
+ if (info)
+ *info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);
+
return buf;
#else
g_assert_not_reached ();
}
gpointer
-mono_arch_create_generic_class_init_trampoline (void)
+mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
guint8 *tramp;
guint8 *code, *buf;
static guint8 bitmask;
guint8 *jump;
int tramp_size;
+ GSList *unwind_ops = NULL;
+ MonoJumpInfo *ji = NULL;
tramp_size = MONO_PPC_32_64_CASE (32, 44);
+ if (aot)
+ tramp_size += 32;
code = buf = mono_global_codeman_reserve (tramp_size);
if (byte_offset < 0)
mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
- ppc_lbz (code, ppc_r4, byte_offset, PPC_FIRST_ARG_REG);
+ ppc_lbz (code, ppc_r4, byte_offset, MONO_ARCH_VTABLE_REG);
ppc_andid (code, ppc_r4, ppc_r4, bitmask);
jump = code;
ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
ppc_patch (jump, code);
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
+ /* Branch to the trampoline */
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+#endif
+ ppc_mtctr (code, ppc_r11);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
mono_get_root_domain (), NULL);
- /* jump to the actual trampoline */
- code = emit_trampoline_jump (code, tramp);
+ /* jump to the actual trampoline */
+ code = emit_trampoline_jump (code, tramp);
+ }
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
+ if (info)
+ *info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
+
return buf;
}
gpointer
-mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
+mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
guint8 *code, *buf;
guint32 tramp_size = 64;
code = buf = mono_global_codeman_reserve (tramp_size);
- code = mono_ppc_create_pre_code_ftnptr (code);
+ /* The nullified trampoline is a single blr: return immediately. */
ppc_blr (code);
mono_arch_flush_icache (buf, code - buf);
- *code_len = code - buf;
-
g_assert (code - buf <= tramp_size);
+ if (info)
+ *info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);
+
return buf;
}
+
+/*
+ * mono_arch_get_call_target:
+ *
+ *   Return the target of the relative call ("bl") instruction ending at
+ * CODE, or NULL if the preceding instruction is not a bl (opcode 18 with
+ * AA=0 and LK=1).
+ */
+guint8*
+mono_arch_get_call_target (guint8 *code)
+{
+ /* Should be a bl */
+ guint32 ins = ((guint32*)(gpointer)code) [-1];
+
+ if ((ins >> 26 == 18) && ((ins & 1) == 1) && ((ins & 2) == 0)) {
+ /* Sign-extend the 24-bit LI field: the bl displacement is signed,
+ * and masking with 0xffffff would corrupt backward branches. */
+ gint32 disp = ((gint32)(ins << 6)) >> 8;
+ guint8 *target = code - 4 + (disp * 4);
+
+ return target;
+ } else {
+ return NULL;
+ }
+}
+
+guint32
+mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
+{
+ /* The PLT info offset is stored as a data word after the entry's code;
+ * descriptor-based entries are larger, hence the different word index.
+ * NOTE(review): indices 8/6 must match the AOT compiler's PLT entry
+ * layout — verify against the PLT emission code. */
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ return ((guint32*)plt_entry) [8];
+#else
+ return ((guint32*)plt_entry) [6];
+#endif
+}