void
mono_arch_patch_callsite (guint8 *code, guint8 *addr)
{
- guint64 *callsite = (guint64*)(gpointer)(code - 48);
+ guint8 *callsite_begin;
+ guint64 *callsite = (guint64*)(gpointer)(code - 16);
+ guint64 instructions [3];
guint64 buf [16];
- Ia64CodegenState cgen;
+ Ia64CodegenState gen;
gpointer func = ((gpointer*)(gpointer)addr)[0];
+ /* Scan backwards one 16 byte bundle at a time (callsite is guint64*, so
+    -= 2 steps one bundle) until we hit the MLX/MLXS bundle which holds
+    the movl emitted by emit_call; that is the instruction to patch. */
+ while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
+ (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
+ callsite -= 2;
+ callsite_begin = (guint8*)callsite;
+
/* Patch the code generated by emit_call */
- ia64_codegen_init (cgen, (guint8*)buf);
- ia64_movl (cgen, GP_SCRATCH_REG, func);
- ia64_codegen_close (cgen);
+ /* Keep the instruction in the bundle's first slot untouched; only the
+    movl, which occupies the remaining two slots, is replaced below. */
+ instructions [0] = ia64_bundle_ins1 (callsite);
+ instructions [1] = ia64_bundle_ins2 (callsite);
+ instructions [2] = ia64_bundle_ins3 (callsite);
+
+ /* Assemble 'movl GP_SCRATCH_REG, func' and take over its two slots */
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_movl (gen, GP_SCRATCH_REG, func);
+ instructions [1] = gen.instructions [0];
+ instructions [2] = gen.instructions [1];
+
+ /* Re-emit the bundle with its original template and the patched slots */
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
+ ia64_codegen_close (gen);
/* This might not be safe, but not all itanium processors support st16 */
callsite [0] = buf [0];
callsite [1] = buf [1];
- mono_arch_flush_icache (code - 48, 16);
+ mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
- guint64 *callsite = (guint64*)(gpointer)(code - 48);
+ guint8 *callsite_begin;
+ guint64 *callsite = (guint64*)(gpointer)(code - 16);
+ guint64 instructions [3];
guint64 buf [16];
- Ia64CodegenState cgen;
- int i;
+ Ia64CodegenState gen;
+
+ /* Scan backwards one 16 byte bundle at a time for the MLX/MLXS bundle
+    which begins the call sequence emitted by emit_call. */
+ while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
+ (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
+ callsite -= 2;
+ callsite_begin = (guint8*)callsite;
/* Replace the code generated by emit_call with a sets of nops */
+ /* The first bundle might have other instructions in it */
+ /* Keep slot 0 as-is; nop out the two long-instruction slots. */
+ instructions [0] = ia64_bundle_ins1 (callsite);
+ instructions [1] = IA64_NOP_X;
+ instructions [2] = IA64_NOP_X;
+
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
+ ia64_codegen_close (gen);
/* This might not be safe, but not all itanium processors support st16 */
+ callsite [0] = buf [0];
+ callsite [1] = buf [1];
+
+ callsite += 2;
+
+ /* The other bundles can be fully replaced with nops */
+
+ /* Build one all-nop MII bundle and stamp it over every remaining
+    bundle up to the return address. */
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, IA64_TEMPLATE_MII, IA64_NOP_M, IA64_NOP_I, IA64_NOP_I);
+ ia64_codegen_close (gen);
+
+ while ((guint8*)callsite < code) {
+ callsite [0] = buf [0];
+ callsite [1] = buf [1];
+ callsite += 2;
+ }
+
+ mono_arch_flush_icache (callsite_begin, code - callsite_begin);
+}
+
+void
+mono_arch_patch_delegate_trampoline (guint8 *code, guint8 *tramp, gssize *regs, guint8 *addr)
+{
/*
- * The code generated by emit_call consists of 5 instructions, but these
- * might be compressed into 3 bundles.
+ * This is called by the code generated by OP_CALL_REG:
+ * ld8 r30=[r8],8
+ * nop.i 0x0;;
+ * mov.sptk b6=r30
+ * ld8 r1=[r8]
+ * br.call.sptk.few b0=b6
 */
- for (i = 0; i < 3 * 2; ++i)
- callsite [i] = buf [i % 2];
- mono_arch_flush_icache (code - 48, 48);
+ /* We patch the function descriptor instead of delegate->method_ptr */
+ /* NOTE(review): assumes r8 (regs [8]) was post-incremented by 8, so
+    regs [8] - 8 is the start of the descriptor ([0]=entry, [1]=gp);
+    confirm against the OP_CALL_REG sequence — see the disabled assert. */
+ //g_assert (((gpointer*)(regs [8] - 8))[0] == tramp);
+ ((gpointer*)(regs [8] - 8))[0] = mono_get_addr_from_ftnptr (addr);
+ ((gpointer*)(regs [8] - 8))[1] = NULL;
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
guint8 *buf, *tramp;
- int i, offset, saved_regs_offset, saved_fpregs_offset, framesize;
+ int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
gboolean has_caller;
Ia64CodegenState code;
unw_dyn_info_t *di;
unw_dyn_region_info_t *r_pro;
+ /*
+ * Since jump trampolines are not patched, this trampoline is executed every
+ * time a call is made to a jump trampoline. So we try to keep things faster
+ * in that case.
+ */
if (tramp_type == MONO_TRAMPOLINE_JUMP)
has_caller = FALSE;
else
* 16 locals
* 4 output (number of parameters passed to trampoline)
*/
+ ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
+ ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
ia64_mov (code, l8, IA64_SP);
ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);
ia64_mov (code, l6, GP_SCRATCH_REG);
/* Save the calling address */
+ ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
ia64_mov_from_br (code, l7, IA64_B0);
/* Create unwind info for the prolog */
- r_pro = g_malloc0 (_U_dyn_region_info_size (3));
- r_pro->op_count = 3;
- r_pro->insn_count = 16;
- i = 0;
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 2,
- /* reg=*/ UNW_IA64_AR_PFS, /* dst=*/ UNW_IA64_GR + local0 + 5);
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 5,
- /* reg=*/ UNW_IA64_SP, /* dst=*/ UNW_IA64_GR + local0 + 8);
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 14,
- /* reg=*/ UNW_IA64_RP, /* dst=*/ UNW_IA64_GR + local0 + 7);
- g_assert ((unsigned) i <= r_pro->op_count);
+ ia64_begin_bundle (code);
+ r_pro = mono_ia64_create_unwind_region (&code);
/* Save registers */
- saved_regs_offset = offset;
- offset += 128 * 8;
- /*
- * Only the registers which are needed for computing vtable slots need
- * to be saved.
- */
- for (i = 0; i < 64; ++i)
- if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
- ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
- ia64_st8_hint (code, l1, i, 0);
- }
+ /* Not needed for jump trampolines */
+ if (tramp_type != MONO_TRAMPOLINE_JUMP) {
+ saved_regs_offset = offset;
+ offset += 128 * 8;
+ /*
+ * Only the registers which are needed for computing vtable slots need
+ * to be saved.
+ */
+ last_offset = -1;
+ for (i = 0; i < 64; ++i)
+ if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
+ if (last_offset != i * 8)
+ ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
+ ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
+ last_offset = (i + 1) * 8;
+ }
+ }
/* Save fp registers */
saved_fpregs_offset = offset;
tramp = (guint8*)mono_class_init_trampoline;
else if (tramp_type == MONO_TRAMPOLINE_AOT)
tramp = (guint8*)mono_aot_trampoline;
+ else if (tramp_type == MONO_TRAMPOLINE_DELEGATE)
+ tramp = (guint8*)mono_delegate_trampoline;
else
tramp = (guint8*)mono_magic_trampoline;