#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
-#include <mono/arch/ia64/ia64-codegen.h>
#include <mono/metadata/mono-debug-debugger.h>
+#include <mono/arch/ia64/ia64-codegen.h>
#include "mini.h"
#include "mini-ia64.h"
-#define NOT_IMPLEMENTED g_assert_not_reached ()
-
#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
/*
- * get_unbox_trampoline:
+ * mono_arch_get_unbox_trampoline:
+ * @gsctx: the generic sharing context
* @m: method pointer
* @addr: pointer to native code for @m
*
* this argument. This method returns a pointer to a trampoline which does
* unboxing before calling the method
*/
-static gpointer
-get_unbox_trampoline (MonoMethod *m, gpointer addr)
+gpointer
+mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
guint8 *buf;
gpointer func_addr, func_gp;
Ia64CodegenState code;
int this_reg = 0;
+ gpointer *desc;
MonoDomain *domain = mono_domain_get ();
/* FIXME: Optimize this */
- if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
+ if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
this_reg = 1;
func_addr = ((gpointer*)addr) [0];
g_assert (code.buf - buf < 256);
- /* FIXME: */
+ mono_arch_flush_icache (buf, code.buf - buf);
- gpointer *desc = g_malloc0 (sizeof (gpointer) * 2);
+ /* FIXME: */
+ desc = g_malloc0 (sizeof (gpointer) * 2);
desc [0] = buf;
desc [1] = func_gp;
return desc;
}
-/**
- * ia64_magic_trampoline:
- */
-static gpointer
-ia64_magic_trampoline (long *regs, guint8 *code, MonoMethod *m, guint8* tramp)
+void
+mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr)
{
- gpointer addr;
- gpointer *vtable_slot;
-
- addr = mono_compile_method (m);
- g_assert (addr);
+ guint8 *callsite_begin;
+ guint64 *callsite = (guint64*)(gpointer)(code - 16);
+ guint64 *next_bundle;
+ guint64 ins, instructions [3];
+ guint64 buf [16];
+ Ia64CodegenState gen;
+ gpointer func = ((gpointer*)(gpointer)addr)[0];
+
+ while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
+ (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
+ callsite -= 2;
+ callsite_begin = (guint8*)callsite;
+
+ next_bundle = callsite + 2;
+ ins = ia64_bundle_ins1 (next_bundle);
+ if (ia64_ins_opcode (ins) == 5) {
+ /* ld8_inc_imm -> indirect call through a function pointer */
+ g_assert (ia64_ins_r1 (ins) == GP_SCRATCH_REG2);
+ g_assert (ia64_ins_r3 (ins) == GP_SCRATCH_REG);
+ return;
+ }
- //printf ("ENTER: %s\n", mono_method_full_name (m, TRUE));
+ /* Patch the code generated by emit_call */
- /* the method was jumped to */
- if (!code)
- /* FIXME: Optimize the case when the call is from a delegate wrapper */
- return addr;
+ instructions [0] = ia64_bundle_ins1 (callsite);
+ instructions [1] = ia64_bundle_ins2 (callsite);
+ instructions [2] = ia64_bundle_ins3 (callsite);
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_movl (gen, GP_SCRATCH_REG, func);
+ instructions [1] = gen.instructions [0];
+ instructions [2] = gen.instructions [1];
- if (vtable_slot) {
- if (m->klass->valuetype)
- addr = get_unbox_trampoline (m, addr);
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
+ ia64_codegen_close (gen);
- g_assert (*vtable_slot);
+ /* This might not be safe, but not all itanium processors support st16 */
+ callsite [0] = buf [0];
+ callsite [1] = buf [1];
- if (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot))
- *vtable_slot = addr;
- }
- else {
- /* FIXME: Patch calling code */
- }
+ mono_arch_flush_icache (callsite_begin, code - callsite_begin);
+}
- return addr;
+void
+mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
+{
+ g_assert_not_reached ();
}
-/*
- * ia64_aot_trampoline:
- *
- * This trampoline handles calls made from AOT code. We try to bypass the
- * normal JIT compilation logic to avoid loading the metadata for the method.
- */
-static gpointer
-ia64_aot_trampoline (long *regs, guint8 *code, guint8 *token_info,
- guint8* tramp)
+void
+mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
- NOT_IMPLEMENTED;
+ guint8 *callsite_begin;
+ guint64 *callsite = (guint64*)(gpointer)(code - 16);
+ guint64 instructions [3];
+ guint64 buf [16];
+ Ia64CodegenState gen;
- return NULL;
+ while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
+ (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
+ callsite -= 2;
+ callsite_begin = (guint8*)callsite;
+
+ /* Replace the code generated by emit_call with a set of nops */
+
+ /* The first bundle might have other instructions in it */
+ instructions [0] = ia64_bundle_ins1 (callsite);
+ instructions [1] = IA64_NOP_X;
+ instructions [2] = IA64_NOP_X;
+
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
+ ia64_codegen_close (gen);
+
+ /* This might not be safe, but not all itanium processors support st16 */
+ callsite [0] = buf [0];
+ callsite [1] = buf [1];
+
+ callsite += 2;
+
+ /* The other bundles can be fully replaced with nops */
+
+ ia64_codegen_init (gen, (guint8*)buf);
+ ia64_emit_bundle_template (&gen, IA64_TEMPLATE_MII, IA64_NOP_M, IA64_NOP_I, IA64_NOP_I);
+ ia64_codegen_close (gen);
+
+ while ((guint8*)callsite < code) {
+ callsite [0] = buf [0];
+ callsite [1] = buf [1];
+ callsite += 2;
+ }
+
+ mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}
-/**
- * ia64_class_init_trampoline:
- *
- * This method calls mono_runtime_class_init () to run the static constructor
- * for the type, then patches the caller code so it is not called again.
- */
-static void
-ia64_class_init_trampoline (long *regs, guint8 *code, MonoVTable *vtable, guint8 *tramp)
+void
+mono_arch_nullify_plt_entry (guint8 *code)
{
- mono_runtime_class_init (vtable);
-
- /* FIXME: Patch calling code */
+ g_assert_not_reached ();
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
guint8 *buf, *tramp;
- int i, offset, saved_regs_offset, saved_fpregs_offset, framesize;
+ int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
gboolean has_caller;
Ia64CodegenState code;
unw_dyn_info_t *di;
unw_dyn_region_info_t *r_pro;
+ /*
+ * Since jump trampolines are not patched, this trampoline is executed every
+ * time a call is made to a jump trampoline. So we try to keep things faster
+ * in that case.
+ */
if (tramp_type == MONO_TRAMPOLINE_JUMP)
has_caller = FALSE;
else
ia64_codegen_init (code, buf);
- /* FIXME: Save/restore lmf */
-
/* Stacked Registers */
in0 = 32;
local0 = in0 + 8;
* 16 locals
* 4 output (number of parameters passed to trampoline)
*/
+ ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
+ ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
ia64_mov (code, l8, IA64_SP);
ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);
ia64_mov (code, l6, GP_SCRATCH_REG);
/* Save the calling address */
+ ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
ia64_mov_from_br (code, l7, IA64_B0);
/* Create unwind info for the prolog */
- r_pro = g_malloc0 (_U_dyn_region_info_size (3));
- r_pro->op_count = 3;
- r_pro->insn_count = 16;
- i = 0;
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 2,
- /* reg=*/ UNW_IA64_AR_PFS, /* dst=*/ UNW_IA64_GR + local0 + 5);
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 5,
- /* reg=*/ UNW_IA64_SP, /* dst=*/ UNW_IA64_GR + local0 + 8);
- _U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 14,
- /* reg=*/ UNW_IA64_RP, /* dst=*/ UNW_IA64_GR + local0 + 7);
- g_assert ((unsigned) i <= r_pro->op_count);
+ ia64_begin_bundle (code);
+ r_pro = mono_ia64_create_unwind_region (&code);
/* Save registers */
- saved_regs_offset = offset;
- offset += 128 * 8;
- /*
- * Only the registers which are needed for computing vtable slots need
- * to be saved.
- */
- for (i = 0; i < 64; ++i)
- if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
- ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
- ia64_st8_hint (code, l1, i, 0);
- }
+ /* Not needed for jump trampolines */
+ if (tramp_type != MONO_TRAMPOLINE_JUMP) {
+ saved_regs_offset = offset;
+ offset += 128 * 8;
+ /*
+ * Only the registers which are needed for computing vtable slots need
+ * to be saved.
+ */
+ last_offset = -1;
+ for (i = 0; i < 64; ++i)
+ if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
+ if (last_offset != i * 8)
+ ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
+ ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
+ last_offset = (i + 1) * 8;
+ }
+ }
/* Save fp registers */
saved_fpregs_offset = offset;
/* FIXME: */
ia64_mov (code, o3, 0);
- if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
- tramp = (guint8*)ia64_class_init_trampoline;
- else if (tramp_type == MONO_TRAMPOLINE_AOT)
- tramp = (guint8*)ia64_aot_trampoline;
- else
- tramp = (guint8*)ia64_magic_trampoline;
+ tramp = (guint8*)mono_get_trampoline_func (tramp_type);
/* Call the trampoline using an indirect call */
ia64_movl (code, l0, tramp);
ia64_ld8 (code, IA64_GP, l0);
ia64_br_call_reg (code, 0, IA64_B6);
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ ia64_mov (code, l2, IA64_R8);
+
+ tramp = (guint8*)mono_thread_force_interruption_checkpoint;
+ ia64_movl (code, l0, tramp);
+ ia64_ld8_inc_imm (code, l1, l0, 8);
+ ia64_mov_to_br (code, IA64_B6, l1);
+ ia64_ld8 (code, IA64_GP, l0);
+ ia64_br_call_reg (code, 0, IA64_B6);
+
+ ia64_mov (code, IA64_R8, l2);
+
/* Restore fp regs */
ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
for (i = 0; i < 8; ++i)
/* FIXME: Handle NATs in fp regs / scratch regs */
- /* Load method address from function descriptor */
- ia64_ld8 (code, l0, IA64_R8);
- ia64_mov_to_br (code, IA64_B6, l0);
+ if (tramp_type != MONO_TRAMPOLINE_CLASS_INIT) {
+ /* Load method address from function descriptor */
+ ia64_ld8 (code, l0, IA64_R8);
+ ia64_mov_to_br (code, IA64_B6, l0);
+ }
/* Clean up register/memory stack frame */
ia64_adds_imm (code, IA64_SP, framesize, IA64_SP);
#define TRAMPOLINE_SIZE 128
-static MonoJitInfo*
-create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain)
+gpointer
+mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
- MonoJitInfo *ji;
guint8 *buf, *tramp;
gint64 disp;
Ia64CodegenState code;
mono_arch_flush_icache (buf, code.buf - buf);
- ji = g_new0 (MonoJitInfo, 1);
- ji->code_start = buf;
- ji->code_size = code.buf - buf;
+ if (code_len)
+ *code_len = code.buf - buf;
- return ji;
-}
-
-MonoJitInfo*
-mono_arch_create_jump_trampoline (MonoMethod *method)
-{
- MonoJitInfo *ji = create_specific_trampoline (method, MONO_TRAMPOLINE_JUMP, mono_domain_get ());
-
- ji->method = method;
- return ji;
-}
-
-gpointer
-mono_arch_create_jit_trampoline (MonoMethod *method)
-{
- MonoJitInfo *ji;
- gpointer code_start;
-
- ji = create_specific_trampoline (method, MONO_TRAMPOLINE_GENERIC, mono_domain_get ());
- code_start = ji->code_start;
- g_free (ji);
-
- return code_start;
-}
-
-gpointer
-mono_arch_create_jit_trampoline_from_token (MonoImage *image, guint32 token)
-{
- MonoDomain *domain = mono_domain_get ();
- MonoJitInfo *ji;
- gpointer code_start;
- guint8 *buf, *start;
-
- mono_domain_lock (domain);
- buf = start = mono_code_manager_reserve (domain->code_mp, 2 * sizeof (gpointer));
- mono_domain_unlock (domain);
-
- *(gpointer*)(gpointer)buf = image;
- buf += sizeof (gpointer);
- *(guint32*)(gpointer)buf = token;
-
- ji = create_specific_trampoline (start, MONO_TRAMPOLINE_AOT, domain);
- code_start = ji->code_start;
- g_free (ji);
-
- return code_start;
-}
-
-/**
- * mono_arch_create_class_init_trampoline:
- * @vtable: the type to initialize
- *
- * Creates a trampoline function to run a type initializer.
- * If the trampoline is called, it calls mono_runtime_class_init with the
- * given vtable, then patches the caller code so it does not get called any
- * more.
- *
- * Returns: a pointer to the newly created code
- */
-gpointer
-mono_arch_create_class_init_trampoline (MonoVTable *vtable)
-{
- MonoJitInfo *ji;
- gpointer code;
-
- ji = create_specific_trampoline (vtable, MONO_TRAMPOLINE_CLASS_INIT, vtable->domain);
- code = ji->code_start;
- g_free (ji);
-
- return code;
+ return buf;
}
void
}
gpointer
-mono_debugger_create_notification_function (gpointer *notification_address)
+mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 encoded_offset)
{
- NOT_IMPLEMENTED;
-
+ /* FIXME: implement! */
+ g_assert_not_reached ();
return NULL;
}