2 * tramp-ia64.c: JIT trampoline code for ia64
5 * Zoltan Varga (vargaz@gmail.com)
7 * (C) 2001 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/marshal.h>
15 #include <mono/metadata/tabledefs.h>
16 #include <mono/metadata/mono-debug-debugger.h>
17 #include <mono/arch/ia64/ia64-codegen.h>
20 #include "mini-ia64.h"
22 #define NOT_IMPLEMENTED g_assert_not_reached ()
24 #define GP_SCRATCH_REG 31
25 #define GP_SCRATCH_REG2 30
28 * mono_arch_get_unbox_trampoline:
30 * @addr: pointer to native code for @m
32 * when value type methods are called through the vtable we need to unbox the
33 * this argument. This method returns a pointer to a trampoline which does
34 * unboxing before calling the method
37 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
/*
 * Return a trampoline which adjusts the 'this' argument past the MonoObject
 * header before tail-branching to the real method code, so value type
 * methods called through the vtable receive an unboxed 'this'.
 * NOTE(review): several original lines are missing from this excerpt
 * (opening brace, declarations of buf/this_reg, and the return of the
 * freshly built function descriptor); comments cover only the visible code.
 */
40 gpointer func_addr, func_gp;
41 Ia64CodegenState code;
44 MonoDomain *domain = mono_domain_get ();
46 /* FIXME: Optimize this */
48 if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
/* On ia64 'addr' is a function descriptor: [0] = entry point, [1] = gp. */
51 func_addr = ((gpointer*)addr) [0];
52 func_gp = ((gpointer*)addr) [1];
54 mono_domain_lock (domain);
55 buf = mono_code_manager_reserve (domain->code_mp, 256);
56 mono_domain_unlock (domain);
58 /* Since the this reg is a stacked register, it's a bit hard to access it */
59 ia64_codegen_init (code, buf);
/* Open a register frame so the incoming stacked arguments become addressable,
 * then skip the MonoObject header so 'this' points at the raw value. */
60 ia64_alloc (code, 40, 8, 1, 0, 0);
61 ia64_adds_imm (code, 32 + this_reg, sizeof (MonoObject), 32 + this_reg);
/* Restore ar.pfs and branch (not call) to the method entry via b6. */
62 ia64_mov_to_ar_i (code, IA64_PFS, 40);
63 ia64_movl (code, GP_SCRATCH_REG, func_addr);
64 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
65 ia64_br_cond_reg (code, IA64_B6);
66 ia64_codegen_close (code);
68 g_assert (code.buf - buf < 256);
70 mono_arch_flush_icache (buf, code.buf - buf);
/* Build a new two-slot function descriptor for the trampoline. */
73 desc = g_malloc0 (sizeof (gpointer) * 2);
81 mono_arch_patch_callsite (guint8 *code, guint8 *addr)
/*
 * Retarget the call site ending at 'code' so it transfers to 'addr'.
 * NOTE(review): this excerpt is missing lines (opening brace, declarations
 * of next_bundle/buf/gen, the backwards-scan loop body); comments describe
 * only what is visible.
 */
83 guint8 *callsite_begin;
84 guint64 *callsite = (guint64*)(gpointer)(code - 16);
86 guint64 ins, instructions [3];
/* 'addr' is an ia64 function descriptor; slot [0] is the real entry point. */
89 gpointer func = ((gpointer*)(gpointer)addr)[0];
/* Scan backwards for the MLX/MLXS bundle holding the movl emitted by
 * emit_call (the movl that loads the call target). */
91 while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
92 (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
94 callsite_begin = (guint8*)callsite;
96 next_bundle = callsite + 2;
97 ins = ia64_bundle_ins1 (next_bundle);
98 if (ia64_ins_opcode (ins) == 5) {
99 /* ld8_inc_imm -> indirect call through a function pointer */
100 g_assert (ia64_ins_r1 (ins) == GP_SCRATCH_REG2);
101 g_assert (ia64_ins_r3 (ins) == GP_SCRATCH_REG);
105 /* Patch the code generated by emit_call */
107 instructions [0] = ia64_bundle_ins1 (callsite);
108 instructions [1] = ia64_bundle_ins2 (callsite);
109 instructions [2] = ia64_bundle_ins3 (callsite);
/* Regenerate only the movl (it occupies slots 1+2 of the bundle);
 * slot 0 keeps whatever instruction was already there. */
111 ia64_codegen_init (gen, (guint8*)buf);
112 ia64_movl (gen, GP_SCRATCH_REG, func);
113 instructions [1] = gen.instructions [0];
114 instructions [2] = gen.instructions [1];
116 ia64_codegen_init (gen, (guint8*)buf);
117 ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
118 ia64_codegen_close (gen);
120 /* This might not be safe, but not all itanium processors support st16 */
121 callsite [0] = buf [0];
122 callsite [1] = buf [1];
124 mono_arch_flush_icache (callsite_begin, code - callsite_begin);
128 mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
/* PLT entries are not generated on this backend, so reaching here is a bug. */
130 g_assert_not_reached ();
134 mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
/*
 * Overwrite the call to the class init trampoline with nops, so the
 * already-performed class initialization is never re-entered.
 * NOTE(review): missing lines in this excerpt (opening brace, 'buf'
 * declaration, loop-advance statements); comments describe only the
 * visible code.
 */
136 guint8 *callsite_begin;
137 guint64 *callsite = (guint64*)(gpointer)(code - 16);
138 guint64 instructions [3];
140 Ia64CodegenState gen;
/* Scan backwards for the MLX/MLXS bundle emitted by emit_call. */
142 while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
143 (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
145 callsite_begin = (guint8*)callsite;
147 /* Replace the code generated by emit_call with a set of nops */
149 /* The first bundle might have other instructions in it */
150 instructions [0] = ia64_bundle_ins1 (callsite);
151 instructions [1] = IA64_NOP_X;
152 instructions [2] = IA64_NOP_X;
154 ia64_codegen_init (gen, (guint8*)buf);
155 ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
156 ia64_codegen_close (gen);
158 /* This might not be safe, but not all itanium processors support st16 */
159 callsite [0] = buf [0];
160 callsite [1] = buf [1];
164 /* The other bundles can be fully replaced with nops */
166 ia64_codegen_init (gen, (guint8*)buf);
167 ia64_emit_bundle_template (&gen, IA64_TEMPLATE_MII, IA64_NOP_M, IA64_NOP_I, IA64_NOP_I);
168 ia64_codegen_close (gen);
/* Copy the nop bundle over every remaining bundle of the call site. */
170 while ((guint8*)callsite < code) {
171 callsite [0] = buf [0];
172 callsite [1] = buf [1];
176 mono_arch_flush_icache (callsite_begin, code - callsite_begin);
180 mono_arch_nullify_plt_entry (guint8 *code)
/* PLT entries are not generated on this backend, so reaching here is a bug. */
182 g_assert_not_reached ();
186 mono_arch_patch_delegate_trampoline (guint8 *code, guint8 *tramp, gssize *regs, guint8 *addr)
189 * This is called by the code generated by OP_CALL_REG:
194 * br.call.sptk.few b0=b6
/* regs [8] points just past the function descriptor that the indirect
 * call loaded from; rewrite that descriptor in place so subsequent
 * invocations jump straight to the compiled method.
 * NOTE(review): parts of the original comment block are missing from
 * this excerpt. */
197 /* We patch the function descriptor instead of delegate->method_ptr */
198 //g_assert (((gpointer*)(regs [8] - 8))[0] == tramp);
199 ((gpointer*)(regs [8] - 8))[0] = mono_get_addr_from_ftnptr (addr);
200 ((gpointer*)(regs [8] - 8))[1] = NULL;
204 mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
/*
 * Generate the generic trampoline for 'tramp_type': save the register
 * state, call the corresponding C trampoline function, restore state,
 * then transfer control to the compiled code (or return, for class
 * init trampolines).
 * NOTE(review): this excerpt is missing many original lines (opening
 * brace, declarations of tramp/buf/di, the assignments for
 * in0/local0/out0 and l0-l4/l6-l7, offset updates, closing braces, the
 * return statement); comments describe only the visible code.
 */
207 int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
208 int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
210 Ia64CodegenState code;
212 unw_dyn_region_info_t *r_pro;
215 * Since jump trampolines are not patched, this trampoline is executed every
216 * time a call is made to a jump trampoline. So we try to keep things faster
219 if (tramp_type == MONO_TRAMPOLINE_JUMP)
224 buf = mono_global_codeman_reserve (2048);
226 ia64_codegen_init (code, buf);
228 /* FIXME: Save/restore lmf */
230 /* Stacked Registers */
239 l5 = 45; /* saved ar.pfs */
242 l8 = 48; /* saved sp */
243 o0 = out0 + 0; /* regs */
244 o1 = out0 + 1; /* code */
245 o2 = out0 + 2; /* arg */
246 o3 = out0 + 3; /* tramp */
/* Room for 128 saved 8-byte registers plus a scratch/outgoing area,
 * rounded up to the required frame alignment. */
248 framesize = (128 * 8) + 1024;
249 framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
252 * Allocate a new register+memory stack frame.
253 * 8 input registers (the max used by the ABI)
255 * 4 output (number of parameters passed to trampoline)
257 ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
258 ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
259 ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
260 ia64_mov (code, l8, IA64_SP);
261 ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);
263 offset = 16; /* scratch area */
265 /* Save the argument received from the specific trampoline */
266 ia64_mov (code, l6, GP_SCRATCH_REG);
268 /* Save the calling address */
269 ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
270 ia64_mov_from_br (code, l7, IA64_B0);
272 /* Create unwind info for the prolog */
273 ia64_begin_bundle (code);
274 r_pro = mono_ia64_create_unwind_region (&code);
277 /* Not needed for jump trampolines */
278 if (tramp_type != MONO_TRAMPOLINE_JUMP) {
279 saved_regs_offset = offset;
282 * Only the registers which are needed for computing vtable slots need
286 for (i = 0; i < 64; ++i)
287 if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
/* Re-seed the store pointer whenever the previous store left a gap. */
288 if (last_offset != i * 8)
289 ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
290 ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
291 last_offset = (i + 1) * 8;
295 /* Save fp registers */
296 saved_fpregs_offset = offset;
298 ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
299 for (i = 0; i < 8; ++i)
300 ia64_stfd_inc_imm_hint (code, l1, i + 8, 8, 0);
302 g_assert (offset < framesize);
304 /* Arg1 is the pointer to the saved registers */
305 ia64_adds_imm (code, o0, saved_regs_offset, IA64_SP);
307 /* Arg2 is the address of the calling code */
309 ia64_mov (code, o1, l7);
311 ia64_mov (code, o1, 0);
313 /* Arg3 is the method/vtable ptr */
314 ia64_mov (code, o2, l6);
316 /* Arg4 is the trampoline address */
318 ia64_mov (code, o3, 0);
/* Pick the C function implementing this trampoline type. */
320 if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
321 tramp = (guint8*)mono_class_init_trampoline;
322 else if (tramp_type == MONO_TRAMPOLINE_AOT)
323 tramp = (guint8*)mono_aot_trampoline;
325 else if (tramp_type == MONO_TRAMPOLINE_DELEGATE)
326 tramp = (guint8*)mono_delegate_trampoline;
329 tramp = (guint8*)mono_magic_trampoline;
331 /* Call the trampoline using an indirect call */
/* 'tramp' is a function descriptor: load the entry point into b6 and
 * the gp into r1 before calling. */
332 ia64_movl (code, l0, tramp);
333 ia64_ld8_inc_imm (code, l1, l0, 8);
334 ia64_mov_to_br (code, IA64_B6, l1);
335 ia64_ld8 (code, IA64_GP, l0);
336 ia64_br_call_reg (code, 0, IA64_B6);
338 /* Restore fp regs */
339 ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
340 for (i = 0; i < 8; ++i)
341 ia64_ldfd_inc_imm (code, i + 8, l1, 8);
343 /* FIXME: Handle NATs in fp regs / scratch regs */
345 if (tramp_type != MONO_TRAMPOLINE_CLASS_INIT) {
346 /* Load method address from function descriptor */
347 ia64_ld8 (code, l0, IA64_R8);
348 ia64_mov_to_br (code, IA64_B6, l0);
351 /* Clean up register/memory stack frame */
352 ia64_adds_imm (code, IA64_SP, framesize, IA64_SP);
353 ia64_mov_to_ar_i (code, IA64_PFS, l5);
/* Class init trampolines return to the caller instead of retrying the
 * original call. */
355 if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
356 ia64_mov_ret_to_br (code, IA64_B0, l7);
357 ia64_br_ret_reg (code, IA64_B0);
360 /* Call the compiled method */
361 ia64_mov_to_br (code, IA64_B0, l7);
362 ia64_br_cond_reg (code, IA64_B6);
365 ia64_codegen_close (code);
367 g_assert ((code.buf - buf) <= 2048);
369 /* FIXME: emit unwind info for epilog */
/* Register dynamic unwind info with libunwind so stack walks can cross
 * the trampoline frame. */
370 di = g_malloc0 (sizeof (unw_dyn_info_t));
371 di->start_ip = (unw_word_t) buf;
372 di->end_ip = (unw_word_t) code.buf;
374 di->format = UNW_INFO_FORMAT_DYNAMIC;
375 di->u.pi.name_ptr = (unw_word_t)"ia64_generic_trampoline";
376 di->u.pi.regions = r_pro;
378 _U_dyn_register (di);
380 mono_arch_flush_icache (buf, code.buf - buf);
385 #define TRAMPOLINE_SIZE 128
388 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
/*
 * Create a small, domain-local trampoline which loads 'arg1' into
 * GP_SCRATCH_REG and branches to the generic trampoline for
 * 'tramp_type'. The generated size is stored into *code_len.
 * NOTE(review): missing lines in this excerpt (opening brace,
 * declarations of tramp/buf/disp, closing braces, return statement);
 * comments describe only the visible code.
 */
392 Ia64CodegenState code;
394 tramp = mono_get_trampoline_code (tramp_type);
396 mono_domain_lock (domain);
397 buf = mono_code_manager_reserve (domain->code_mp, TRAMPOLINE_SIZE);
398 mono_domain_unlock (domain);
400 /* FIXME: Optimize this */
402 ia64_codegen_init (code, buf);
/* Pass the trampoline-specific argument in GP_SCRATCH_REG. */
404 ia64_movl (code, GP_SCRATCH_REG, arg1);
406 ia64_begin_bundle (code);
/* Prefer a short IP-relative branch when the generic trampoline is in
 * range of a 21-bit displacement ... */
407 disp = (tramp - code.buf) >> 4;
408 if (ia64_is_imm21 (disp)) {
409 ia64_br_cond (code, disp);
/* ... otherwise fall back to an indirect branch through b6. */
412 ia64_movl (code, GP_SCRATCH_REG2, tramp);
413 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
414 ia64_br_cond_reg (code, IA64_B6);
417 ia64_codegen_close (code);
419 g_assert (code.buf - buf <= TRAMPOLINE_SIZE);
421 mono_arch_flush_icache (buf, code.buf - buf);
424 *code_len = code.buf - buf;
430 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
436 mono_debugger_create_notification_function (void)