2 * tramp-ppc.c: JIT trampoline code for PowerPC
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Paolo Molaro (lupus@ximian.com)
7 * Carlos Valiente <yo@virutass.net>
9 * (C) 2001 Ximian, Inc.
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/marshal.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/arch/ppc/ppc-codegen.h>
19 #include <mono/metadata/mono-debug-debugger.h>
25 MONO_TRAMPOLINE_GENERIC,
27 MONO_TRAMPOLINE_CLASS_INIT
30 /* adapt to mini later... */
31 #define mono_jit_share_code (1)
34 * Address of the generic trampoline code. This is used by the debugger to check
35 * whether a method is a trampoline.
37 guint8 *mono_generic_trampoline_code = NULL;
/* NOTE(review): this chunk was extracted with line gaps (the leading numbers
   are original source line numbers); this function is missing lines, including
   its return type, local declarations (start/code/this_pos) and the body of
   the struct-return branch. Confirm against the full source before editing. */
40 * get_unbox_trampoline:
42 * @addr: pointer to native code for @m
44 * when value type methods are called through the vtable we need to unbox the
45 * this argument. This method returns a pointer to a trampoline which does
46 * unboxing before calling the method
49 get_unbox_trampoline (MonoMethod *m, gpointer addr)
/* If the method returns a struct by value, the missing branch presumably moves
   'this_pos' to the register after the hidden return-buffer arg — TODO confirm. */
54 	if (!m->signature->ret->byref && MONO_TYPE_ISSTRUCT (m->signature->ret))
/* 20 bytes is room for five 32-bit PPC instructions; the assert below checks it. */
57 	start = code = g_malloc (20);
/* Load the target address into CTR via r0, so LR is left untouched. */
59 	ppc_load (code, ppc_r0, addr);
60 	ppc_mtctr (code, ppc_r0);
/* Skip the MonoObject header so 'this' points at the unboxed value payload. */
61 	ppc_addi (code, this_pos, this_pos, sizeof (MonoObject));
/* bcctr 20,0 == unconditional branch to CTR, no link (tail call into the method). */
62 	ppc_bcctr (code, 20, 0);
63 	g_assert ((code - start) <= 20);
68 /* Stack size for trampoline function */
69 #define STACK (144 + 8*8)
71 /* Method-specific trampoline code fragment size */
72 #define METHOD_TRAMPOLINE_SIZE 64
/* NOTE(review): extracted with line gaps — the return type, local declarations
   (addr, reg, o, start), several closing braces, the 'switch (reg)' header and
   some loop/branch scaffolding are missing from this view. Code lines below are
   untouched; only comments are added. */
75 * ppc_magic_trampoline:
76 * @code: pointer into caller code
77 * @method: the method to translate
80 * This method is called by the function 'arch_create_jit_trampoline', which in
81 * turn is called by the trampoline functions for virtual methods.
82 * After having called the JIT compiler to compile the method, it inspects the
83 * caller code to find the address of the method-specific part of the
84 * trampoline vtable slot for this method, updates it with a fragment that calls
85 * the newly compiled code and returns the address of the compiled code to
86 * 'arch_create_jit_trampoline'
89 ppc_magic_trampoline (MonoMethod *method, guint32 *code, char *sp)
/* JIT-compile the method first; 'addr' is the native entry point. */
95 	addr = mono_compile_method(method);
98 	/* Locate the address of the method-specific trampoline. The call using
99 	   the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
100 	   looks something like this:
102 	   mtlr rA 			; Move rA (a register containing the
103 	   					; target address) to LR
104 	   blrl 			; Call function at LR
106 	   PowerPC instructions are 32-bit long, which means that a 32-bit target
107 	   address cannot be encoded as an immediate value (because we already
108 	   have spent some bits to encode the branch instruction!). That's why a
109 	   'b'ranch to the contents of the 'l'ink 'r'egister (with 'l'ink register
110 	   update) is needed, instead of a simpler 'branch immediate'. This
111 	   complicates our purpose here, because 'blrl' overwrites LR, which holds
112 	   the value we're interested in.
114 	   Therefore, we need to locate the 'mtlr rA' instruction to know which
115 	   register LR was loaded from, and then retrieve the value from that
118 	/* This is the 'blrl' instruction */
122 	 * Note that methods are called also with the bl opcode.
/* Primary opcode 18 == b/bl: a direct branch; patch its target in place. */
124 	if (((*code) >> 26) == 18) {
125 		ppc_patch (code, addr);
126 		mono_arch_flush_icache (code, 4);
130 	/* Sanity check: instruction must be 'blrl' */
131 	g_assert(*code == 0x4e800021);
133 	/* OK, we're now at the 'blrl' instruction. Now walk backwards
134 	   till we get to a 'mtlr rA' */
/* NOTE(review): the '&' operand here is the mtlr opcode VALUE (0x7c0803a6),
   not a proper field mask, so this test can also match some non-mtlr words.
   It works for the instruction sequences emitted by this file, but verify. */
136 	if((*code & 0x7c0803a6) == 0x7c0803a6) {
137 		/* Here we are: we reached the 'mtlr rA'.
138 		   Extract the register from the instruction */
/* Bits 21-25 hold rA, the register that was moved into LR. */
139 	reg = (*code & 0x03e00000) >> 21;
/* Recover rA's saved value from the trampoline stack frame. The offsets mirror
   the save layout written by create_trampoline_code (r0 at STACK-8, r31 at
   STACK-4, r12..r30 descending); the 'switch (reg)' header is missing here. */
141 	case 0 : o = *((int *) (sp + STACK - 8)); break;
142 	case 11: o = *((int *) (sp + STACK - 24)); break;
143 	case 12: o = *((int *) (sp + STACK - 28)); break;
144 	case 13: o = *((int *) (sp + STACK - 32)); break;
145 	case 14: o = *((int *) (sp + STACK - 36)); break;
146 	case 15: o = *((int *) (sp + STACK - 40)); break;
147 	case 16: o = *((int *) (sp + STACK - 44)); break;
148 	case 17: o = *((int *) (sp + STACK - 48)); break;
149 	case 18: o = *((int *) (sp + STACK - 52)); break;
150 	case 19: o = *((int *) (sp + STACK - 56)); break;
151 	case 20: o = *((int *) (sp + STACK - 60)); break;
152 	case 21: o = *((int *) (sp + STACK - 64)); break;
153 	case 22: o = *((int *) (sp + STACK - 68)); break;
154 	case 23: o = *((int *) (sp + STACK - 72)); break;
155 	case 24: o = *((int *) (sp + STACK - 76)); break;
156 	case 25: o = *((int *) (sp + STACK - 80)); break;
157 	case 26: o = *((int *) (sp + STACK - 84)); break;
158 	case 27: o = *((int *) (sp + STACK - 88)); break;
159 	case 28: o = *((int *) (sp + STACK - 92)); break;
160 	case 29: o = *((int *) (sp + STACK - 96)); break;
161 	case 30: o = *((int *) (sp + STACK - 100)); break;
162 	case 31: o = *((int *) (sp + STACK - 4)); break;
164 	printf("%s: Unexpected register %d\n",
166 	g_assert_not_reached();
172 	/* this is not done for non-virtual calls, because in that case
173 	   we won't have an object, but the actual pointer to the
174 	   valuetype as the this argument
176 	if (method->klass->valuetype)
177 		addr = get_unbox_trampoline (method, addr);
179 	/* Finally, replace the method-specific trampoline code (which called
180 	   the generic trampoline code) with a fragment that calls directly the
185 	/* FIXME: make the patching thread safe */
187 	ppc_patch (o - 4, addr);
/* Rewrite the method-specific fragment: small standard prologue ... */
189 	ppc_stwu (o, ppc_r1, -16, ppc_r1);
190 	ppc_mflr (o, ppc_r0);
191 	ppc_stw (o, ppc_r31, 12, ppc_r1);
192 	ppc_stw (o, ppc_r0, 20, ppc_r1);
193 	ppc_mr (o, ppc_r31, ppc_r1);
/* ... load the compiled code's 32-bit address into LR (lis/ori pair) ... */
195 	ppc_lis (o, ppc_r0, (guint32) addr >> 16);
196 	ppc_ori (o, ppc_r0, ppc_r0, (guint32) addr & 0xffff);
197 	ppc_mtlr (o, ppc_r0);
/* ... and the matching epilogue (some emitted lines are missing here). */
200 	ppc_lwz (o, ppc_r11, 0, ppc_r1);
201 	ppc_lwz (o, ppc_r0, 4, ppc_r11);
202 	ppc_mtlr (o, ppc_r0);
203 	ppc_lwz (o, ppc_r31, -4, ppc_r11);
204 	ppc_mr (o, ppc_r1, ppc_r11);
/* Generated code must be flushed from the data cache into the icache. */
207 	mono_arch_flush_icache (start, o - start);
208 	g_assert(o - start < METHOD_TRAMPOLINE_SIZE);
/* Run the class (static) initializer for 'vtable', then NOP out the caller's
   'bl' so the initializer trampoline is never entered again for this site.
   NOTE(review): return type, braces and some lines are missing from this
   extracted view; code lines below are untouched. */
214 ppc_class_init_trampoline (void *vtable, guint32 *code, char *sp)
216 	mono_runtime_class_init (vtable);
219 	/* This is the 'bl' instruction */
/* Primary opcode 18 == b/bl: overwrite the call with 'ori 0,0,0' (a nop). */
222 	if (((*code) >> 26) == 18) {
223 		ppc_ori (code, 0, 0, 0); /* nop */
224 		mono_arch_flush_icache (code, 4);
/* Reached only when the caller instruction is not the expected 'bl'. */
227 	g_assert_not_reached ();
/* Build (once per trampoline type) the generic PPC trampoline shared by all
   methods: save the register state, call mono_get_lmf_addr and then either
   ppc_magic_trampoline or ppc_class_init_trampoline, restore state, and jump
   to the freshly compiled code. Results are cached in static pointers.
   NOTE(review): this extracted view has line gaps — the return type, some
   locals (offset, i), 'break'/'}' lines of the switches, the LMF handling and
   parts of the epilogue are missing. Code lines below are untouched. */
233 create_trampoline_code (MonoTrampolineType tramp_type)
235 	guint8 *buf, *code = NULL;
236 	static guint8* generic_jump_trampoline = NULL;
237 	static guint8 *generic_class_init_trampoline = NULL;
/* Return the cached trampoline if this type was already generated. */
240 	switch (tramp_type) {
241 	case MONO_TRAMPOLINE_GENERIC:
242 		if (mono_generic_trampoline_code)
243 			return mono_generic_trampoline_code;
245 	case MONO_TRAMPOLINE_JUMP:
246 		if (generic_jump_trampoline)
247 			return generic_jump_trampoline;
249 	case MONO_TRAMPOLINE_CLASS_INIT:
250 		if (generic_class_init_trampoline)
251 			return generic_class_init_trampoline;
256 	/* Now we'll create in 'buf' the PowerPC trampoline code. This
257 	   is the trampoline code common to all methods */
/* NOTE(review): g_malloc'd buffer used as code — relies on the platform
   allowing execution from heap memory; 512 bytes checked by assert below. */
259 	code = buf = g_malloc(512);
261 	/*-----------------------------------------------------------
262 	STEP 0: First create a non-standard function prologue with a
263 	stack size big enough to save our registers:
265 	lr		(We'll be calling functions here, so we
267 	r0		(See ppc_magic_trampoline)
268 	r1 (sp)	(Stack pointer - must save)
269 	r3-r10	Function arguments.
270 	r11-r31	(See ppc_magic_trampoline)
271 	method in r11	(See ppc_magic_trampoline)
273 	This prologue is non-standard because r0 is not saved here - it
274 	was saved in the method-specific trampoline code
275 	-----------------------------------------------------------*/
277 	ppc_stwu (buf, ppc_r1, -STACK, ppc_r1);
279 	/* Save r0 before modifying it - we will need its contents in
280 	   'ppc_magic_trampoline' */
281 	ppc_stw (buf, ppc_r0, STACK - 8, ppc_r1);
283 	ppc_stw (buf, ppc_r31, STACK - 4, ppc_r1);
284 	ppc_mr (buf, ppc_r31, ppc_r1);
286 	/* Now save our registers. */
287 	ppc_stw (buf, ppc_r3, STACK - 12, ppc_r1);
288 	ppc_stw (buf, ppc_r4, STACK - 16, ppc_r1);
289 	ppc_stw (buf, ppc_r5, STACK - 20, ppc_r1);
290 	ppc_stw (buf, ppc_r6, STACK - 24, ppc_r1);
291 	ppc_stw (buf, ppc_r7, STACK - 28, ppc_r1);
292 	ppc_stw (buf, ppc_r8, STACK - 32, ppc_r1);
293 	ppc_stw (buf, ppc_r9, STACK - 36, ppc_r1);
294 	ppc_stw (buf, ppc_r10, STACK - 40, ppc_r1);
295 	/* STACK - 44 contains r11, which is set in the method-specific
296 	   part of the trampoline (see below this 'if' block) */
297 	ppc_stw (buf, ppc_r12, STACK - 48, ppc_r1);
298 	ppc_stw (buf, ppc_r13, STACK - 52, ppc_r1);
299 	ppc_stw (buf, ppc_r14, STACK - 56, ppc_r1);
300 	ppc_stw (buf, ppc_r15, STACK - 60, ppc_r1);
301 	ppc_stw (buf, ppc_r16, STACK - 64, ppc_r1);
302 	ppc_stw (buf, ppc_r17, STACK - 68, ppc_r1);
303 	ppc_stw (buf, ppc_r18, STACK - 72, ppc_r1);
304 	ppc_stw (buf, ppc_r19, STACK - 76, ppc_r1);
305 	ppc_stw (buf, ppc_r20, STACK - 80, ppc_r1);
306 	ppc_stw (buf, ppc_r21, STACK - 84, ppc_r1);
307 	ppc_stw (buf, ppc_r22, STACK - 88, ppc_r1);
308 	ppc_stw (buf, ppc_r23, STACK - 92, ppc_r1);
309 	ppc_stw (buf, ppc_r24, STACK - 96, ppc_r1);
310 	ppc_stw (buf, ppc_r25, STACK - 100, ppc_r1);
311 	ppc_stw (buf, ppc_r26, STACK - 104, ppc_r1);
312 	ppc_stw (buf, ppc_r27, STACK - 108, ppc_r1);
313 	ppc_stw (buf, ppc_r28, STACK - 112, ppc_r1);
314 	ppc_stw (buf, ppc_r29, STACK - 116, ppc_r1);
315 	ppc_stw (buf, ppc_r30, STACK - 120, ppc_r1);
316 	/* Save 'method' pseudo-parameter - the one passed in r11 */
317 	ppc_stw (buf, ppc_r11, STACK - 124, ppc_r1);
319 	/* Save the FP registers */
/* FP save area starts just past the GPR area (124 bytes) rounded for
   8-byte alignment; 'offset' presumably advances by 8 inside the loop
   (that line is missing from this view). */
320 	offset = 124 + 4 + 8;
321 	for (i = ppc_f1; i <= ppc_f8; ++i) {
322 		ppc_stfd (buf, i, STACK - offset, ppc_r1);
326 	/*----------------------------------------------------------
327 	STEP 1: call 'mono_get_lmf_addr()' to get the address of our
328 	LMF. We'll need to restore it after the call to
329 	'ppc_magic_trampoline' and before the call to the native
331 	----------------------------------------------------------*/
333 	/* Calculate the address and make the call. Keep in mind that
334 	   we're using r0, so we'll have to restore it before calling
335 	   'ppc_magic_trampoline' */
336 	ppc_lis (buf, ppc_r0, (guint32) mono_get_lmf_addr >> 16);
337 	ppc_ori (buf, ppc_r0, ppc_r0, (guint32) mono_get_lmf_addr & 0xffff);
338 	ppc_mtlr (buf, ppc_r0);
/* The LMF is fetched but never linked/unlinked below — known gap. */
341 	/* XXX Update LMF !!! */
343 	/*----------------------------------------------------------
344 	STEP 2: call 'ppc_magic_trampoline()', who will compile the
345 	code and fix the method vtable entry for us
346 	----------------------------------------------------------*/
350 	/* Arg 1: MonoMethod *method. It was put in r11 by the
351 	   method-specific trampoline code, and then saved before the call
352 	   to mono_get_lmf_addr()'. Restore r11, by the way :-) */
/* Jump trampolines pass NULL for method/code; the magic trampoline
   presumably detects that — TODO confirm against the full source. */
353 	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
354 		ppc_li (buf, ppc_r3, 0);
356 	ppc_lwz (buf, ppc_r3, STACK - 124, ppc_r1);
357 	ppc_lwz (buf, ppc_r11, STACK - 44, ppc_r1);
359 	/* Arg 2: code (next address to the instruction that called us) */
360 	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
361 		ppc_li (buf, ppc_r4, 0);
363 	ppc_lwz (buf, ppc_r4, STACK + PPC_RET_ADDR_OFFSET, ppc_r1);
365 	/* Arg 3: stack pointer */
366 	ppc_mr (buf, ppc_r5, ppc_r1);
368 	/* Calculate call address, restore r0 and call
369 	   'ppc_magic_trampoline'. Return value will be in r3 */
370 	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
371 		ppc_lis (buf, ppc_r0, (guint32) ppc_class_init_trampoline >> 16);
372 		ppc_ori (buf, ppc_r0, ppc_r0, (guint32) ppc_class_init_trampoline & 0xffff);
374 	ppc_lis (buf, ppc_r0, (guint32) ppc_magic_trampoline >> 16);
375 	ppc_ori (buf, ppc_r0, ppc_r0, (guint32) ppc_magic_trampoline & 0xffff);
377 	ppc_mtlr (buf, ppc_r0);
/* Restore the caller's r0 (saved at STACK-8) before making the call. */
378 	ppc_lwz (buf, ppc_r0, STACK - 8, ppc_r1);
381 	/* OK, code address is now on r3. Move it to r0, so that we
382 	   can restore r3 and use it from r0 later */
383 	ppc_mr (buf, ppc_r0, ppc_r3);
386 	/*----------------------------------------------------------
387 	STEP 3: Restore the LMF
388 	----------------------------------------------------------*/
392 	/*----------------------------------------------------------
393 	STEP 4: call the compiled method
394 	----------------------------------------------------------*/
396 	/* Restore registers */
398 	ppc_lwz (buf, ppc_r3, STACK - 12, ppc_r1);
399 	ppc_lwz (buf, ppc_r4, STACK - 16, ppc_r1);
400 	ppc_lwz (buf, ppc_r5, STACK - 20, ppc_r1);
401 	ppc_lwz (buf, ppc_r6, STACK - 24, ppc_r1);
402 	ppc_lwz (buf, ppc_r7, STACK - 28, ppc_r1);
403 	ppc_lwz (buf, ppc_r8, STACK - 32, ppc_r1);
404 	ppc_lwz (buf, ppc_r9, STACK - 36, ppc_r1);
405 	ppc_lwz (buf, ppc_r10, STACK - 40, ppc_r1);
407 	/* Restore the FP registers */
408 	offset = 124 + 4 + 8;
409 	for (i = ppc_f1; i <= ppc_f8; ++i) {
410 		ppc_lfd (buf, i, STACK - offset, ppc_r1);
413 	/* We haven't touched any of these, so there's no need to
416 	ppc_lwz (buf, ppc_r14, STACK - 56, ppc_r1);
417 	ppc_lwz (buf, ppc_r15, STACK - 60, ppc_r1);
418 	ppc_lwz (buf, ppc_r16, STACK - 64, ppc_r1);
419 	ppc_lwz (buf, ppc_r17, STACK - 68, ppc_r1);
420 	ppc_lwz (buf, ppc_r18, STACK - 72, ppc_r1);
421 	ppc_lwz (buf, ppc_r19, STACK - 76, ppc_r1);
422 	ppc_lwz (buf, ppc_r20, STACK - 80, ppc_r1);
423 	ppc_lwz (buf, ppc_r21, STACK - 84, ppc_r1);
424 	ppc_lwz (buf, ppc_r22, STACK - 88, ppc_r1);
425 	ppc_lwz (buf, ppc_r23, STACK - 92, ppc_r1);
426 	ppc_lwz (buf, ppc_r24, STACK - 96, ppc_r1);
427 	ppc_lwz (buf, ppc_r25, STACK - 100, ppc_r1);
428 	ppc_lwz (buf, ppc_r26, STACK - 104, ppc_r1);
429 	ppc_lwz (buf, ppc_r27, STACK - 108, ppc_r1);
430 	ppc_lwz (buf, ppc_r28, STACK - 112, ppc_r1);
431 	ppc_lwz (buf, ppc_r29, STACK - 116, ppc_r1);
432 	ppc_lwz (buf, ppc_r30, STACK - 120, ppc_r1);
435 	/* Non-standard function epilogue. Instead of doing a proper
436 	   return, we just call the compiled code, so
437 	   that, when it finishes, the method returns here. */
440 	/* Restore stack pointer, r31, LR and jump to the code */
441 	ppc_lwz (buf, ppc_r1, 0, ppc_r1);
442 	ppc_lwz (buf, ppc_r31, -4, ppc_r1);
443 	ppc_lwz (buf, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
444 	ppc_mtlr (buf, ppc_r11);
/* Compiled code address was parked in r0 (STEP 2); tail-jump via CTR. */
445 	ppc_mtctr (buf, ppc_r0);
446 	ppc_bcctr (buf, 20, 0);
/* Alternative (class-init) epilogue: return to the caller instead of
   jumping into compiled code — the branch selecting it is missing here. */
448 	ppc_mtlr (buf, ppc_r0);
451 	/* Restore stack pointer, r31, LR and return to caller */
452 	ppc_lwz (buf, ppc_r11, 0, ppc_r1);
453 	ppc_lwz (buf, ppc_r31, -4, ppc_r11);
454 	ppc_mr (buf, ppc_r1, ppc_r11);
455 	ppc_lwz (buf, ppc_r0, 4, ppc_r1);
456 	ppc_mtlr (buf, ppc_r0);
460 	/* Flush instruction cache, since we've generated code */
461 	mono_arch_flush_icache (code, buf - code);
464 	g_assert ((buf - code) <= 512);
/* Cache the generated trampoline under its type before returning. */
467 	switch (tramp_type) {
468 	case MONO_TRAMPOLINE_GENERIC:
469 		mono_generic_trampoline_code = code;
471 	case MONO_TRAMPOLINE_JUMP:
472 		generic_jump_trampoline = code;
474 	case MONO_TRAMPOLINE_CLASS_INIT:
475 		generic_class_init_trampoline = code;
/* NOTE(review): extracted with line gaps — return type, locals (code/buf),
   the cached-lookup branch, the final 'blrl'/return and closing brace are
   missing from this view. Code lines below are untouched. */
483 * arch_create_jit_trampoline:
484 * @method: pointer to the method info
486 * Creates a trampoline function for virtual methods. If the created
487 * code is called it first starts JIT compilation of method,
488 * and then calls the newly created method. It also replaces the
489 * corresponding vtable entry (see ppc_magic_trampoline).
491 * A trampoline consists of two parts: a main fragment, shared by all method
492 * trampolines, and some code specific to each method, which hard-codes a
493 * reference to that method and then calls the main fragment.
495 * The main fragment contains a call to 'ppc_magic_trampoline', which performs
496 * call to the JIT compiler and substitutes the method-specific fragment with
497 * some code that directly calls the JIT-compiled method.
499 * Returns: a pointer to the newly created code
502 mono_arch_create_jit_trampoline (MonoMethod *method)
505 	static guint8 *vc = NULL;
507 	/* previously created trampoline code */
/* Synchronized methods are wrapped first, then trampolined recursively. */
511 	if (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)
512 		return mono_arch_create_jit_trampoline (mono_marshal_get_synchronized_wrapper (method));
/* 'vc' = the shared generic trampoline (built once, cached inside). */
514 	vc = create_trampoline_code (MONO_TRAMPOLINE_GENERIC);
516 	/* This is the method-specific part of the trampoline. Its purpose is
517 	   to provide the generic part with the MonoMethod *method pointer. We'll
518 	   use r11 to keep that value, for instance. However, the generic part of
519 	   the trampoline relies on r11 having the same value it had before coming
520 	   here, so we must save it before. */
521 	code = buf = g_malloc(METHOD_TRAMPOLINE_SIZE);
523 	/* Save r11. There's nothing magic in the '44', it's just an arbitrary
524 	   position - see above */
525 	ppc_stw (buf, ppc_r11, -44, ppc_r1);
527 	/* Now save LR - we'll overwrite it now */
529 	ppc_mflr (buf, ppc_r11);
529 	ppc_stw (buf, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
531 	/* Prepare the jump to the generic trampoline code.*/
532 	ppc_lis (buf, ppc_r11, (guint32) vc >> 16);
533 	ppc_ori (buf, ppc_r11, ppc_r11, (guint32) vc & 0xffff);
534 	ppc_mtlr (buf, ppc_r11);
536 	/* And finally put 'method' in r11 and fly! */
537 	ppc_lis (buf, ppc_r11, (guint32) method >> 16);
538 	ppc_ori (buf, ppc_r11, ppc_r11, (guint32) method & 0xffff);
541 	/* Flush instruction cache, since we've generated code */
542 	mono_arch_flush_icache (code, buf - code);
545 	g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
547 	/* Store trampoline address */
550 	mono_jit_stats.method_trampolines++;
/* NOTE(review): this extracted view mixes an x86 trampoline implementation
   into the PPC file and is missing lines (return type, locals addr/reg/disp/o,
   the computation of 'o' from reg+disp, several braces). Code lines below are
   untouched. */
558 * x86_magic_trampoline:
559 * @eax: saved x86 register
560 * @ecx: saved x86 register
561 * @edx: saved x86 register
562 * @esi: saved x86 register
563 * @edi: saved x86 register
564 * @ebx: saved x86 register
565 * @code: pointer into caller code
566 * @method: the method to translate
568 * This method is called by the trampoline functions for virtual
569 * methods. It inspects the caller code to find the address of the
570 * vtable slot, then calls the JIT compiler and writes the address
571 * of the compiled method back to the vtable. All virtual methods
572 * are called with: x86_call_membase (inst, basereg, disp). We always
573 * use 32 bit displacement to ensure that the length of the call
574 * instruction is 6 bytes. We need to get the value of the basereg
575 * and the constant displacement.
578 x86_magic_trampoline (int eax, int ecx, int edx, int esi, int edi,
579 			int ebx, guint8 *code, MonoMethod *m)
586 	addr = mono_compile_method (m);
589 	/* go to the start of the call instruction
591 	 * address_byte = (m << 6) | (o << 3) | reg
592 	 * call opcode: 0xff address_byte displacement
/* call *disp8(reg): opcode 0xff, ModR/M mod==01, reg field /2, 8-bit disp. */
597 	if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
598 		reg = code [4] & 0x07;
599 		disp = (signed char)code [5];
/* call *disp32(reg): ModR/M mod==10, 32-bit displacement follows. */
601 	if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
602 		reg = code [1] & 0x07;
603 		disp = *((gint32*)(code + 2));
/* Direct 'call rel32' (0xe8): patch the relative target in place. */
604 	} else if ((code [1] == 0xe8)) {
605 		*((guint32*)(code + 2)) = (guint)addr - ((guint)code + 1) - 5;
607 	} else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
609 	 * This is an interface call: should check the above code can't catch it earlier
610 	 * 8b 40 30   mov    0x30(%eax),%eax
614 	reg = code [5] & 0x07;
616 	printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
617 		code [4], code [5], code [6]);
618 	g_assert_not_reached ();
642 	g_assert_not_reached ();
/* Write the resolved address back into the vtable slot 'o'; value types
   go through the unboxing thunk first. */
647 	if (m->klass->valuetype) {
648 		return *((gpointer *)o) = get_unbox_trampoline (m, addr);
650 	return *((gpointer *)o) = addr;
/* NOTE(review): x86 variant of mono_arch_create_jit_trampoline, present in
   this extracted chunk alongside the PPC variant; missing lines include the
   return type, locals (code/buf), closing braces and the final return. Code
   lines below are untouched. */
655 * mono_arch_create_jit_trampoline:
656 * @method: pointer to the method info
658 * Creates a trampoline function for virtual methods. If the created
659 * code is called it first starts JIT compilation of method,
660 * and then calls the newly created method. It also replaces the
661 * corresponding vtable entry (see x86_magic_trampoline).
663 * Returns: a pointer to the newly created code
666 mono_arch_create_jit_trampoline (MonoMethod *method)
670 	/* previously created trampoline code */
/* Build the shared generic trampoline lazily, first call only. */
674 	if (!mono_generic_trampoline_code) {
675 		mono_generic_trampoline_code = buf = g_malloc (256);
676 		/* save caller save regs because we need to do a call */
677 		x86_push_reg (buf, X86_EDX);
678 		x86_push_reg (buf, X86_EAX);
679 		x86_push_reg (buf, X86_ECX);
683 		/* save the IP (caller ip) */
684 		x86_push_membase (buf, X86_ESP, 16);
686 		x86_push_reg (buf, X86_EBX);
687 		x86_push_reg (buf, X86_EDI);
688 		x86_push_reg (buf, X86_ESI);
689 		x86_push_reg (buf, X86_EBP);
691 		/* save method info */
692 		x86_push_membase (buf, X86_ESP, 32);
693 		/* get the address of lmf for the current thread */
694 		x86_call_code (buf, mono_get_lmf_addr);
/* Link a new LMF: push lmf addr, push *lmf (previous), then point *lmf here. */
696 		x86_push_reg (buf, X86_EAX);
697 		/* push *lmf (previous_lmf) */
698 		x86_push_membase (buf, X86_EAX, 0);
700 		x86_mov_membase_reg (buf, X86_EAX, 0, X86_ESP, 4);
703 		/* push the method info */
704 		x86_push_membase (buf, X86_ESP, 44);
705 		/* push the return address onto the stack */
706 		x86_push_membase (buf, X86_ESP, 52);
708 		/* save all register values */
709 		x86_push_reg (buf, X86_EBX);
710 		x86_push_reg (buf, X86_EDI);
711 		x86_push_reg (buf, X86_ESI);
712 		x86_push_membase (buf, X86_ESP, 64); /* EDX */
713 		x86_push_membase (buf, X86_ESP, 64); /* ECX */
714 		x86_push_membase (buf, X86_ESP, 64); /* EAX */
/* Call the C resolver; compiled-code address comes back in EAX. */
716 		x86_call_code (buf, x86_magic_trampoline);
717 		x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 8*4);
719 		/* restore LMF start */
720 		/* ebx = previous_lmf */
721 		x86_pop_reg (buf, X86_EBX);
723 		x86_pop_reg (buf, X86_EDI);
724 		/* *(lmf) = previous_lmf */
725 		x86_mov_membase_reg (buf, X86_EDI, 0, X86_EBX, 4);
726 		/* discard method info */
727 		x86_pop_reg (buf, X86_ESI);
728 		/* restore caller saved regs */
729 		x86_pop_reg (buf, X86_EBP);
730 		x86_pop_reg (buf, X86_ESI);
731 		x86_pop_reg (buf, X86_EDI);
732 		x86_pop_reg (buf, X86_EBX);
733 		/* discard save IP */
734 		x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4);
735 		/* restore LMF end */
737 		x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 16);
739 		/* call the compiled method */
740 		x86_jump_reg (buf, X86_EAX);
742 		g_assert ((buf - mono_generic_trampoline_code) <= 256);
/* Per-method stub: push the MonoMethod* and jump to the shared trampoline. */
745 	code = buf = g_malloc (16);
746 	x86_push_imm (buf, method);
747 	x86_jump_code (buf, mono_generic_trampoline_code);
748 	g_assert ((buf - code) <= 16);
750 	/* store trampoline address */
753 	//mono_jit_stats.method_trampolines++;
/* NOTE(review): extracted with line gaps — return type, locals, the branch
   separating the direct-call path (786-798) from the generic-trampoline path
   (801-820), the final 'blrl'/return and closing brace are missing. Code
   lines below are untouched. */
761 * mono_arch_create_class_init_trampoline:
762 * @vtable: the type to initialize
764 * Creates a trampoline function to run a type initializer.
765 * If the trampoline is called, it calls mono_runtime_class_init with the
766 * given vtable, then patches the caller code so it does not get called any
769 * Returns: a pointer to the newly created code
772 mono_arch_create_class_init_trampoline (MonoVTable *vtable)
774 	guint8 *code, *buf, *tramp;
776 	tramp = create_trampoline_code (MONO_TRAMPOLINE_CLASS_INIT);
/* NOTE(review): comment below looks copy-pasted from the method trampoline;
   here the payload carried to the generic part is 'vtable', not 'method'
   (see lines 789 and 815-817). */
778 	/* This is the method-specific part of the trampoline. Its purpose is
779 	   to provide the generic part with the MonoMethod *method pointer. We'll
780 	   use r11 to keep that value, for instance. However, the generic part of
781 	   the trampoline relies on r11 having the same value it had before coming
782 	   here, so we must save it before. */
783 	code = buf = g_malloc(METHOD_TRAMPOLINE_SIZE);
/* Path A: minimal frame, call ppc_class_init_trampoline (vtable, NULL, 0)
   directly, then restore LR and the stack. */
786 	ppc_mflr (buf, ppc_r4);
787 	ppc_stw (buf, ppc_r4, PPC_RET_ADDR_OFFSET, ppc_sp);
788 	ppc_stwu (buf, ppc_sp, -32, ppc_sp);
789 	ppc_load (buf, ppc_r3, vtable);
790 	ppc_load (buf, ppc_r5, 0);
792 	ppc_load (buf, ppc_r0, ppc_class_init_trampoline);
793 	ppc_mtlr (buf, ppc_r0);
796 	ppc_lwz (buf, ppc_r0, 32 + PPC_RET_ADDR_OFFSET, ppc_sp);
797 	ppc_mtlr (buf, ppc_r0);
798 	ppc_addic (buf, ppc_sp, ppc_sp, 32);
/* Path B: mirror the per-method stub — stash r11 and LR, aim LR at the
   generic class-init trampoline, and carry 'vtable' in r11. */
801 	/* Save r11. There's nothing magic in the '44', it's just an arbitrary
802 	   position - see above */
803 	ppc_stw (buf, ppc_r11, -44, ppc_r1);
805 	/* Now save LR - we'll overwrite it now */
806 	ppc_mflr (buf, ppc_r11);
807 	ppc_stw (buf, ppc_r11, 4, ppc_r1);
808 	ppc_stw (buf, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
810 	/* Prepare the jump to the generic trampoline code.*/
811 	ppc_lis (buf, ppc_r11, (guint32) tramp >> 16);
812 	ppc_ori (buf, ppc_r11, ppc_r11, (guint32) tramp & 0xffff);
813 	ppc_mtlr (buf, ppc_r11);
815 	/* And finally put 'vtable' in r11 and fly! */
816 	ppc_lis (buf, ppc_r11, (guint32) vtable >> 16);
817 	ppc_ori (buf, ppc_r11, ppc_r11, (guint32) vtable & 0xffff);
819 	ppc_lwz (buf, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_r1);
820 	ppc_mtlr (buf, ppc_r0);
825 	/* Flush instruction cache, since we've generated code */
826 	mono_arch_flush_icache (code, buf - code);
829 	g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
831 	mono_jit_stats.method_trampolines++;
837 * This method is only called when running in the Mono Debugger.
840 mono_debugger_create_notification_function (gpointer *notification_address)
844 ptr = buf = g_malloc0 (16);
846 if (notification_address)
847 *notification_address = buf;