2 * tramp-ppc.c: JIT trampoline code for PowerPC
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Paolo Molaro (lupus@ximian.com)
7 * Carlos Valiente <yo@virutass.net>
9 * (C) 2001 Ximian, Inc.
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/marshal.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/arch/ppc/ppc-codegen.h>
19 #include <mono/metadata/mono-debug-debugger.h>
25 MONO_TRAMPOLINE_GENERIC,
27 MONO_TRAMPOLINE_CLASS_INIT
30 /* adapt to mini later... */
31 #define mono_jit_share_code (1)
34 * Address of the generic trampoline code. This is used by the debugger to check
35 * whether a method is a trampoline.
37 guint8 *mono_generic_trampoline_code = NULL;
40 * get_unbox_trampoline:
42 * @addr: pointer to native code for @m
44 * when value type methods are called through the vtable we need to unbox the
45 * this argument. This method returns a pointer to a trampoline which does
46 * unboxing before calling the method
49 get_unbox_trampoline (MonoMethod *m, gpointer addr)
/* Returns a small thunk that advances the `this` argument register past the
   MonoObject header (i.e. unboxes it) and then tail-jumps to the compiled
   code at `addr`.
   NOTE(review): several source lines of this function are missing from this
   view (locals, return, closing brace); comments describe only visible code. */
54 if (!m->signature->ret->byref && MONO_TYPE_ISSTRUCT (m->signature->ret))
/* A by-value struct return adds a hidden return-buffer argument, which shifts
   `this` to the next argument register — presumably this_pos is bumped on the
   missing line that follows; TODO confirm. */
57 start = code = g_malloc (20);
/* Emit: load target into r11, move it to CTR, bias `this` by the size of the
   object header, then branch unconditionally via CTR (bcctr 20,0 = always). */
59 ppc_load (code, ppc_r11, addr);
60 ppc_mtctr (code, ppc_r11);
61 ppc_addi (code, this_pos, this_pos, sizeof (MonoObject));
62 ppc_bcctr (code, 20, 0);
/* The emitted sequence must fit the 20 bytes allocated above. */
63 g_assert ((code - start) <= 20);
68 /* Stack size for trampoline function */
69 #define STACK (144 + 8*8)
71 /* Method-specific trampoline code fragment size */
72 #define METHOD_TRAMPOLINE_SIZE 64
75 * ppc_magic_trampoline:
76 * @code: pointer into caller code
77 * @method: the method to translate
80 * This method is called by the function 'arch_create_jit_trampoline', which in
81 * turn is called by the trampoline functions for virtual methods.
82 * After having called the JIT compiler to compile the method, it inspects the
83 * caller code to find the address of the method-specific part of the
84 * trampoline vtable slot for this method, updates it with a fragment that calls
85 * the newly compiled code and returns this address of the compiled code to
86 * 'arch_create_jit_trampoline'
89 ppc_magic_trampoline (MonoMethod *method, guint32 *code, char *sp)
/* Compiles `method`, then patches the call site (or the method-specific
   trampoline fragment) so subsequent calls go directly to the compiled code.
   @code points just past the instruction that called the trampoline;
   @sp is the trampoline's stack pointer (frame laid out by
   create_trampoline_code, size STACK).
   NOTE(review): many lines of this function are missing from this view
   (declarations of addr/reg/o/start, braces); comments cover visible code. */
95 addr = mono_compile_method(method);
98 /* Locate the address of the method-specific trampoline. The call using
99 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
100 looks something like this:
102 mtlr rA ; Move rA (a register containing the
103 ; target address) to LR
104 blrl ; Call function at LR
106 PowerPC instructions are 32-bit long, which means that a 32-bit target
107 address cannot be encoded as an immediate value (because we already
108 have spent some bits to encode the branch instruction!). That's why a
109 'b'ranch to the contents of the 'l'ink 'r'egister (with 'l'ink register
110 update) is needed, instead of a simpler 'branch immediate'. This
111 complicates our purpose here, because 'blrl' overwrites LR, which holds
112 the value we're interested in.
114 Therefore, we need to locate the 'mtlr rA' instruction to know which
115 register LR was loaded from, and then retrieve the value from that
118 /* This is the 'blrl' instruction */
122 * Note that methods are called also with the bl opcode.
/* Major opcode 18 is b/bl (branch immediate): patch the branch in place. */
124 if (((*code) >> 26) == 18) {
125 ppc_patch (code, addr);
126 mono_arch_flush_icache (code, 4);
130 /* Sanity check: instruction must be 'blrl' */
131 g_assert(*code == 0x4e800021);
133 /* OK, we're now at the 'blrl' instruction. Now walk backwards
134 till we get to a 'mtlr rA' */
/* NOTE(review): `(x & 0x7c0803a6) == 0x7c0803a6` only checks that the bits of
   the mtlr template are SET; other mtspr-form instructions (e.g. mtctr,
   0x7c0903a6) also satisfy it. A stricter mask such as
   (x & 0xfc1fffff) == 0x7c0803a6 would uniquely match 'mtlr rA' — verify. */
136 if((*code & 0x7c0803a6) == 0x7c0803a6) {
137 /* Here we are: we reached the 'mtlr rA'.
138 Extract the register from the instruction */
139 reg = (*code & 0x03e00000) >> 21;
/* Fetch rA's saved value from the trampoline frame.
   NOTE(review): these offsets do not line up with the save offsets used in
   create_trampoline_code (there r11 is stored at STACK-44, r12 at STACK-48,
   etc., while here case 11 reads STACK-24 and case 31 reads STACK-4, the
   slot where r31 is saved). Missing lines may reconcile this — confirm. */
141 case 0 : o = *((int *) (sp + STACK - 8)); break;
142 case 11: o = *((int *) (sp + STACK - 24)); break;
143 case 12: o = *((int *) (sp + STACK - 28)); break;
144 case 13: o = *((int *) (sp + STACK - 32)); break;
145 case 14: o = *((int *) (sp + STACK - 36)); break;
146 case 15: o = *((int *) (sp + STACK - 40)); break;
147 case 16: o = *((int *) (sp + STACK - 44)); break;
148 case 17: o = *((int *) (sp + STACK - 48)); break;
149 case 18: o = *((int *) (sp + STACK - 52)); break;
150 case 19: o = *((int *) (sp + STACK - 56)); break;
151 case 20: o = *((int *) (sp + STACK - 60)); break;
152 case 21: o = *((int *) (sp + STACK - 64)); break;
153 case 22: o = *((int *) (sp + STACK - 68)); break;
154 case 23: o = *((int *) (sp + STACK - 72)); break;
155 case 24: o = *((int *) (sp + STACK - 76)); break;
156 case 25: o = *((int *) (sp + STACK - 80)); break;
157 case 26: o = *((int *) (sp + STACK - 84)); break;
158 case 27: o = *((int *) (sp + STACK - 88)); break;
159 case 28: o = *((int *) (sp + STACK - 92)); break;
160 case 29: o = *((int *) (sp + STACK - 96)); break;
161 case 30: o = *((int *) (sp + STACK - 100)); break;
162 case 31: o = *((int *) (sp + STACK - 4)); break;
164 printf("%s: Unexpected register %d\n",
166 g_assert_not_reached();
172 /* this is not done for non-virtual calls, because in that case
173 we won't have an object, but the actual pointer to the
174 valuetype as the this argument
176 if (method->klass->valuetype)
177 addr = get_unbox_trampoline (method, addr);
179 /* Finally, replace the method-specific trampoline code (which called
180 the generic trampoline code) with a fragment that calls directly the
185 /* FIXME: make the patching thread safe */
187 ppc_patch (o - 4, addr);
/* Rewrite the method-specific fragment: standard-ish prologue, load the
   compiled address into LR via lis/ori, then (in missing lines) call it
   and tear the frame down again. */
189 ppc_stwu (o, ppc_r1, -16, ppc_r1);
190 ppc_mflr (o, ppc_r0);
191 ppc_stw (o, ppc_r31, 12, ppc_r1);
192 ppc_stw (o, ppc_r0, 20, ppc_r1);
193 ppc_mr (o, ppc_r31, ppc_r1);
195 ppc_lis (o, ppc_r0, (guint32) addr >> 16);
196 ppc_ori (o, ppc_r0, ppc_r0, (guint32) addr & 0xffff);
197 ppc_mtlr (o, ppc_r0);
200 ppc_lwz (o, ppc_r11, 0, ppc_r1);
201 ppc_lwz (o, ppc_r0, 4, ppc_r11);
202 ppc_mtlr (o, ppc_r0);
203 ppc_lwz (o, ppc_r31, -4, ppc_r11);
204 ppc_mr (o, ppc_r1, ppc_r11);
/* We generated code: flush the icache and check we stayed in bounds. */
207 mono_arch_flush_icache (start, o - start);
208 g_assert(o - start < METHOD_TRAMPOLINE_SIZE);
214 ppc_class_init_trampoline (void *vtable, guint32 *code, char *sp)
/* Runs the class (type) initializer for `vtable`, then patches the calling
   'bl' into a nop so the initializer is never invoked again from this site.
   NOTE(review): intervening lines are missing from this view. */
216 mono_runtime_class_init (vtable);
219 /* This is the 'bl' instruction */
/* Major opcode 18 is b/bl: overwrite the call with a nop (ori 0,0,0). */
222 if (((*code) >> 26) == 18) {
223 ppc_ori (code, 0, 0, 0); /* nop */
224 mono_arch_flush_icache (code, 4);
/* Reached only if the caller instruction was not a bl — unexpected. */
227 g_assert_not_reached ();
233 create_trampoline_code (MonoTrampolineType tramp_type)
/* Builds (once per tramp_type, cached in statics) the generic PowerPC
   trampoline shared by all methods: saves GPRs/FPRs into a STACK-byte frame,
   calls mono_get_lmf_addr, then dispatches to ppc_magic_trampoline or
   ppc_class_init_trampoline, restores state and transfers to the result.
   NOTE(review): many lines (braces, LMF handling, returns) are missing from
   this view; comments below only describe the visible instructions. */
235 guint8 *buf, *code = NULL;
236 static guint8* generic_jump_trampoline = NULL;
237 static guint8 *generic_class_init_trampoline = NULL;
/* Return the cached trampoline for this type if it was already created. */
240 switch (tramp_type) {
241 case MONO_TRAMPOLINE_GENERIC:
242 if (mono_generic_trampoline_code)
243 return mono_generic_trampoline_code;
245 case MONO_TRAMPOLINE_JUMP:
246 if (generic_jump_trampoline)
247 return generic_jump_trampoline;
249 case MONO_TRAMPOLINE_CLASS_INIT:
250 if (generic_class_init_trampoline)
251 return generic_class_init_trampoline;
256 /* Now we'll create in 'buf' the PowerPC trampoline code. This
257 is the trampoline code common to all methods */
259 code = buf = g_malloc(512);
261 /*-----------------------------------------------------------
262 STEP 0: First create a non-standard function prologue with a
263 stack size big enough to save our registers:
265 lr (We'll be calling functions here, so we
267 r0 (See ppc_magic_trampoline)
268 r1 (sp) (Stack pointer - must save)
269 r3-r10 Function arguments.
270 r11-r31 (See ppc_magic_trampoline)
271 method in r11 (See ppc_magic_trampoline)
273 This prologue is non-standard because r0 is not saved here - it
274 was saved in the method-specific trampoline code
275 -----------------------------------------------------------*/
277 ppc_stwu (buf, ppc_r1, -STACK, ppc_r1);
279 /* Save r0 before modifying it - we will need its contents in
280 'ppc_magic_trampoline' */
281 ppc_stw (buf, ppc_r0, STACK - 8, ppc_r1);
283 ppc_stw (buf, ppc_r31, STACK - 4, ppc_r1);
284 ppc_mr (buf, ppc_r31, ppc_r1);
286 /* Now save our registers. */
287 ppc_stw (buf, ppc_r3, STACK - 12, ppc_r1);
288 ppc_stw (buf, ppc_r4, STACK - 16, ppc_r1);
289 ppc_stw (buf, ppc_r5, STACK - 20, ppc_r1);
290 ppc_stw (buf, ppc_r6, STACK - 24, ppc_r1);
291 ppc_stw (buf, ppc_r7, STACK - 28, ppc_r1);
292 ppc_stw (buf, ppc_r8, STACK - 32, ppc_r1);
293 ppc_stw (buf, ppc_r9, STACK - 36, ppc_r1);
294 ppc_stw (buf, ppc_r10, STACK - 40, ppc_r1);
295 /* STACK - 44 contains r11, which is set in the method-specific
296 part of the trampoline (see below this 'if' block) */
297 ppc_stw (buf, ppc_r12, STACK - 48, ppc_r1);
298 ppc_stw (buf, ppc_r13, STACK - 52, ppc_r1);
299 ppc_stw (buf, ppc_r14, STACK - 56, ppc_r1);
300 ppc_stw (buf, ppc_r15, STACK - 60, ppc_r1);
301 ppc_stw (buf, ppc_r16, STACK - 64, ppc_r1);
302 ppc_stw (buf, ppc_r17, STACK - 68, ppc_r1);
303 ppc_stw (buf, ppc_r18, STACK - 72, ppc_r1);
304 ppc_stw (buf, ppc_r19, STACK - 76, ppc_r1);
305 ppc_stw (buf, ppc_r20, STACK - 80, ppc_r1);
306 ppc_stw (buf, ppc_r21, STACK - 84, ppc_r1);
307 ppc_stw (buf, ppc_r22, STACK - 88, ppc_r1);
308 ppc_stw (buf, ppc_r23, STACK - 92, ppc_r1);
309 ppc_stw (buf, ppc_r24, STACK - 96, ppc_r1);
310 ppc_stw (buf, ppc_r25, STACK - 100, ppc_r1);
311 ppc_stw (buf, ppc_r26, STACK - 104, ppc_r1);
312 ppc_stw (buf, ppc_r27, STACK - 108, ppc_r1);
313 ppc_stw (buf, ppc_r28, STACK - 112, ppc_r1);
314 ppc_stw (buf, ppc_r29, STACK - 116, ppc_r1);
315 ppc_stw (buf, ppc_r30, STACK - 120, ppc_r1);
316 /* Save 'method' pseudo-parameter - the one passed in r11 */
317 ppc_stw (buf, ppc_r11, STACK - 124, ppc_r1);
319 /* Save the FP registers */
320 offset = 124 + 4 + 8;
321 for (i = ppc_f1; i <= ppc_f8; ++i) {
322 ppc_stfd (buf, i, STACK - offset, ppc_r1);
326 /*----------------------------------------------------------
327 STEP 1: call 'mono_get_lmf_addr()' to get the address of our
328 LMF. We'll need to restore it after the call to
329 'ppc_magic_trampoline' and before the call to the native
331 ----------------------------------------------------------*/
333 /* Calculate the address and make the call. Keep in mind that
334 we're using r0, so we'll have to restore it before calling
335 'ppc_magic_trampoline' */
336 ppc_lis (buf, ppc_r0, (guint32) mono_get_lmf_addr >> 16);
337 ppc_ori (buf, ppc_r0, ppc_r0, (guint32) mono_get_lmf_addr & 0xffff);
338 ppc_mtlr (buf, ppc_r0);
341 /* XXX Update LMF !!! */
343 /*----------------------------------------------------------
344 STEP 2: call 'ppc_magic_trampoline()', who will compile the
345 code and fix the method vtable entry for us
346 ----------------------------------------------------------*/
350 /* Arg 1: MonoMethod *method. It was put in r11 by the
351 method-specific trampoline code, and then saved before the call
352 to mono_get_lmf_addr()'. Restore r11, by the way :-) */
/* For JUMP trampolines there is no method argument: pass NULL instead.
   NOTE(review): method is reloaded from STACK-124 but r11 from STACK-44 —
   two different slots; see the r11 comments at STEP 0 and confirm. */
353 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
354 ppc_li (buf, ppc_r3, 0);
356 ppc_lwz (buf, ppc_r3, STACK - 124, ppc_r1);
357 ppc_lwz (buf, ppc_r11, STACK - 44, ppc_r1);
359 /* Arg 2: code (next address to the instruction that called us) */
360 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
361 ppc_li (buf, ppc_r4, 0);
363 ppc_lwz (buf, ppc_r4, STACK + 4, ppc_r1);
365 /* Arg 3: stack pointer */
366 ppc_mr (buf, ppc_r5, ppc_r1);
368 /* Calculate call address, restore r0 and call
369 'ppc_magic_trampoline'. Return value will be in r3 */
370 if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
371 ppc_lis (buf, ppc_r0, (guint32) ppc_class_init_trampoline >> 16);
372 ppc_ori (buf, ppc_r0, ppc_r0, (guint32) ppc_class_init_trampoline & 0xffff);
374 ppc_lis (buf, ppc_r0, (guint32) ppc_magic_trampoline >> 16);
375 ppc_ori (buf, ppc_r0, ppc_r0, (guint32) ppc_magic_trampoline & 0xffff);
377 ppc_mtlr (buf, ppc_r0);
378 ppc_lwz (buf, ppc_r0, STACK - 8, ppc_r1);
381 /* OK, code address is now on r3. Move it to r0, so that we
382 can restore r3 and use it from r0 later */
383 ppc_mr (buf, ppc_r0, ppc_r3);
386 /*----------------------------------------------------------
387 STEP 3: Restore the LMF
388 ----------------------------------------------------------*/
392 /*----------------------------------------------------------
393 STEP 4: call the compiled method
394 ----------------------------------------------------------*/
396 /* Restore registers */
398 ppc_lwz (buf, ppc_r3, STACK - 12, ppc_r1);
399 ppc_lwz (buf, ppc_r4, STACK - 16, ppc_r1);
400 ppc_lwz (buf, ppc_r5, STACK - 20, ppc_r1);
401 ppc_lwz (buf, ppc_r6, STACK - 24, ppc_r1);
402 ppc_lwz (buf, ppc_r7, STACK - 28, ppc_r1);
403 ppc_lwz (buf, ppc_r8, STACK - 32, ppc_r1);
404 ppc_lwz (buf, ppc_r9, STACK - 36, ppc_r1);
405 ppc_lwz (buf, ppc_r10, STACK - 40, ppc_r1);
407 /* Restore the FP registers */
408 offset = 124 + 4 + 8;
409 for (i = ppc_f1; i <= ppc_f8; ++i) {
410 ppc_lfd (buf, i, STACK - offset, ppc_r1);
413 /* We haven't touched any of these, so there's no need to
416 ppc_lwz (buf, ppc_r14, STACK - 56, ppc_r1);
417 ppc_lwz (buf, ppc_r15, STACK - 60, ppc_r1);
418 ppc_lwz (buf, ppc_r16, STACK - 64, ppc_r1);
419 ppc_lwz (buf, ppc_r17, STACK - 68, ppc_r1);
420 ppc_lwz (buf, ppc_r18, STACK - 72, ppc_r1);
421 ppc_lwz (buf, ppc_r19, STACK - 76, ppc_r1);
422 ppc_lwz (buf, ppc_r20, STACK - 80, ppc_r1);
423 ppc_lwz (buf, ppc_r21, STACK - 84, ppc_r1);
424 ppc_lwz (buf, ppc_r22, STACK - 88, ppc_r1);
425 ppc_lwz (buf, ppc_r23, STACK - 92, ppc_r1);
426 ppc_lwz (buf, ppc_r24, STACK - 96, ppc_r1);
427 ppc_lwz (buf, ppc_r25, STACK - 100, ppc_r1);
428 ppc_lwz (buf, ppc_r26, STACK - 104, ppc_r1);
429 ppc_lwz (buf, ppc_r27, STACK - 108, ppc_r1);
430 ppc_lwz (buf, ppc_r28, STACK - 112, ppc_r1);
431 ppc_lwz (buf, ppc_r29, STACK - 116, ppc_r1);
432 ppc_lwz (buf, ppc_r30, STACK - 120, ppc_r1);
435 /* Non-standard function epilogue. Instead of doing a proper
436 return, we just call the compiled code, so
437 that, when it finishes, the method returns here. */
440 /* Restore stack pointer, r31, LR and jump to the code */
441 ppc_lwz (buf, ppc_r1, 0, ppc_r1);
442 ppc_lwz (buf, ppc_r31, -4, ppc_r1);
443 ppc_lwz (buf, ppc_r11, 4, ppc_r1);
444 ppc_mtlr (buf, ppc_r11);
445 ppc_mtctr (buf, ppc_r0);
446 ppc_bcctr (buf, 20, 0);
448 ppc_mtlr (buf, ppc_r0);
451 /* Restore stack pointer, r31, LR and return to caller */
/* NOTE(review): this second epilogue is presumably the CLASS_INIT path
   (return to caller instead of jumping to compiled code) — the selecting
   branch is among the missing lines; confirm. */
452 ppc_lwz (buf, ppc_r11, 0, ppc_r1);
453 ppc_lwz (buf, ppc_r31, -4, ppc_r11);
454 ppc_mr (buf, ppc_r1, ppc_r11);
455 ppc_lwz (buf, ppc_r0, 4, ppc_r1);
456 ppc_mtlr (buf, ppc_r0);
460 /* Flush instruction cache, since we've generated code */
461 mono_arch_flush_icache (code, buf - code);
464 g_assert ((buf - code) <= 512);
/* Cache the freshly built trampoline in the slot for its type. */
467 switch (tramp_type) {
468 case MONO_TRAMPOLINE_GENERIC:
469 mono_generic_trampoline_code = code;
471 case MONO_TRAMPOLINE_JUMP:
472 generic_jump_trampoline = code;
474 case MONO_TRAMPOLINE_CLASS_INIT:
475 generic_class_init_trampoline = code;
483 * arch_create_jit_trampoline:
484 * @method: pointer to the method info
486 * Creates a trampoline function for virtual methods. If the created
487 * code is called it first starts JIT compilation of method,
488 * and then calls the newly created method. It also replaces the
489 * corresponding vtable entry (see ppc_magic_trampoline).
491 * A trampoline consists of two parts: a main fragment, shared by all method
492 * trampolines, and some code specific to each method, which hard-codes a
493 * reference to that method and then calls the main fragment.
495 * The main fragment contains a call to 'ppc_magic_trampoline', which performs
496 * call to the JIT compiler and substitutes the method-specific fragment with
497 * some code that directly calls the JIT-compiled method.
499 * Returns: a pointer to the newly created code
502 mono_arch_create_jit_trampoline (MonoMethod *method)
/* Builds the method-specific trampoline fragment (METHOD_TRAMPOLINE_SIZE
   bytes): it stashes r11 and LR, loads `method` into r11 and jumps to the
   shared generic trampoline created by create_trampoline_code().
   NOTE(review): several lines (locals, caching lookup, return) are missing
   from this view. */
505 static guint8 *vc = NULL;
507 /* previously created trampoline code */
/* Synchronized methods are routed through the synchronized wrapper first. */
511 if (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)
512 return mono_arch_create_jit_trampoline (mono_marshal_get_synchronized_wrapper (method));
514 vc = create_trampoline_code (MONO_TRAMPOLINE_GENERIC);
516 /* This is the method-specific part of the trampoline. Its purpose is
517 to provide the generic part with the MonoMethod *method pointer. We'll
518 use r11 to keep that value, for instance. However, the generic part of
519 the trampoline relies on r11 having the same value it had before coming
520 here, so we must save it before. */
521 code = buf = g_malloc(METHOD_TRAMPOLINE_SIZE);
523 /* Save r11. There's nothing magic in the '44', its just an arbitrary
524 position - see above */
/* This store targets the frame the generic trampoline will create (it reads
   r11 back from STACK - 44) — hence the write below the current sp. */
525 ppc_stw (buf, ppc_r11, -44, ppc_r1);
527 /* Now save LR - we'll overwrite it now */
528 ppc_mflr (buf, ppc_r11);
529 ppc_stw (buf, ppc_r11, 4, ppc_r1);
530 ppc_stw (buf, ppc_r11, 8, ppc_r1);
532 /* Prepare the jump to the generic trampoline code.*/
533 ppc_lis (buf, ppc_r11, (guint32) vc >> 16);
534 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) vc & 0xffff);
535 ppc_mtlr (buf, ppc_r11);
537 /* And finally put 'method' in r11 and fly! */
538 ppc_lis (buf, ppc_r11, (guint32) method >> 16);
539 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) method & 0xffff);
542 /* Flush instruction cache, since we've generated code */
543 mono_arch_flush_icache (code, buf - code);
546 g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
548 /* Store trampoline address */
551 mono_jit_stats.method_trampolines++;
559 * x86_magic_trampoline:
560 * @eax: saved x86 register
561 * @ecx: saved x86 register
562 * @edx: saved x86 register
563 * @esi: saved x86 register
564 * @edi: saved x86 register
565 * @ebx: saved x86 register
566 * @code: pointer into caller code
567 * @method: the method to translate
569 * This method is called by the trampoline functions for virtual
570 * methods. It inspects the caller code to find the address of the
571 * vtable slot, then calls the JIT compiler and writes the address
572 * of the compiled method back to the vtable. All virtual methods
573 * are called with: x86_call_membase (inst, basereg, disp). We always
574 * use 32 bit displacement to ensure that the length of the call
575 * instruction is 6 bytes. We need to get the value of the basereg
576 * and the constant displacement.
579 x86_magic_trampoline (int eax, int ecx, int edx, int esi, int edi,
580 int ebx, guint8 *code, MonoMethod *m)
/* x86 counterpart of ppc_magic_trampoline: compiles `m`, decodes the call
   instruction at the caller to find the vtable slot (basereg + disp), and
   writes the compiled address back into it.
   NOTE(review): many lines are missing from this view (declarations of
   reg/disp/o, the register-value selection switch, closing braces);
   comments describe only visible code. */
587 addr = mono_compile_method (m);
590 /* go to the start of the call instruction
592 * address_byte = (m << 6) | (o << 3) | reg
593 * call opcode: 0xff address_byte displacement
/* call r/m32 with mod=01: one-byte signed displacement. */
598 if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
599 reg = code [4] & 0x07;
600 disp = (signed char)code [5];
/* call r/m32 with mod=10: four-byte displacement. */
602 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
603 reg = code [1] & 0x07;
604 disp = *((gint32*)(code + 2));
/* Direct call (opcode 0xe8): patch its rel32 operand to the new target. */
605 } else if ((code [1] == 0xe8)) {
606 *((guint32*)(code + 2)) = (guint)addr - ((guint)code + 1) - 5;
608 } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
610 * This is a interface call: should check the above code can't catch it earlier
611 * 8b 40 30 mov 0x30(%eax),%eax
615 reg = code [5] & 0x07;
617 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
618 code [4], code [5], code [6]);
619 g_assert_not_reached ();
643 g_assert_not_reached ();
/* Write the (possibly unboxing) target into the located vtable slot and
   return it so the trampoline can jump there. */
648 if (m->klass->valuetype) {
649 return *((gpointer *)o) = get_unbox_trampoline (m, addr);
651 return *((gpointer *)o) = addr;
656 * mono_arch_create_jit_trampoline:
657 * @method: pointer to the method info
659 * Creates a trampoline function for virtual methods. If the created
660 * code is called it first starts JIT compilation of method,
661 * and then calls the newly created method. It also replaces the
662 * corresponding vtable entry (see x86_magic_trampoline).
664 * Returns: a pointer to the newly created code
667 mono_arch_create_jit_trampoline (MonoMethod *method)
/* x86 version: lazily builds the shared generic trampoline (saves registers,
   sets up/tears down the LMF, calls x86_magic_trampoline, jumps to the
   compiled method), then emits a 16-byte method-specific stub that pushes
   `method` and jumps to it.
   NOTE(review): several lines are missing from this view (locals, caching
   lookup, return); comments cover visible code only. */
671 /* previously created trampoline code */
675 if (!mono_generic_trampoline_code) {
676 mono_generic_trampoline_code = buf = g_malloc (256);
677 /* save caller save regs because we need to do a call */
678 x86_push_reg (buf, X86_EDX);
679 x86_push_reg (buf, X86_EAX);
680 x86_push_reg (buf, X86_ECX);
684 /* save the IP (caller ip) */
685 x86_push_membase (buf, X86_ESP, 16);
687 x86_push_reg (buf, X86_EBX);
688 x86_push_reg (buf, X86_EDI);
689 x86_push_reg (buf, X86_ESI);
690 x86_push_reg (buf, X86_EBP);
692 /* save method info */
693 x86_push_membase (buf, X86_ESP, 32);
694 /* get the address of lmf for the current thread */
695 x86_call_code (buf, mono_get_lmf_addr);
697 x86_push_reg (buf, X86_EAX);
698 /* push *lfm (previous_lmf) */
699 x86_push_membase (buf, X86_EAX, 0);
/* link the new LMF: *lmf_addr = esp (the LMF we just built on the stack) */
701 x86_mov_membase_reg (buf, X86_EAX, 0, X86_ESP, 4);
704 /* push the method info */
705 x86_push_membase (buf, X86_ESP, 44);
706 /* push the return address onto the stack */
707 x86_push_membase (buf, X86_ESP, 52);
709 /* save all register values */
710 x86_push_reg (buf, X86_EBX);
711 x86_push_reg (buf, X86_EDI);
712 x86_push_reg (buf, X86_ESI);
713 x86_push_membase (buf, X86_ESP, 64); /* EDX */
714 x86_push_membase (buf, X86_ESP, 64); /* ECX */
715 x86_push_membase (buf, X86_ESP, 64); /* EAX */
/* Call the C helper; it returns the compiled-code address in EAX.
   Then pop the 8 argument words pushed above. */
717 x86_call_code (buf, x86_magic_trampoline);
718 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 8*4);
720 /* restore LMF start */
721 /* ebx = previous_lmf */
722 x86_pop_reg (buf, X86_EBX);
724 x86_pop_reg (buf, X86_EDI);
725 /* *(lmf) = previous_lmf */
726 x86_mov_membase_reg (buf, X86_EDI, 0, X86_EBX, 4);
727 /* discard method info */
728 x86_pop_reg (buf, X86_ESI);
729 /* restore caller saved regs */
730 x86_pop_reg (buf, X86_EBP);
731 x86_pop_reg (buf, X86_ESI);
732 x86_pop_reg (buf, X86_EDI);
733 x86_pop_reg (buf, X86_EBX);
734 /* discard save IP */
735 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4);
736 /* restore LMF end */
/* Drop the three caller-save pushes plus the saved IP slot. */
738 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 16);
740 /* call the compiled method */
741 x86_jump_reg (buf, X86_EAX);
743 g_assert ((buf - mono_generic_trampoline_code) <= 256);
/* Method-specific stub: push `method`, jump to the generic trampoline. */
746 code = buf = g_malloc (16);
747 x86_push_imm (buf, method);
748 x86_jump_code (buf, mono_generic_trampoline_code);
749 g_assert ((buf - code) <= 16);
751 /* store trampoline address */
754 //mono_jit_stats.method_trampolines++;
762 * mono_arch_create_class_init_trampoline:
763 * @vtable: the type to initialize
765 * Creates a trampoline function to run a type initializer.
766 * If the trampoline is called, it calls mono_runtime_class_init with the
767 * given vtable, then patches the caller code so it does not get called any
770 * Returns: a pointer to the newly created code
773 mono_arch_create_class_init_trampoline (MonoVTable *vtable)
/* Builds a per-vtable trampoline that runs the type initializer once.
   NOTE(review): this view shows what appear to be TWO alternative emission
   sequences (one calling ppc_class_init_trampoline directly with a small
   frame, one jumping to the shared CLASS_INIT generic trampoline with the
   r11/-44 protocol); the selecting preprocessor/branch lines are missing —
   confirm which path is active before editing. */
775 guint8 *code, *buf, *tramp;
777 tramp = create_trampoline_code (MONO_TRAMPOLINE_CLASS_INIT);
779 /* This is the method-specific part of the trampoline. Its purpose is
780 to provide the generic part with the MonoMethod *method pointer. We'll
781 use r11 to keep that value, for instance. However, the generic part of
782 the trampoline relies on r11 having the same value it had before coming
783 here, so we must save it before. */
784 code = buf = g_malloc(METHOD_TRAMPOLINE_SIZE);
/* Path A: save LR, build a 32-byte frame, and call
   ppc_class_init_trampoline(vtable, <code>, 0) directly. */
787 ppc_mflr (buf, ppc_r4);
788 ppc_stw (buf, ppc_r4, PPC_RET_ADDR_OFFSET, ppc_sp);
789 ppc_stwu (buf, ppc_sp, -32, ppc_sp);
790 ppc_load (buf, ppc_r3, vtable);
791 ppc_load (buf, ppc_r5, 0);
793 ppc_load (buf, ppc_r0, ppc_class_init_trampoline);
794 ppc_mtlr (buf, ppc_r0);
/* Restore LR from the caller frame and pop the 32-byte frame. */
797 ppc_lwz (buf, ppc_r0, 32 + PPC_RET_ADDR_OFFSET, ppc_sp);
798 ppc_mtlr (buf, ppc_r0);
799 ppc_addic (buf, ppc_sp, ppc_sp, 32);
802 /* Save r11. There's nothing magic in the '44', its just an arbitrary
803 position - see above */
/* Path B: same r11/LR protocol as mono_arch_create_jit_trampoline, but
   with `vtable` in r11 and the CLASS_INIT generic trampoline as target. */
804 ppc_stw (buf, ppc_r11, -44, ppc_r1);
806 /* Now save LR - we'll overwrite it now */
807 ppc_mflr (buf, ppc_r11);
808 ppc_stw (buf, ppc_r11, 4, ppc_r1);
809 ppc_stw (buf, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1);
811 /* Prepare the jump to the generic trampoline code.*/
812 ppc_lis (buf, ppc_r11, (guint32) tramp >> 16);
813 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) tramp & 0xffff);
814 ppc_mtlr (buf, ppc_r11);
816 /* And finally put 'vtable' in r11 and fly! */
817 ppc_lis (buf, ppc_r11, (guint32) vtable >> 16);
818 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) vtable & 0xffff);
820 ppc_lwz (buf, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_r1);
821 ppc_mtlr (buf, ppc_r0);
826 /* Flush instruction cache, since we've generated code */
827 mono_arch_flush_icache (code, buf - code);
830 g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
832 mono_jit_stats.method_trampolines++;
838 * This method is only called when running in the Mono Debugger.
841 mono_debugger_create_notification_function (gpointer *notification_address)
845 ptr = buf = g_malloc0 (16);
847 if (notification_address)
848 *notification_address = buf;