2 * trampoline.c: JIT trampoline code
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Paolo Molaro (lupus@ximian.com)
7 * Carlos Valiente <yo@virutass.net>
9 * (C) 2001 Ximian, Inc.
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/tabledefs.h>
17 #include <mono/arch/ppc/ppc-codegen.h>
18 #include <mono/metadata/mono-debug-debugger.h>
/* NOTE(review): throughout this chunk, the number embedded at the start of
   each line is an original-file line number left over from an extraction;
   the numbers are non-contiguous, so intervening lines (including the
   comment delimiters around the doc-comment fragments below) are elided
   from this view. */
23 /* adapt to mini later... */
24 #define mono_jit_share_code (1)
/* Shared generic trampoline entry point; also consulted by the debugger to
   decide whether a code address is a trampoline (see doc fragment below,
   whose opening and closing delimiters are elided). */
27 * Address of the x86 trampoline code. This is used by the debugger to check
28 * whether a method is a trampoline.
30 guint8 *mono_generic_trampoline_code = NULL;
33 * Address of a special breakpoint trampoline code for the debugger.
35 guint8 *mono_breakpoint_trampoline_code = NULL;
38 * get_unbox_trampoline:
40 * @addr: pointer to native code for @m
42 * when value type methods are called through the vtable we need to unbox the
43 * this argument. This method returns a pointer to a trampoline which does
44 * unboxing before calling the method
/* NOTE(review): the return-type line, local declarations (code, start,
   this_pos) and the trailing return/closing brace are elided from this
   view (embedded line numbers jump 44 -> 47 -> 52 ...). */
47 get_unbox_trampoline (MonoMethod *m, gpointer addr)
/* When a struct is returned by value, the first register presumably holds
   a hidden return-buffer pointer, shifting `this` to the next register;
   the branch body that adjusts `this_pos` is elided -- TODO confirm. */
52 if (!m->signature->ret->byref && MONO_TYPE_ISSTRUCT (m->signature->ret))
55 start = code = g_malloc (20);
/* Emit: load the compiled-code address into r11, move it to CTR, advance
   the `this` pointer past the MonoObject header so the callee sees the raw
   value-type payload, then branch unconditionally to CTR (bcctr BO=20).
   The g_assert below bounds the emitted fragment to the 20-byte buffer. */
57 ppc_load (code, ppc_r11, addr);
58 ppc_mtctr (code, ppc_r11);
59 ppc_addi (code, this_pos, this_pos, sizeof (MonoObject));
60 ppc_bcctr (code, 20, 0);
61 g_assert ((code - start) <= 20);
67 * get_breakpoint_trampoline:
69 * @addr: pointer to native code for @m
71 * creates a special trampoline for the debugger which is used to get
72 * a breakpoint after compiling a method.
/* NOTE(review): return-type line, remaining locals and the tail of this
   function are elided from this view.  All x86 emitter calls below are
   commented out -- presumably pending a PPC port -- so as written the
   allocated buffers receive no instructions; verify before enabling. */
75 get_breakpoint_trampoline (MonoMethod *m, guint32 breakpoint_id, gpointer addr)
77 guint8 *code, *start, *buf;
79 if (!mono_breakpoint_trampoline_code) {
/* Lazily allocate the shared 8-byte breakpoint stub on first use. */
80 mono_breakpoint_trampoline_code = buf = g_malloc (8);
83 /* x86_breakpoint (buf);
84 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 8);
87 g_assert ((buf - mono_breakpoint_trampoline_code) <= 8);
/* Per-method 22-byte fragment: would push addr, breakpoint_id and the
   method, then jump to the shared stub (x86 code commented out). */
90 start = code = g_malloc (22);
92 /* x86_push_imm (code, addr);
93 x86_push_imm (code, breakpoint_id);
94 x86_push_imm (code, m);
95 x86_jump_code (code, mono_breakpoint_trampoline_code);*/
96 g_assert ((code - start) <= 22);
101 /* Stack size for trampoline function */
/* NOTE(review): the actual `#define STACK ...` line is elided from this
   view; the offsets used below assume it covers r0-r31 plus linkage. */
104 /* Method-specific trampoline code fragment size */
105 #define METHOD_TRAMPOLINE_SIZE 64
108 * ppc_magic_trampoline:
109 * @code: pointer into caller code
110 * @method: the method to translate
113 * This method is called by the function 'arch_create_jit_trampoline', which in
114 * turn is called by the trampoline functions for virtual methods.
115 * After having called the JIT compiler to compile the method, it inspects the
116 * caller code to find the address of the method-specific part of the
117 * trampoline vtable slot for this method, updates it with a fragment that calls
118 * the newly compiled code and returns this address of the compiled code to
119 * 'arch_create_jit_trampoline'
/* NOTE(review): the return type, local declarations (addr, reg, o, start),
   the backward-walk loop over *code, the switch statement header and
   several closing braces are elided from this view (embedded original
   line numbers are non-contiguous). */
122 ppc_magic_trampoline (MonoMethod *method, guint32 *code, char *sp)
/* JIT-compile the target method under the global metadata lock. */
128 EnterCriticalSection(metadata_section);
129 addr = mono_compile_method(method);
130 LeaveCriticalSection(metadata_section);
133 /* Locate the address of the method-specific trampoline. The call using
134 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
135 looks something like this:
137 mtlr rA ; Move rA (a register containing the
138 ; target address) to LR
139 blrl ; Call function at LR
141 PowerPC instructions are 32-bit long, which means that a 32-bit target
142 address cannot be encoded as an immediate value (because we already
143 have spent some bits to encode the branch instruction!). That's why a
144 'b'ranch to the contents of the 'l'ink 'r'egister (with 'l'ink register
145 update) is needed, instead of a simpler 'branch immediate'. This
146 complicates our purpose here, because 'blrl' overwrites LR, which holds
147 the value we're interested in.
149 Therefore, we need to locate the 'mtlr rA' instruction to know which
150 register LR was loaded from, and then retrieve the value from that
153 /* This is the 'blrl' instruction */
157 * Note that methods are called also with the bl opcode.
/* Direct call: PowerPC major opcode 18 is b/bl -- patch the branch target
   in place and flush that one 4-byte instruction from the icache. */
159 if (((*code) >> 26) == 18) {
160 ppc_patch (code, addr);
161 mono_arch_flush_icache (code, 4);
165 /* Sanity check: instruction must be 'blrl' */
166 g_assert(*code == 0x4e800021);
168 /* OK, we're now at the 'blrl' instruction. Now walk backwards
169 till we get to a 'mtlr rA' */
/* NOTE(review): `(x & M) == M` with M == 0x7c0803a6 only requires those
   bits to be SET; it also matches related mtspr encodings such as mtctr
   (0x7c0903a6), not exclusively `mtlr rA` -- verify this cannot misfire
   on the instruction stream being scanned. */
171 if((*code & 0x7c0803a6) == 0x7c0803a6) {
172 /* Here we are: we reached the 'mtlr rA'.
173 Extract the register from the instruction */
174 reg = (*code & 0x03e00000) >> 21;
/* Fetch rA's saved value from the generic trampoline's frame (sp points
   into that frame).  NOTE(review): these offsets do not match the save
   layout emitted in mono_arch_create_jit_trampoline (e.g. r11's original
   value is stored at STACK - 44 there but read from STACK - 24 here;
   r14 is stored at STACK - 56 but read from STACK - 36) -- one of the
   two tables looks stale; confirm against the trampoline prologue. */
176 case 0 : o = *((int *) (sp + STACK - 8)); break;
177 case 11: o = *((int *) (sp + STACK - 24)); break;
178 case 12: o = *((int *) (sp + STACK - 28)); break;
179 case 13: o = *((int *) (sp + STACK - 32)); break;
180 case 14: o = *((int *) (sp + STACK - 36)); break;
181 case 15: o = *((int *) (sp + STACK - 40)); break;
182 case 16: o = *((int *) (sp + STACK - 44)); break;
183 case 17: o = *((int *) (sp + STACK - 48)); break;
184 case 18: o = *((int *) (sp + STACK - 52)); break;
185 case 19: o = *((int *) (sp + STACK - 56)); break;
186 case 20: o = *((int *) (sp + STACK - 60)); break;
187 case 21: o = *((int *) (sp + STACK - 64)); break;
188 case 22: o = *((int *) (sp + STACK - 68)); break;
189 case 23: o = *((int *) (sp + STACK - 72)); break;
190 case 24: o = *((int *) (sp + STACK - 76)); break;
191 case 25: o = *((int *) (sp + STACK - 80)); break;
192 case 26: o = *((int *) (sp + STACK - 84)); break;
193 case 27: o = *((int *) (sp + STACK - 88)); break;
194 case 28: o = *((int *) (sp + STACK - 92)); break;
195 case 29: o = *((int *) (sp + STACK - 96)); break;
196 case 30: o = *((int *) (sp + STACK - 100)); break;
197 case 31: o = *((int *) (sp + STACK - 4)); break;
/* Registers r1-r10 (sp and argument registers) are not expected as mtlr
   sources here; the default case aborts.  The printf's remaining
   arguments are elided from this view. */
199 printf("%s: Unexpected register %d\n",
201 g_assert_not_reached();
207 /* this is not done for non-virtual calls, because in that case
208 we won't have an object, but the actual pointer to the
209 valuetype as the this argument
211 if (method->klass->valuetype)
212 addr = get_unbox_trampoline (method, addr);
214 /* Finally, replace the method-specific trampoline code (which called
215 the generic trampoline code) with a fragment that calls directly the
/* NOTE(review): from here `o` is reused as the code-emission cursor (the
   `start = o = ...` assignment is elided).  The fragment builds a
   16-byte frame, saves LR and r31, loads the compiled address into LR
   via lis/ori (the blrl itself appears to be elided), then tears the
   frame down. */
219 ppc_stwu (o, ppc_r1, -16, ppc_r1);
220 ppc_mflr (o, ppc_r0);
221 ppc_stw (o, ppc_r31, 12, ppc_r1);
222 ppc_stw (o, ppc_r0, 20, ppc_r1);
223 ppc_mr (o, ppc_r31, ppc_r1);
225 ppc_lis (o, ppc_r0, (guint32) addr >> 16);
226 ppc_ori (o, ppc_r0, ppc_r0, (guint32) addr & 0xffff);
227 ppc_mtlr (o, ppc_r0);
/* Emitted epilogue: follow the back chain at 0(r1), reload the saved LR
   (4 bytes into the caller frame) and r31, and restore the caller's sp. */
230 ppc_lwz (o, ppc_r11, 0, ppc_r1);
231 ppc_lwz (o, ppc_r0, 4, ppc_r11);
232 ppc_mtlr (o, ppc_r0);
233 ppc_lwz (o, ppc_r31, -4, ppc_r11);
234 ppc_mr (o, ppc_r1, ppc_r11);
237 mono_arch_flush_icache (start, o - start);
238 g_assert(o - start < METHOD_TRAMPOLINE_SIZE);
244 * arch_create_jit_trampoline:
245 * @method: pointer to the method info
247 * Creates a trampoline function for virtual methods. If the created
248 * code is called it first starts JIT compilation of method,
249 * and then calls the newly created method. It also replaces the
250 * corresponding vtable entry (see ppc_magic_trampoline).
252 * A trampoline consists of two parts: a main fragment, shared by all method
253 * trampolines, and some code specific to each method, which hard-codes a
254 * reference to that method and then calls the main fragment.
256 * The main fragment contains a call to 'ppc_magic_trampoline', which performs
257 * call to the JIT compiler and substitutes the method-specific fragment with
258 * some code that directly calls the JIT-compiled method.
260 * Returns: a pointer to the newly created code
/* NOTE(review): this is the PowerPC variant; a second definition with the
   same name appears later in this chunk (x86), presumably separated by
   #ifdef in the original file -- confirm.  The return type, locals (buf,
   code, nm), several early `return` statements, the `if (vc) ...` reuse
   path, the blrl emissions after each mtlr below, and the final return
   are all elided from this view. */
263 mono_arch_create_jit_trampoline (MonoMethod *method)
/* Cached shared ("common") trampoline fragment, built once per process. */
266 static guint8 *vc = NULL;
268 /* previously created trampoline code */
272 /* we immediately compile runtime provided functions */
273 if (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) {
274 method->info = mono_compile_method (method);
278 /* icalls use method->addr */
279 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
280 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
283 nm = mono_marshal_get_native_wrapper (method);
284 method->info = mono_compile_method (nm);
/* Synchronized methods: recurse on the lock-wrapping marshal wrapper. */
288 if (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)
289 return mono_arch_create_jit_trampoline (mono_marshal_get_synchronized_wrapper (method));
292 /* Now we'll create in 'buf' the PowerPC trampoline code. This
293 is the trampoline code common to all methods */
295 vc = buf = g_malloc(512);
297 /*-----------------------------------------------------------
298 STEP 0: First create a non-standard function prologue with a
299 stack size big enough to save our registers:
301 lr (We'll be calling functions here, so we
303 r0 (See ppc_magic_trampoline)
304 r1 (sp) (Stack pointer - must save)
305 r3-r10 Function arguments.
306 r11-r31 (See ppc_magic_trampoline)
307 method in r11 (See ppc_magic_trampoline)
309 This prologue is non-standard because r0 is not saved here - it
310 was saved in the method-specific trampoline code
311 -----------------------------------------------------------*/
313 ppc_stwu (buf, ppc_r1, -STACK, ppc_r1);
315 /* Save r0 before modifying it - we will need its contents in
316 'ppc_magic_trampoline' */
317 ppc_stw (buf, ppc_r0, STACK - 8, ppc_r1);
319 ppc_stw (buf, ppc_r31, STACK - 4, ppc_r1);
320 ppc_mr (buf, ppc_r31, ppc_r1);
322 /* Now save our registers. */
323 ppc_stw (buf, ppc_r3, STACK - 12, ppc_r1);
324 ppc_stw (buf, ppc_r4, STACK - 16, ppc_r1);
325 ppc_stw (buf, ppc_r5, STACK - 20, ppc_r1);
326 ppc_stw (buf, ppc_r6, STACK - 24, ppc_r1);
327 ppc_stw (buf, ppc_r7, STACK - 28, ppc_r1);
328 ppc_stw (buf, ppc_r8, STACK - 32, ppc_r1);
329 ppc_stw (buf, ppc_r9, STACK - 36, ppc_r1);
330 ppc_stw (buf, ppc_r10, STACK - 40, ppc_r1);
331 /* STACK - 44 contains r11, which is set in the method-specific
332 part of the trampoline (see below this 'if' block) */
333 ppc_stw (buf, ppc_r12, STACK - 48, ppc_r1);
334 ppc_stw (buf, ppc_r13, STACK - 52, ppc_r1);
335 ppc_stw (buf, ppc_r14, STACK - 56, ppc_r1);
336 ppc_stw (buf, ppc_r15, STACK - 60, ppc_r1);
337 ppc_stw (buf, ppc_r16, STACK - 64, ppc_r1);
338 ppc_stw (buf, ppc_r17, STACK - 68, ppc_r1);
339 ppc_stw (buf, ppc_r18, STACK - 72, ppc_r1);
340 ppc_stw (buf, ppc_r19, STACK - 76, ppc_r1);
341 ppc_stw (buf, ppc_r20, STACK - 80, ppc_r1);
342 ppc_stw (buf, ppc_r21, STACK - 84, ppc_r1);
343 ppc_stw (buf, ppc_r22, STACK - 88, ppc_r1);
344 ppc_stw (buf, ppc_r23, STACK - 92, ppc_r1);
345 ppc_stw (buf, ppc_r24, STACK - 96, ppc_r1);
346 ppc_stw (buf, ppc_r25, STACK - 100, ppc_r1);
347 ppc_stw (buf, ppc_r26, STACK - 104, ppc_r1);
348 ppc_stw (buf, ppc_r27, STACK - 108, ppc_r1);
349 ppc_stw (buf, ppc_r28, STACK - 112, ppc_r1);
350 ppc_stw (buf, ppc_r29, STACK - 116, ppc_r1);
351 ppc_stw (buf, ppc_r30, STACK - 120, ppc_r1);
352 /* Save 'method' pseudo-parameter - the one passed in r11 */
353 ppc_stw (buf, ppc_r11, STACK - 124, ppc_r1);
355 /*----------------------------------------------------------
356 STEP 1: call 'mono_get_lmf_addr()' to get the address of our
357 LMF. We'll need to restore it after the call to
358 'ppc_magic_trampoline' and before the call to the native
360 ----------------------------------------------------------*/
362 /* Calculate the address and make the call. Keep in mind that
363 we're using r0, so we'll have to restore it before calling
364 'ppc_magic_trampoline' */
365 ppc_lis (buf, ppc_r0, (guint32) mono_get_lmf_addr >> 16);
366 ppc_ori (buf, ppc_r0, ppc_r0, (guint32) mono_get_lmf_addr & 0xffff);
367 ppc_mtlr (buf, ppc_r0);
/* NOTE(review): the blrl that actually performs this call is elided. */
370 /* XXX Update LMF !!! */
372 /*----------------------------------------------------------
373 STEP 2: call 'ppc_magic_trampoline()', who will compile the
374 code and fix the method vtable entry for us
375 ----------------------------------------------------------*/
379 /* Arg 1: MonoMethod *method. It was put in r11 by the
380 method-specific trampoline code, and then saved before the call
381 to mono_get_lmf_addr()'. Restore r11, by the way :-) */
382 ppc_lwz (buf, ppc_r3, STACK - 124, ppc_r1);
383 ppc_lwz (buf, ppc_r11, STACK - 44, ppc_r1);
385 /* Arg 2: code (next address to the instruction that called us) */
386 ppc_lwz (buf, ppc_r4, STACK + 4, ppc_r1);
388 /* Arg 3: stack pointer */
389 ppc_mr (buf, ppc_r5, ppc_r1);
391 /* Calculate call address, restore r0 and call
392 'ppc_magic_trampoline'. Return value will be in r3 */
393 ppc_lis (buf, ppc_r0, (guint32) ppc_magic_trampoline >> 16);
394 ppc_ori (buf, ppc_r0, ppc_r0, (guint32) ppc_magic_trampoline & 0xffff);
395 ppc_mtlr (buf, ppc_r0);
396 ppc_lwz (buf, ppc_r0, STACK - 8, ppc_r1);
399 /* OK, code address is now on r3. Move it to r0, so that we
400 can restore r3 and use it from r0 later */
401 ppc_mr (buf, ppc_r0, ppc_r3);
404 /*----------------------------------------------------------
405 STEP 3: Restore the LMF
406 ----------------------------------------------------------*/
410 /*----------------------------------------------------------
411 STEP 4: call the compiled method
412 ----------------------------------------------------------*/
414 /* Restore registers */
416 ppc_lwz (buf, ppc_r3, STACK - 12, ppc_r1);
417 ppc_lwz (buf, ppc_r4, STACK - 16, ppc_r1);
418 ppc_lwz (buf, ppc_r5, STACK - 20, ppc_r1);
419 ppc_lwz (buf, ppc_r6, STACK - 24, ppc_r1);
420 ppc_lwz (buf, ppc_r7, STACK - 28, ppc_r1);
421 ppc_lwz (buf, ppc_r8, STACK - 32, ppc_r1);
422 ppc_lwz (buf, ppc_r9, STACK - 36, ppc_r1);
423 ppc_lwz (buf, ppc_r10, STACK - 40, ppc_r1);
425 /* We haven't touched any of these, so there's no need to
428 ppc_lwz (buf, ppc_r14, STACK - 56, ppc_r1);
429 ppc_lwz (buf, ppc_r15, STACK - 60, ppc_r1);
430 ppc_lwz (buf, ppc_r16, STACK - 64, ppc_r1);
431 ppc_lwz (buf, ppc_r17, STACK - 68, ppc_r1);
432 ppc_lwz (buf, ppc_r18, STACK - 72, ppc_r1);
433 ppc_lwz (buf, ppc_r19, STACK - 76, ppc_r1);
434 ppc_lwz (buf, ppc_r20, STACK - 80, ppc_r1);
435 ppc_lwz (buf, ppc_r21, STACK - 84, ppc_r1);
436 ppc_lwz (buf, ppc_r22, STACK - 88, ppc_r1);
437 ppc_lwz (buf, ppc_r23, STACK - 92, ppc_r1);
438 ppc_lwz (buf, ppc_r24, STACK - 96, ppc_r1);
439 ppc_lwz (buf, ppc_r25, STACK - 100, ppc_r1);
440 ppc_lwz (buf, ppc_r26, STACK - 104, ppc_r1);
441 ppc_lwz (buf, ppc_r27, STACK - 108, ppc_r1);
442 ppc_lwz (buf, ppc_r28, STACK - 112, ppc_r1);
443 ppc_lwz (buf, ppc_r29, STACK - 116, ppc_r1);
444 ppc_lwz (buf, ppc_r30, STACK - 120, ppc_r1);
447 /* Non-standard function epilogue. Instead of doing a proper
448 return, we just call the compiled code, so
449 that, when it finishes, the method returns here. */
451 ppc_mtlr (buf, ppc_r0);
/* NOTE(review): the blrl into the compiled method is elided here. */
454 /* Restore stack pointer, r31, LR and return to caller */
455 ppc_lwz (buf, ppc_r11, 0, ppc_r1);
456 ppc_lwz (buf, ppc_r31, -4, ppc_r11);
457 ppc_mr (buf, ppc_r1, ppc_r11);
458 ppc_lwz (buf, ppc_r0, 4, ppc_r1);
459 ppc_mtlr (buf, ppc_r0);
462 /* Flush instruction cache, since we've generated code */
463 mono_arch_flush_icache (vc, buf - vc);
466 g_assert ((buf - vc) <= 512);
469 /* This is the method-specific part of the trampoline. Its purpose is
470 to provide the generic part with the MonoMethod *method pointer. We'll
471 use r11 to keep that value, for instance. However, the generic part of
472 the trampoline relies on r11 having the same value it had before coming
473 here, so we must save it before. */
474 code = buf = g_malloc(METHOD_TRAMPOLINE_SIZE);
476 /* Save r11. There's nothing magic in the '44', its just an arbitrary
477 position - see above */
478 ppc_stw (buf, ppc_r11, -44, ppc_r1);
480 /* Now save LR - we'll overwrite it now */
481 ppc_mflr (buf, ppc_r11);
482 ppc_stw (buf, ppc_r11, 4, ppc_r1);
484 /* Prepare the jump to the generic trampoline code.*/
485 ppc_lis (buf, ppc_r11, (guint32) vc >> 16);
486 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) vc & 0xffff);
487 ppc_mtlr (buf, ppc_r11);
489 /* And finally put 'method' in r11 and fly! */
490 ppc_lis (buf, ppc_r11, (guint32) method >> 16);
491 ppc_ori (buf, ppc_r11, ppc_r11, (guint32) method & 0xffff);
494 /* Flush instruction cache, since we've generated code */
495 mono_arch_flush_icache (code, buf - code);
498 g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
500 /* store trampoline address */
503 mono_jit_stats.method_trampolines++;
511 * x86_magic_trampoline:
512 * @eax: saved x86 register
513 * @ecx: saved x86 register
514 * @edx: saved x86 register
515 * @esi: saved x86 register
516 * @edi: saved x86 register
517 * @ebx: saved x86 register
518 * @code: pointer into caller code
519 * @method: the method to translate
521 * This method is called by the trampoline functions for virtual
522 * methods. It inspects the caller code to find the address of the
523 * vtable slot, then calls the JIT compiler and writes the address
524 * of the compiled method back to the vtable. All virtual methods
525 * are called with: x86_call_membase (inst, basereg, disp). We always
526 * use 32 bit displacement to ensure that the length of the call
527 * instruction is 6 bytes. We need to get the value of the basereg
528 * and the constant displacement.
/* NOTE(review): the return type, locals (reg, disp, o, ...), the code
   pointer adjustment before the decode, the computation of `o` from
   reg/disp and several closing braces are elided from this view. */
531 x86_magic_trampoline (int eax, int ecx, int edx, int esi, int edi,
532 int ebx, guint8 *code, MonoMethod *m)
537 guint32 breakpoint_id;
538 gpointer addr, trampoline;
/* JIT-compile the target under the global metadata lock. */
540 EnterCriticalSection (metadata_section);
541 addr = mono_compile_method (m);
542 LeaveCriticalSection (metadata_section);
545 /* go to the start of the call instruction
547 * address_byte = (m << 6) | (o << 3) | reg
548 * call opcode: 0xff address_byte displacement
/* call membase with 8-bit displacement: 0xff, ModR/M with mod=01 and
   reg field /2 (call). */
553 if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
554 reg = code [4] & 0x07;
555 disp = (signed char)code [5];
/* call membase with 32-bit displacement (mod=10). */
557 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
558 reg = code [1] & 0x07;
559 disp = *((gint32*)(code + 2));
/* Direct call (0xe8 rel32): only expected when a debugger breakpoint
   trampoline was inserted; re-arm it, or else patch the rel32 in place.
   NOTE(review): this path calls mono_remove_breakpoint while the code
   at the end of the function calls mono_debugger_remove_breakpoint --
   confirm whether both names are intended. */
560 } else if ((code [1] == 0xe8)) {
561 breakpoint_id = mono_debugger_method_has_breakpoint (m, TRUE);
563 mono_remove_breakpoint (breakpoint_id);
564 trampoline = get_breakpoint_trampoline (m, breakpoint_id, addr);
567 *((guint32*)(code + 2)) = (guint)addr - ((guint)code + 1) - 5;
/* mod=00, /2: register-indirect call used for interface dispatch. */
569 } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
571 * This is a interface call: should check the above code can't catch it earlier
572 * 8b 40 30 mov 0x30(%eax),%eax
576 reg = code [5] & 0x07;
578 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
579 code [4], code [5], code [6]);
580 g_assert_not_reached ();
604 g_assert_not_reached ();
/* Write the compiled address (or an unbox thunk for valuetype methods)
   back into the vtable slot `o` so future calls skip the trampoline. */
609 if (m->klass->valuetype) {
610 trampoline = *((gpointer *)o) = get_unbox_trampoline (m, addr);
612 trampoline = *((gpointer *)o) = addr;
/* If the debugger wants a breakpoint on this method, hand back a
   breakpoint trampoline instead of the raw compiled address. */
615 breakpoint_id = mono_debugger_method_has_breakpoint (m, TRUE);
617 mono_debugger_remove_breakpoint (breakpoint_id);
618 return get_breakpoint_trampoline (m, breakpoint_id, trampoline);
625 * mono_arch_create_jit_trampoline:
626 * @method: pointer to the method info
628 * Creates a trampoline function for virtual methods. If the created
629 * code is called it first starts JIT compilation of method,
630 * and then calls the newly created method. It also replaces the
631 * corresponding vtable entry (see x86_magic_trampoline).
633 * Returns: a pointer to the newly created code
/* NOTE(review): this is the x86 variant and duplicates the PPC definition
   earlier in this chunk -- presumably the original file keeps them under
   #ifdef; confirm.  The return type, locals (buf, code, nm), the cached
   `method->info` early-return path, several `return` statements and
   closing braces are elided from this view. */
636 mono_arch_create_jit_trampoline (MonoMethod *method)
640 /* previously created trampoline code */
644 /* we immediately compile runtime provided functions */
645 if (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) {
646 method->info = mono_compile_method (method);
650 /* icalls use method->addr */
651 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
652 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
655 nm = mono_marshal_get_native_wrapper (method);
656 method->info = mono_compile_method (nm);
/* Build the shared generic trampoline once; it saves registers, sets up
   the LMF, calls x86_magic_trampoline and jumps to the compiled code. */
660 if (!mono_generic_trampoline_code) {
661 mono_generic_trampoline_code = buf = g_malloc (256);
662 /* save caller save regs because we need to do a call */
663 x86_push_reg (buf, X86_EDX);
664 x86_push_reg (buf, X86_EAX);
665 x86_push_reg (buf, X86_ECX);
669 /* save the IP (caller ip) */
670 x86_push_membase (buf, X86_ESP, 16);
672 x86_push_reg (buf, X86_EBX);
673 x86_push_reg (buf, X86_EDI);
674 x86_push_reg (buf, X86_ESI);
675 x86_push_reg (buf, X86_EBP);
677 /* save method info */
678 x86_push_membase (buf, X86_ESP, 32);
679 /* get the address of lmf for the current thread */
680 x86_call_code (buf, mono_get_lmf_addr);
682 x86_push_reg (buf, X86_EAX);
683 /* push *lmf (previous_lmf) */
684 x86_push_membase (buf, X86_EAX, 0);
/* Link the new LMF: *lmf_addr = esp + 4 (start of our LMF record). */
686 x86_mov_membase_reg (buf, X86_EAX, 0, X86_ESP, 4);
689 /* push the method info */
690 x86_push_membase (buf, X86_ESP, 44);
691 /* push the return address onto the stack */
692 x86_push_membase (buf, X86_ESP, 52);
694 /* save all register values */
695 x86_push_reg (buf, X86_EBX);
696 x86_push_reg (buf, X86_EDI);
697 x86_push_reg (buf, X86_ESI);
698 x86_push_membase (buf, X86_ESP, 64); /* EDX */
699 x86_push_membase (buf, X86_ESP, 64); /* ECX */
700 x86_push_membase (buf, X86_ESP, 64); /* EAX */
/* Call x86_magic_trampoline(eax, ecx, edx, esi, edi, ebx, code, method)
   and pop its 8 dword arguments. */
702 x86_call_code (buf, x86_magic_trampoline);
703 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 8*4);
705 /* restore LMF start */
706 /* ebx = previous_lmf */
707 x86_pop_reg (buf, X86_EBX);
/* edi = lmf_addr */
709 x86_pop_reg (buf, X86_EDI);
710 /* *(lmf) = previous_lmf */
711 x86_mov_membase_reg (buf, X86_EDI, 0, X86_EBX, 4);
712 /* discard method info */
713 x86_pop_reg (buf, X86_ESI);
714 /* restore caller saved regs */
715 x86_pop_reg (buf, X86_EBP);
716 x86_pop_reg (buf, X86_ESI);
717 x86_pop_reg (buf, X86_EDI);
718 x86_pop_reg (buf, X86_EBX);
719 /* discard save IP */
720 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4);
721 /* restore LMF end */
/* Pop the three caller-save pushes plus the saved caller IP. */
723 x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 16);
725 /* call the compiled method */
726 x86_jump_reg (buf, X86_EAX);
728 g_assert ((buf - mono_generic_trampoline_code) <= 256);
/* Per-method stub: push the MonoMethod* and jump to the shared code. */
731 code = buf = g_malloc (16);
732 x86_push_imm (buf, method);
733 x86_jump_code (buf, mono_generic_trampoline_code);
734 g_assert ((buf - code) <= 16);
736 /* store trampoline address */
739 //mono_jit_stats.method_trampolines++;