* Merged in twisti-branch.
[cacao.git] / src / vm / jit / powerpc / asmpart.S
index a8322ce2f8769e535851bee37a926da4439373cf..6b4dd2d6d777910d417a7ba6275c4b4a00eeaf09 100644
@@ -1,6 +1,6 @@
 /* src/vm/jit/powerpc/asmpart.S - Java-C interface functions for PowerPC
                
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
    J. Wenninger, Institut f. Computersprachen - TU Wien
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Andreas Krall
-            Reinhard Grafl
-            Stefan Ring
-
-   Changes: Christian Thalinger
-            Edwin Steiner
-
-   $Id: asmpart.S 5077 2006-07-04 19:06:56Z twisti $
+   $Id: asmpart.S 7454 2007-03-05 15:40:48Z tbfg $
 
 */
 
@@ -60,6 +51,7 @@
        .globl asm_vm_call_method_double
 
        .globl asm_vm_call_method_exception_handler
+       .globl asm_vm_call_method_end
 
        .globl asm_call_jit_compiler
 
 
        .globl asm_abstractmethoderror
 
-       .globl asm_wrapper_patcher
+       .globl asm_patcher_wrapper
 
+#if defined(ENABLE_REPLACEMENT)
        .globl asm_replacement_out
        .globl asm_replacement_in
+#endif
 
        .globl asm_cacheflush
+
+       .globl asm_compare_and_swap
+       .globl asm_memory_barrier
+
        .globl asm_criticalsections
        .globl asm_getclassvalues_atomic
 
        .long   0                         /* line number table size               */
        .long   0                         /* fltsave                              */
        .long   0                         /* intsave                              */
-       .long   0                         /* isleaf                               */
+       .long   0                         /* IsLeaf                               */
        .long   0                         /* IsSync                               */
        .long   0                         /* frame size                           */
        .long   0                         /* codeinfo pointer                     */
@@ -114,8 +112,8 @@ asm_vm_call_method_long:
 asm_vm_call_method_float:
 asm_vm_call_method_double:
        mflr    r0
-       stw     r0,LA_LR_OFFSET(r1)
-       stwu    r1,-40*4(r1)
+       stw     r0,LA_LR_OFFSET(sp)
+       stwu    sp,-40*4(sp)              /* keep stack 16-byte aligned           */
 
        stw     s0,8*4(sp)                /* save used callee saved registers     */
        stw     a0,9*4(sp)                /* save method pointer for compiler     */
@@ -130,20 +128,20 @@ asm_vm_call_method_double:
        stfd    ftmp2,16*4(sp)
 
 #if defined(__DARWIN__)
-       stw     t1,18*4(r1)
-       stw     t2,19*4(r1)
-       stw     t3,20*4(r1)
-       stw     t4,21*4(r1)
-       stw     t5,22*4(r1)
-       stw     t6,23*4(r1)
-       stw     t7,24*4(r1)
-
-       stfd    ft0,26*4(r1)
-       stfd    ft1,28*4(r1)
-       stfd    ft2,30*4(r1)
-       stfd    ft3,32*4(r1)
-       stfd    ft4,34*4(r1)
-       stfd    ft5,36*4(r1)
+       stw     t1,18*4(sp)
+       stw     t2,19*4(sp)
+       stw     t3,20*4(sp)
+       stw     t4,21*4(sp)
+       stw     t5,22*4(sp)
+       stw     t6,23*4(sp)
+       stw     t7,24*4(sp)
+
+       stfd    ft0,26*4(sp)
+       stfd    ft1,28*4(sp)
+       stfd    ft2,30*4(sp)
+       stfd    ft3,32*4(sp)
+       stfd    ft4,34*4(sp)
+       stfd    ft5,36*4(sp)
 #else
        SAVE_TEMPORARY_REGISTERS(18)      /* the offset has to be even            */
 #endif
@@ -412,31 +410,31 @@ L_asm_vm_call_method_return:
        lfd     ftmp2,16*4(sp)
 
 #if defined(__DARWIN__)
-       lwz     t1,18*4(r1)
-       lwz     t2,19*4(r1)
-       lwz     t3,20*4(r1)
-       lwz     t4,21*4(r1)
-       lwz     t5,22*4(r1)
-       lwz     t6,23*4(r1)
-       lwz     t7,24*4(r1)
-
-       lfd     ft0,26*4(r1)
-       lfd     ft1,28*4(r1)
-       lfd     ft2,30*4(r1)
-       lfd     ft3,32*4(r1)
-       lfd     ft4,34*4(r1)
-       lfd     ft5,36*4(r1)
+       lwz     t1,18*4(sp)
+       lwz     t2,19*4(sp)
+       lwz     t3,20*4(sp)
+       lwz     t4,21*4(sp)
+       lwz     t5,22*4(sp)
+       lwz     t6,23*4(sp)
+       lwz     t7,24*4(sp)
+
+       lfd     ft0,26*4(sp)
+       lfd     ft1,28*4(sp)
+       lfd     ft2,30*4(sp)
+       lfd     ft3,32*4(sp)
+       lfd     ft4,34*4(sp)
+       lfd     ft5,36*4(sp)
 #else
        RESTORE_TEMPORARY_REGISTERS(18)   /* the offset has to be even            */
 #endif
 
-       lwz     r0,40*4+LA_LR_OFFSET(r1)
+       lwz     r0,40*4+LA_LR_OFFSET(sp)
        mtlr    r0
-       addi    r1,r1,40*4
+       addi    sp,sp,40*4
        blr
 
 asm_vm_call_method_exception_handler:
-       mr      r3,itmp1
+       mr      a0,itmp1
        bl      builtin_throw_exception
        b       L_asm_vm_call_method_return
 
@@ -682,6 +680,8 @@ L_handle_fda12:
        b       L_register_copy
 #endif
 
+asm_vm_call_method_end:
+       nop
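
The asm_vm_call_method_end symbol newly exported above marks the first address
after the hand-written method-call code.  The usual purpose of such an end
marker is a range check on a program counter, e.g. during stack walking; a
minimal sketch, assuming the symbols are visible to C as plain addresses (the
helper name is hypothetical):

    extern char asm_vm_call_method[];       /* start of the call-in code */
    extern char asm_vm_call_method_end[];   /* first byte after it       */

    static int pc_in_asm_vm_call_method(void *pc)
    {
        return (char *) pc >= asm_vm_call_method
            && (char *) pc <  asm_vm_call_method_end;
    }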
 
 /* asm_call_jit_compiler *******************************************************
 
@@ -692,74 +692,74 @@ L_handle_fda12:
 asm_call_jit_compiler:
 L_asm_call_jit_compiler:                /* required for PIC code              */
        mflr    r0
-       stw     r0,LA_LR_OFFSET(r1)         /* save return address                */
-       stwu    r1,-(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)(r1)
+       stw     r0,LA_LR_OFFSET(sp)         /* save return address                */
+       stwu    sp,-(LA_SIZE + 4*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)(sp)
 
 #if defined(__DARWIN__)
-       stw     a0,(LA_WORD_SIZE+5+0)*4(r1)
-       stw     a1,(LA_WORD_SIZE+5+1)*4(r1)
-       stw     a2,(LA_WORD_SIZE+5+2)*4(r1)
-       stw     a3,(LA_WORD_SIZE+5+3)*4(r1)
-       stw     a4,(LA_WORD_SIZE+5+4)*4(r1)
-       stw     a5,(LA_WORD_SIZE+5+5)*4(r1)
-       stw     a6,(LA_WORD_SIZE+5+6)*4(r1)
-       stw     a7,(LA_WORD_SIZE+5+7)*4(r1)
-
-       stfd    fa0,(LA_WORD_SIZE+5+8)*4(r1)
-       stfd    fa1,(LA_WORD_SIZE+5+10)*4(r1)
-       stfd    fa2,(LA_WORD_SIZE+5+12)*4(r1)
-       stfd    fa3,(LA_WORD_SIZE+5+14)*4(r1)
-       stfd    fa4,(LA_WORD_SIZE+5+16)*4(r1)
-       stfd    fa5,(LA_WORD_SIZE+5+18)*4(r1)
-       stfd    fa6,(LA_WORD_SIZE+5+20)*4(r1)
-       stfd    fa7,(LA_WORD_SIZE+5+22)*4(r1)
-       stfd    fa8,(LA_WORD_SIZE+5+24)*4(r1)
-       stfd    fa9,(LA_WORD_SIZE+5+26)*4(r1)
-       stfd    fa10,(LA_WORD_SIZE+5+28)*4(r1)
-       stfd    fa11,(LA_WORD_SIZE+5+30)*4(r1)
-       stfd    fa12,(LA_WORD_SIZE+5+32)*4(r1)
+       stw     a0,LA_SIZE+(4+0)*4(sp)
+       stw     a1,LA_SIZE+(4+1)*4(sp)
+       stw     a2,LA_SIZE+(4+2)*4(sp)
+       stw     a3,LA_SIZE+(4+3)*4(sp)
+       stw     a4,LA_SIZE+(4+4)*4(sp)
+       stw     a5,LA_SIZE+(4+5)*4(sp)
+       stw     a6,LA_SIZE+(4+6)*4(sp)
+       stw     a7,LA_SIZE+(4+7)*4(sp)
+
+       stfd    fa0,LA_SIZE+(4+8)*4(sp)
+       stfd    fa1,LA_SIZE+(4+10)*4(sp)
+       stfd    fa2,LA_SIZE+(4+12)*4(sp)
+       stfd    fa3,LA_SIZE+(4+14)*4(sp)
+       stfd    fa4,LA_SIZE+(4+16)*4(sp)
+       stfd    fa5,LA_SIZE+(4+18)*4(sp)
+       stfd    fa6,LA_SIZE+(4+20)*4(sp)
+       stfd    fa7,LA_SIZE+(4+22)*4(sp)
+       stfd    fa8,LA_SIZE+(4+24)*4(sp)
+       stfd    fa9,LA_SIZE+(4+26)*4(sp)
+       stfd    fa10,LA_SIZE+(4+28)*4(sp)
+       stfd    fa11,LA_SIZE+(4+30)*4(sp)
+       stfd    fa12,LA_SIZE+(4+32)*4(sp)
 #else
-       SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
+       SAVE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS)
 #endif
 
        mr      a0,itmp1
        mr      a1,mptr
-       addi    a2,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
-       lwz     a3,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(sp)
+       addi    a2,sp,(LA_SIZE + 4*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
+       lwz     a3,(LA_SIZE + 4*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(sp)
        bl      jit_asm_compile
        mr      pv,v0                       /* move address to pv register        */
 
 #if defined(__DARWIN__)
-       lwz     a0,(LA_WORD_SIZE+5+0)*4(r1)
-       lwz     a1,(LA_WORD_SIZE+5+1)*4(r1)
-       lwz     a2,(LA_WORD_SIZE+5+2)*4(r1)
-       lwz     a3,(LA_WORD_SIZE+5+3)*4(r1)
-       lwz     a4,(LA_WORD_SIZE+5+4)*4(r1)
-       lwz     a5,(LA_WORD_SIZE+5+5)*4(r1)
-       lwz     a6,(LA_WORD_SIZE+5+6)*4(r1)
-       lwz     a7,(LA_WORD_SIZE+5+7)*4(r1)
-
-       lfd     fa0,(LA_WORD_SIZE+5+8)*4(r1)
-       lfd     fa1,(LA_WORD_SIZE+5+10)*4(r1)
-       lfd     fa2,(LA_WORD_SIZE+5+12)*4(r1)
-       lfd     fa3,(LA_WORD_SIZE+5+14)*4(r1)
-       lfd     fa4,(LA_WORD_SIZE+5+16)*4(r1)
-       lfd     fa5,(LA_WORD_SIZE+5+18)*4(r1)
-       lfd     fa6,(LA_WORD_SIZE+5+20)*4(r1)
-       lfd     fa7,(LA_WORD_SIZE+5+22)*4(r1)
-       lfd     fa8,(LA_WORD_SIZE+5+24)*4(r1)
-       lfd     fa9,(LA_WORD_SIZE+5+26)*4(r1)
-       lfd     fa10,(LA_WORD_SIZE+5+28)*4(r1)
-       lfd     fa11,(LA_WORD_SIZE+5+30)*4(r1)
-       lfd     fa12,(LA_WORD_SIZE+5+32)*4(r1)
+       lwz     a0,LA_SIZE+(4+0)*4(sp)
+       lwz     a1,LA_SIZE+(4+1)*4(sp)
+       lwz     a2,LA_SIZE+(4+2)*4(sp)
+       lwz     a3,LA_SIZE+(4+3)*4(sp)
+       lwz     a4,LA_SIZE+(4+4)*4(sp)
+       lwz     a5,LA_SIZE+(4+5)*4(sp)
+       lwz     a6,LA_SIZE+(4+6)*4(sp)
+       lwz     a7,LA_SIZE+(4+7)*4(sp)
+
+       lfd     fa0,LA_SIZE+(4+8)*4(sp)
+       lfd     fa1,LA_SIZE+(4+10)*4(sp)
+       lfd     fa2,LA_SIZE+(4+12)*4(sp)
+       lfd     fa3,LA_SIZE+(4+14)*4(sp)
+       lfd     fa4,LA_SIZE+(4+16)*4(sp)
+       lfd     fa5,LA_SIZE+(4+18)*4(sp)
+       lfd     fa6,LA_SIZE+(4+20)*4(sp)
+       lfd     fa7,LA_SIZE+(4+22)*4(sp)
+       lfd     fa8,LA_SIZE+(4+24)*4(sp)
+       lfd     fa9,LA_SIZE+(4+26)*4(sp)
+       lfd     fa10,LA_SIZE+(4+28)*4(sp)
+       lfd     fa11,LA_SIZE+(4+30)*4(sp)
+       lfd     fa12,LA_SIZE+(4+32)*4(sp)
 #else
-       RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
+       RESTORE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS)
 #endif
 
-       lwz     itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(r1)
+       lwz     itmp1,(LA_SIZE + 4*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(sp)
        mtlr    itmp1
 
-       addi    sp,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
+       addi    sp,sp,(LA_SIZE + 4*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
 
        mr.     pv,pv                       /* test for exception                 */
        beq     L_asm_call_jit_compiler_exception
@@ -795,17 +795,25 @@ L_asm_call_jit_compiler_exception:
                
 asm_handle_nat_exception:
 L_asm_handle_nat_exception:             /* required for PIC code              */
-       mflr    r9
-       lwz     itmp3,4(r9)
-       extsh   itmp3,itmp3
-       add     pv,itmp3,r9
-       lwz     itmp3,8(r9)
-       srwi    itmp3,itmp3,16
-       cmpwi   itmp3,0x3dad
-       bne     L_asm_handle_exception
-       lwz     itmp3,8(r9)
-       slwi    itmp3,itmp3,16
-       add     pv,pv,itmp3
+L_asm_handle_exception_stack_loop:
+       mflr    r0
+       addi    sp,sp,-(LA_SIZE+((4+6)*4))  /* allocate stack (+4 for darwin)     */
+       stw     xptr,LA_SIZE+(4+0)*4(sp)    /* save exception pointer             */
+       stw     xpc,LA_SIZE+(4+1)*4(sp)     /* save exception pc                  */
+       stw     r0,LA_SIZE+(4+3)*4(sp)      /* save return address                */
+       li      itmp3,0
+       stw     itmp3,LA_SIZE+(4+4)*4(sp)   /* save maybe-leaf flag (cleared)     */
+
+       mr      a0,r0                       /* pass return address                */
+       bl      md_codegen_get_pv_from_pc   /* get PV from RA                     */
+       stw     v0,LA_SIZE+(4+2)*4(sp)      /* save data segment pointer          */
+
+       lwz     a0,LA_SIZE+(4+0)*4(sp)      /* pass xptr                          */
+       lwz     a1,LA_SIZE+(4+1)*4(sp)      /* pass xpc                           */
+       lwz     a2,LA_SIZE+(4+2)*4(sp)      /* pass PV (v0 == a0)                 */
+       addi    a3,sp,LA_SIZE+((4+6)*4)     /* pass Java SP                       */
+
+       b       L_asm_handle_exception_continue
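
In this rewritten entry path the procedure vector (pv) is no longer recomputed
inline from the machine code at the return address; md_codegen_get_pv_from_pc
does that in C.  For reference, a rough C rendering of what the deleted
0x3dad sequence above used to compute (illustration only, not CACAO's actual
C code):

    static unsigned char *pv_from_ra_sketch(unsigned char *ra)
    {
        unsigned int insn1 = *(unsigned int *) (ra + 4);
        unsigned int insn2 = *(unsigned int *) (ra + 8);

        /* low halfword of the first word: sign-extended offset from ra */
        unsigned char *pv = ra + (short) (insn1 & 0xffff);

        /* if the second word starts with 0x3dad (an addis), its low
           halfword supplies the high 16 bits of the offset */
        if ((insn2 >> 16) == 0x3dad)
            pv += (insn2 & 0xffff) << 16;

        return pv;
    }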
 
 asm_handle_exception:
 L_asm_handle_exception:                 /* required for PIC code              */
@@ -817,38 +825,34 @@ L_asm_handle_exception:                 /* required for PIC code              */
        SAVE_TEMPORARY_REGISTERS(ARG_CNT)   /* case this is a leaf method         */
 #endif
 
-       li      a3,(ARG_CNT+TMP_CNT)*8      /* prepare a3 for handle_exception    */
-       li      a4,1                        /* set maybe-leaf flag                */
-
-L_asm_handle_exception_stack_loop:
-       addi    sp,sp,-(LA_WORD_SIZE+4+5)*4 /* allocate stack                     */
-       stw     xptr,LA_SIZE+4*4(sp)        /* save exception pointer             */
-       stw     xpc,LA_SIZE+5*4(sp)         /* save exception pc                  */
-       stw     pv,LA_SIZE+6*4(sp)          /* save data segment pointer          */
-       mflr    r0                          /* save return address                */
-       stw     r0,LA_SIZE+5*4(sp)
-       add     a3,a3,sp                    /* calculate Java sp into a3...       */
-       addi    a3,a3,(LA_WORD_SIZE+4+5)*4
-       stw     a4,LA_SIZE+8*4(sp)          /* save maybe-leaf flag               */
+       addi    sp,sp,-(LA_SIZE+(4+6)*4)    /* allocate stack                     */
+       stw     xptr,LA_SIZE+(4+0)*4(sp)    /* save xptr                          */
+       stw     pv,LA_SIZE+(4+2)*4(sp)      /* save PV                            */
+       mflr    r0                          /* save RA                            */
+       stw     r0,LA_SIZE+(4+3)*4(sp)
+       li      t0,1                        /* set maybe-leaf flag                */
+       stw     t0,LA_SIZE+(4+4)*4(sp)      /* save maybe-leaf flag               */
 
        mr      a0,xptr                     /* pass exception pointer             */
        mr      a1,xpc                      /* pass exception pc                  */
        mr      a2,pv                       /* pass data segment pointer          */
-                                           /* a3 is still set                    */
+       addi    a3,sp,LA_SIZE+(ARG_CNT+TMP_CNT)*8+(4+6)*4
+
+L_asm_handle_exception_continue:
        bl      exceptions_handle_exception
 
        mr.     v0,v0
        beq     L_asm_handle_exception_not_catched
 
        mr      xpc,v0                      /* move handlerpc into xpc            */
-       lwz     xptr,LA_SIZE+4*4(sp)        /* restore exception pointer          */
-       lwz     pv,LA_SIZE+6*4(sp)          /* restore data segment pointer       */
-       lwz     r0,LA_SIZE+5*4(sp)          /* restore return address             */
+       lwz     xptr,LA_SIZE+(4+0)*4(sp)    /* restore xptr                       */
+       lwz     pv,LA_SIZE+(4+2)*4(sp)      /* restore PV                         */
+       lwz     r0,LA_SIZE+(4+3)*4(sp)      /* restore RA                         */
        mtlr    r0
-       lwz     a4,LA_SIZE+8*4(sp)          /* get maybe-leaf flag                */
-       addi    sp,sp,(LA_WORD_SIZE+4+5)*4  /* free stack frame                   */
+       lwz     t0,LA_SIZE+(4+4)*4(sp)      /* get maybe-leaf flag                */
+       addi    sp,sp,LA_SIZE+(4+6)*4       /* free stack frame                   */
 
-       mr.     a4,a4
+       mr.     t0,t0
        beq     L_asm_handle_exception_no_leaf
 
 #if defined(__DARWIN__)
@@ -864,102 +868,90 @@ L_asm_handle_exception_no_leaf:
        bctr
 
 L_asm_handle_exception_not_catched:
-       lwz     xptr,LA_SIZE+4*4(sp)        /* restore exception pointer          */
-       lwz     pv,LA_SIZE+6*4(sp)          /* restore data segment pointer       */
-       lwz     r0,LA_SIZE+5*4(sp)          /* restore return address             */
+       lwz     xptr,LA_SIZE+(4+0)*4(sp)    /* restore xptr                       */
+       lwz     pv,LA_SIZE+(4+2)*4(sp)      /* restore PV                         */
+       lwz     r0,LA_SIZE+(4+3)*4(sp)      /* restore RA                         */
        mtlr    r0
-       lwz     a4,LA_SIZE+8*4(sp)          /* get maybe-leaf flag                */
-       addi    sp,sp,(LA_WORD_SIZE+4+5)*4  /* free stack frame                   */
+       lwz     t0,LA_SIZE+(4+4)*4(sp)      /* get maybe-leaf flag                */
+       addi    sp,sp,LA_SIZE+(4+6)*4       /* free stack frame                   */
 
-       mr.     a4,a4
+       mr.     t0,t0
        beq     L_asm_handle_exception_no_leaf_stack
 
        addi    sp,sp,(ARG_CNT+TMP_CNT)*8   /* remove maybe-leaf stackframe       */
-       li      a4,0                        /* clear the maybe-leaf flag          */
+       li      t0,0                        /* clear the maybe-leaf flag          */
 
 L_asm_handle_exception_no_leaf_stack:
-       lwz     t0,FrameSize(pv)            /* get frame size                     */
-       add     t0,sp,t0                    /* pointer to save area               */
+       lwz     t1,FrameSize(pv)            /* get frame size                     */
+       add     t1,sp,t1                    /* pointer to save area               */
 
-       lwz     t1,IsLeaf(pv)               /* is leaf procedure                  */
-       mr.     t1,t1
+       lwz     t2,IsLeaf(pv)               /* is leaf procedure                  */
+       mr.     t2,t2
        bne     L_asm_handle_exception_no_ra_restore
 
-       lwz     r0,LA_LR_OFFSET(t0)         /* restore ra                         */
+       lwz     r0,LA_LR_OFFSET(t1)         /* restore ra                         */
        mtlr    r0
 
 L_asm_handle_exception_no_ra_restore:
        mflr    xpc                         /* the new xpc is ra                  */
-       lwz     t1,IntSave(pv)              /* t1 = saved int register count      */
+       mr      t4,xpc                      /* save RA                            */
+       lwz     t2,IntSave(pv)              /* t2 = saved int register count      */
        bl      ex_int1
 ex_int1:
-       mflr    t2                          /* t2 = current pc                    */
+       mflr    t3                          /* t3 = current pc                    */
 #if defined(__DARWIN__)
-       addi    t2,t2,lo16(ex_int2-ex_int1)
+       addi    t3,t3,lo16(ex_int2-ex_int1)
 #else
-       addi    t2,t2,(ex_int2-ex_int1)@l
+       addi    t3,t3,(ex_int2-ex_int1)@l
 #endif
-       slwi    t1,t1,2                     /* t1 = register count * 4            */
-       subf    t2,t1,t2                    /* t2 = IntSave - t1                  */
-       mtctr   t2
+       slwi    t2,t2,2                     /* t2 = register count * 4            */
+       subf    t3,t2,t3                    /* t3 = IntSave - t2                  */
+       mtctr   t3
        bctr
 
-       lwz     s0,-10*4(t0)
-       lwz     s1,-9*4(t0)
-       lwz     s2,-8*4(t0)
-       lwz     s3,-7*4(t0)
-       lwz     s4,-6*4(t0)
-       lwz     s5,-5*4(t0)
-       lwz     s6,-4*4(t0)
-       lwz     s7,-3*4(t0)
-       lwz     s8,-2*4(t0)
-       lwz     s9,-1*4(t0)
+       lwz     s0,-10*4(t1)
+       lwz     s1,-9*4(t1)
+       lwz     s2,-8*4(t1)
+       lwz     s3,-7*4(t1)
+       lwz     s4,-6*4(t1)
+       lwz     s5,-5*4(t1)
+       lwz     s6,-4*4(t1)
+       lwz     s7,-3*4(t1)
+       lwz     s8,-2*4(t1)
+       lwz     s9,-1*4(t1)
 
 ex_int2:
-       subf    t0,t1,t0                    /* t0 = t0 - register count * 4       */
+       subf    t1,t2,t1                    /* t1 = t1 - register count * 4       */
 
-       lwz     t1,FltSave(pv)
+       lwz     t2,FltSave(pv)
        bl      ex_flt1
 ex_flt1:
-       mflr    t2
+       mflr    t3
 #if defined(__DARWIN__)
-       addi    t2,t2,lo16(ex_flt2-ex_flt1)
+       addi    t3,t3,lo16(ex_flt2-ex_flt1)
 #else
-       addi    t2,t2,(ex_flt2-ex_flt1)@l
+       addi    t3,t3,(ex_flt2-ex_flt1)@l
 #endif
-       slwi    t1,t1,2                     /* t1 = register count * 4            */
-       subf    t2,t1,t2                    /* t2 = FltSave - t1                  */
-       mtctr   t2
+       slwi    t2,t2,2                     /* t2 = register count * 4            */
+       subf    t3,t2,t3                    /* t3 = FltSave - t2                  */
+       mtctr   t3
        bctr
 
-       lfd     fs0,-10*8(t0)
-       lfd     fs1,-9*8(t0)
-       lfd     fs2,-8*8(t0)
-       lfd     fs3,-7*8(t0)
-       lfd     fs4,-6*8(t0)
-       lfd     fs5,-5*8(t0)
-       lfd     fs6,-4*8(t0)
-       lfd     fs7,-3*8(t0)
-       lfd     fs8,-2*8(t0)
-       lfd     fs9,-1*8(t0)
+       lfd     fs0,-10*8(t1)
+       lfd     fs1,-9*8(t1)
+       lfd     fs2,-8*8(t1)
+       lfd     fs3,-7*8(t1)
+       lfd     fs4,-6*8(t1)
+       lfd     fs5,-5*8(t1)
+       lfd     fs6,-4*8(t1)
+       lfd     fs7,-3*8(t1)
+       lfd     fs8,-2*8(t1)
+       lfd     fs9,-1*8(t1)
 
 ex_flt2:
-       lwz     t0,FrameSize(pv)            /* get frame size                     */
-       add     sp,sp,t0                    /* unwind stack                       */
-       li      a3,0                        /* prepare a3 for handle_exception    */
-
-       mtlr    xpc
-       lwz     itmp3,4(xpc)
-       extsh   itmp3,itmp3
-       add     pv,itmp3,xpc
-       lwz     itmp3,8(xpc)
-       srwi    itmp3,itmp3,16
-       cmpwi   itmp3,0x3dad
-       bne     L_asm_handle_exception_stack_loop
-       lwz     itmp3,8(xpc)
-       slwi    itmp3,itmp3,16
-       add     pv,pv,itmp3
-
+       mtlr    t4                          /* restore RA                         */
+       lwz     t1,FrameSize(pv)            /* get frame size                     */
+       add     sp,sp,t1                    /* unwind stack                       */
        b       L_asm_handle_exception_stack_loop
 
 
@@ -986,7 +978,7 @@ asm_abstractmethoderror:
        b       L_asm_handle_nat_exception
 
 
-/* asm_wrapper_patcher *********************************************************
+/* asm_patcher_wrapper *********************************************************
 
    XXX
 
@@ -1000,21 +992,21 @@ asm_abstractmethoderror:
 
 *******************************************************************************/
 
-asm_wrapper_patcher:
+asm_patcher_wrapper:
        mflr    r0                    /* get Java return address (leaf)           */
        stw     r0,6*4(sp)            /* store it in the stub stackframe          */
                                      /* keep stack 16-bytes aligned: 6+1+37 = 44 */
        stwu    sp,-(LA_SIZE+(5+58)*4)(sp)
 
 #if defined(__DARWIN__)
-       stw     a0,LA_SIZE+(5+0)*4(r1)      /* save argument registers            */
-       stw     a1,LA_SIZE+(5+1)*4(r1)      /* preserve linkage area (24 bytes)   */
-       stw     a2,LA_SIZE+(5+2)*4(r1)      /* and 4 bytes for 4 argument         */
-       stw     a3,LA_SIZE+(5+3)*4(r1)
-       stw     a4,LA_SIZE+(5+4)*4(r1)
-       stw     a5,LA_SIZE+(5+5)*4(r1)
-       stw     a6,LA_SIZE+(5+6)*4(r1)
-       stw     a7,LA_SIZE+(5+7)*4(r1)
+       stw     a0,LA_SIZE+(5+0)*4(sp)      /* save argument registers            */
+       stw     a1,LA_SIZE+(5+1)*4(sp)      /* preserve linkage area (24 bytes)   */
+       stw     a2,LA_SIZE+(5+2)*4(sp)      /* and 4 bytes for 4 argument         */
+       stw     a3,LA_SIZE+(5+3)*4(sp)
+       stw     a4,LA_SIZE+(5+4)*4(sp)
+       stw     a5,LA_SIZE+(5+5)*4(sp)
+       stw     a6,LA_SIZE+(5+6)*4(sp)
+       stw     a7,LA_SIZE+(5+7)*4(sp)
 
        stfd    fa0,LA_SIZE+(5+8)*4(sp)
        stfd    fa1,LA_SIZE+(5+10)*4(sp)
@@ -1030,24 +1022,25 @@ asm_wrapper_patcher:
        stfd    fa11,LA_SIZE+(5+30)*4(sp)
        stfd    fa12,LA_SIZE+(5+32)*4(sp)
 
-       stw     t0,(LA_WORD_SIZE+5+33)*4(r1)
-       stw     t1,(LA_WORD_SIZE+5+34)*4(r1)
-       stw     t2,(LA_WORD_SIZE+5+35)*4(r1)
-       stw     t3,(LA_WORD_SIZE+5+36)*4(r1)
-       stw     t4,(LA_WORD_SIZE+5+37)*4(r1)
-       stw     t5,(LA_WORD_SIZE+5+38)*4(r1)
-       stw     t6,(LA_WORD_SIZE+5+39)*4(r1)
-       stw     t7,(LA_WORD_SIZE+5+40)*4(r1)
-
-       stfd    ft0,(LA_WORD_SIZE+5+42)*4(r1)
-       stfd    ft1,(LA_WORD_SIZE+5+44)*4(r1)
-       stfd    ft2,(LA_WORD_SIZE+5+46)*4(r1)
-       stfd    ft3,(LA_WORD_SIZE+5+48)*4(r1)
-       stfd    ft4,(LA_WORD_SIZE+5+50)*4(r1)
-       stfd    ft5,(LA_WORD_SIZE+5+52)*4(r1)
+       stw     t0,LA_SIZE+(5+33)*4(sp)
+       stw     t1,LA_SIZE+(5+34)*4(sp)
+       stw     t2,LA_SIZE+(5+35)*4(sp)
+       stw     t3,LA_SIZE+(5+36)*4(sp)
+       stw     t4,LA_SIZE+(5+37)*4(sp)
+       stw     t5,LA_SIZE+(5+38)*4(sp)
+       stw     t6,LA_SIZE+(5+39)*4(sp)
+       stw     t7,LA_SIZE+(5+40)*4(sp)
+
+       stfd    ft0,LA_SIZE+(5+42)*4(sp)
+       stfd    ft1,LA_SIZE+(5+44)*4(sp)
+       stfd    ft2,LA_SIZE+(5+46)*4(sp)
+       stfd    ft3,LA_SIZE+(5+48)*4(sp)
+       stfd    ft4,LA_SIZE+(5+50)*4(sp)
+       stfd    ft5,LA_SIZE+(5+52)*4(sp)
 #else
-       SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1) /* save 8 int/8 float arguments   */
-       SAVE_TEMPORARY_REGISTERS(LA_WORD_SIZE+1+24)
+       /* save 8 int/8 float arguments */
+       SAVE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+1)
+       SAVE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+1+24)
 #endif
 
        stw     itmp1,LA_SIZE+(5+54)*4(sp)
@@ -1061,14 +1054,14 @@ asm_wrapper_patcher:
        stw     v0,LA_SIZE+(5+57)*4(sp)     /* save return value                  */
 
 #if defined(__DARWIN__)
-       lwz     a0,LA_SIZE+(5+0)*4(r1)
-       lwz     a1,LA_SIZE+(5+1)*4(r1)
-       lwz     a2,LA_SIZE+(5+2)*4(r1)
-       lwz     a3,LA_SIZE+(5+3)*4(r1)
-       lwz     a4,LA_SIZE+(5+4)*4(r1)
-       lwz     a5,LA_SIZE+(5+5)*4(r1)
-       lwz     a6,LA_SIZE+(5+6)*4(r1)
-       lwz     a7,LA_SIZE+(5+7)*4(r1)
+       lwz     a0,LA_SIZE+(5+0)*4(sp)
+       lwz     a1,LA_SIZE+(5+1)*4(sp)
+       lwz     a2,LA_SIZE+(5+2)*4(sp)
+       lwz     a3,LA_SIZE+(5+3)*4(sp)
+       lwz     a4,LA_SIZE+(5+4)*4(sp)
+       lwz     a5,LA_SIZE+(5+5)*4(sp)
+       lwz     a6,LA_SIZE+(5+6)*4(sp)
+       lwz     a7,LA_SIZE+(5+7)*4(sp)
 
        lfd     fa0,LA_SIZE+(5+8)*4(sp)
        lfd     fa1,LA_SIZE+(5+10)*4(sp)
@@ -1084,24 +1077,25 @@ asm_wrapper_patcher:
        lfd     fa11,LA_SIZE+(5+30)*4(sp)
        lfd     fa12,LA_SIZE+(5+32)*4(sp)
 
-       lwz     t0,(LA_WORD_SIZE+5+33)*4(r1)
-       lwz     t1,(LA_WORD_SIZE+5+34)*4(r1)
-       lwz     t2,(LA_WORD_SIZE+5+35)*4(r1)
-       lwz     t3,(LA_WORD_SIZE+5+36)*4(r1)
-       lwz     t4,(LA_WORD_SIZE+5+37)*4(r1)
-       lwz     t5,(LA_WORD_SIZE+5+38)*4(r1)
-       lwz     t6,(LA_WORD_SIZE+5+39)*4(r1)
-       lwz     t7,(LA_WORD_SIZE+5+40)*4(r1)
-
-       lfd     ft0,(LA_WORD_SIZE+5+42)*4(r1)
-       lfd     ft1,(LA_WORD_SIZE+5+44)*4(r1)
-       lfd     ft2,(LA_WORD_SIZE+5+46)*4(r1)
-       lfd     ft3,(LA_WORD_SIZE+5+48)*4(r1)
-       lfd     ft4,(LA_WORD_SIZE+5+50)*4(r1)
-       lfd     ft5,(LA_WORD_SIZE+5+52)*4(r1)
+       lwz     t0,LA_SIZE+(5+33)*4(sp)
+       lwz     t1,LA_SIZE+(5+34)*4(sp)
+       lwz     t2,LA_SIZE+(5+35)*4(sp)
+       lwz     t3,LA_SIZE+(5+36)*4(sp)
+       lwz     t4,LA_SIZE+(5+37)*4(sp)
+       lwz     t5,LA_SIZE+(5+38)*4(sp)
+       lwz     t6,LA_SIZE+(5+39)*4(sp)
+       lwz     t7,LA_SIZE+(5+40)*4(sp)
+
+       lfd     ft0,LA_SIZE+(5+42)*4(sp)
+       lfd     ft1,LA_SIZE+(5+44)*4(sp)
+       lfd     ft2,LA_SIZE+(5+46)*4(sp)
+       lfd     ft3,LA_SIZE+(5+48)*4(sp)
+       lfd     ft4,LA_SIZE+(5+50)*4(sp)
+       lfd     ft5,LA_SIZE+(5+52)*4(sp)
 #else
-       RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1) /* restore 8 int/8 float args  */
-       RESTORE_TEMPORARY_REGISTERS(LA_WORD_SIZE+1+24)
+       /* restore 8 int/8 float arguments */
+       RESTORE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+1)
+       RESTORE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+1+24)
 #endif
 
        lwz     itmp1,LA_SIZE+(5+54)*4(sp)
@@ -1109,27 +1103,28 @@ asm_wrapper_patcher:
        lwz     pv,LA_SIZE+(5+56)*4(sp)
        lwz     itmp3,LA_SIZE+(5+57)*4(sp)  /* restore return value into temp reg.*/
 
-       lwz     r0,(6+LA_WORD_SIZE+5+58)*4(sp) /* restore RA                      */
+       lwz     r0,6*4+LA_SIZE+(5+58)*4(sp) /* restore RA                         */
        mtlr    r0
 
        mr.     itmp3,itmp3           /* check for an exception                   */
-       bne     L_asm_wrapper_patcher_exception
+       bne     L_asm_patcher_wrapper_exception
 
                                      /* get return address (into JIT code)       */
-       lwz     itmp3,(5+LA_WORD_SIZE+5+58)*4(sp)
+       lwz     itmp3,5*4+LA_SIZE+(5+58)*4(sp)
 
                                      /* remove stack frame + patcher stub stack  */
-       addi    sp,sp,(8+LA_WORD_SIZE+5+58)*4
+       addi    sp,sp,8*4+LA_SIZE+(5+58)*4
 
        mtctr   itmp3
        bctr                          /* jump to new patched code                 */
 
-L_asm_wrapper_patcher_exception:
+L_asm_patcher_wrapper_exception:
        mr      xptr,itmp3                  /* get exception                      */
-       lwz     xpc,(5+LA_WORD_SIZE+5+58)*4(sp)
-       addi    sp,sp,(8+LA_WORD_SIZE+5+58)*4
+       lwz     xpc,5*4+LA_SIZE+(5+58)*4(sp)
+       addi    sp,sp,8*4+LA_SIZE+(5+58)*4
        b       L_asm_handle_exception
 
+#if defined(ENABLE_REPLACEMENT)
 
 /* asm_replacement_out *********************************************************
 
@@ -1152,46 +1147,48 @@ L_asm_wrapper_patcher_exception:
        /* XXX we should find a cleaner solution here */
 #define REPLACEMENT_ROOM  512
 
+#define sizeexecutionstate_ALIGNED  ((sizeexecutionstate + 15) & ~15)
+
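
The new sizeexecutionstate_ALIGNED macro rounds the structure size up to the
next multiple of 16 so the stack pointer stays 16-byte aligned.  A tiny
stand-alone illustration of the idiom (values chosen arbitrarily):

    #include <assert.h>

    #define ALIGN_UP_16(x)  (((x) + 15) & ~15)   /* same idiom as above */

    int main(void)
    {
        assert(ALIGN_UP_16(96)  == 96);    /* already aligned: unchanged      */
        assert(ALIGN_UP_16(100) == 112);   /* rounded up to the next multiple */
        return 0;
    }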
 asm_replacement_out:
     /* create stack frame */
-       addi    sp,sp,-(sizeexecutionstate + REPLACEMENT_ROOM) /* XXX align */
+       addi    sp,sp,-(sizeexecutionstate_ALIGNED + REPLACEMENT_ROOM)
 
        /* save link register */
-       mflr    r16
+       mflr    itmp3
 
        /* save registers in execution state */
-       stw     r0 ,( 0*8+offes_intregs)(sp)
-       stw     r1 ,( 1*8+offes_intregs)(sp)
-       stw     r2 ,( 2*8+offes_intregs)(sp)
-       stw     r3 ,( 3*8+offes_intregs)(sp)
-       stw     r4 ,( 4*8+offes_intregs)(sp)
-       stw     r5 ,( 5*8+offes_intregs)(sp)
-       stw     r6 ,( 6*8+offes_intregs)(sp)
-       stw     r7 ,( 7*8+offes_intregs)(sp)
-       stw     r8 ,( 8*8+offes_intregs)(sp)
-       stw     r9 ,( 9*8+offes_intregs)(sp)
-       stw     r10,(10*8+offes_intregs)(sp)
-       stw     r11,(11*8+offes_intregs)(sp)
-       stw     r12,(12*8+offes_intregs)(sp)
-       stw     r13,(13*8+offes_intregs)(sp)
-       stw     r14,(14*8+offes_intregs)(sp)
-       stw     r15,(15*8+offes_intregs)(sp)
-       stw     r16,(16*8+offes_intregs)(sp) /* link register */
-       stw     r17,(17*8+offes_intregs)(sp)
-       stw     r18,(18*8+offes_intregs)(sp)
-       stw     r19,(19*8+offes_intregs)(sp)
-       stw     r20,(20*8+offes_intregs)(sp)
-       stw     r21,(21*8+offes_intregs)(sp)
-       stw     r22,(22*8+offes_intregs)(sp)
-       stw     r23,(23*8+offes_intregs)(sp)
-       stw     r24,(24*8+offes_intregs)(sp)
-       stw     r25,(25*8+offes_intregs)(sp)
-       stw     r26,(26*8+offes_intregs)(sp)
-       stw     r27,(27*8+offes_intregs)(sp)
-       stw     r28,(28*8+offes_intregs)(sp)
-       stw     r29,(29*8+offes_intregs)(sp)
-       stw     r30,(30*8+offes_intregs)(sp)
-       stw     r31,(31*8+offes_intregs)(sp)
+       stw     r0 ,( 0*4+offes_intregs)(sp)
+       stw     r1 ,( 1*4+offes_intregs)(sp)
+       stw     r2 ,( 2*4+offes_intregs)(sp)
+       stw     r3 ,( 3*4+offes_intregs)(sp)
+       stw     r4 ,( 4*4+offes_intregs)(sp)
+       stw     r5 ,( 5*4+offes_intregs)(sp)
+       stw     r6 ,( 6*4+offes_intregs)(sp)
+       stw     r7 ,( 7*4+offes_intregs)(sp)
+       stw     r8 ,( 8*4+offes_intregs)(sp)
+       stw     r9 ,( 9*4+offes_intregs)(sp)
+       stw     r10,(10*4+offes_intregs)(sp)
+       stw     r11,(11*4+offes_intregs)(sp)
+       stw     r12,(12*4+offes_intregs)(sp)
+       stw     r13,(13*4+offes_intregs)(sp)
+       stw     r14,(14*4+offes_intregs)(sp)
+       stw     r15,(15*4+offes_intregs)(sp)
+       stw     r16,(16*4+offes_intregs)(sp) /* link register (saved in itmp3 above) */
+       stw     r17,(17*4+offes_intregs)(sp)
+       stw     r18,(18*4+offes_intregs)(sp)
+       stw     r19,(19*4+offes_intregs)(sp)
+       stw     r20,(20*4+offes_intregs)(sp)
+       stw     r21,(21*4+offes_intregs)(sp)
+       stw     r22,(22*4+offes_intregs)(sp)
+       stw     r23,(23*4+offes_intregs)(sp)
+       stw     r24,(24*4+offes_intregs)(sp)
+       stw     r25,(25*4+offes_intregs)(sp)
+       stw     r26,(26*4+offes_intregs)(sp)
+       stw     r27,(27*4+offes_intregs)(sp)
+       stw     r28,(28*4+offes_intregs)(sp)
+       stw     r29,(29*4+offes_intregs)(sp)
+       stw     r30,(30*4+offes_intregs)(sp)
+       stw     r31,(31*4+offes_intregs)(sp)
        
        stfd    fr0 ,( 0*8+offes_fltregs)(sp)
        stfd    fr1 ,( 1*8+offes_fltregs)(sp)
@@ -1227,7 +1224,7 @@ asm_replacement_out:
        stfd    fr31,(31*8+offes_fltregs)(sp)
        
        /* calculate sp of method */
-       addi    itmp1,sp,(sizeexecutionstate + REPLACEMENT_ROOM + 4*4)
+       addi    itmp1,sp,(sizeexecutionstate_ALIGNED + REPLACEMENT_ROOM + 4*4)
        stw     itmp1,(offes_sp)(sp)
 
        /* store pv */
@@ -1249,122 +1246,151 @@ asm_replacement_out:
    NOTE: itmp3 is not restored!
 
    C prototype:
-      void asm_replacement_in(executionstate *es);
+      void asm_replacement_in(executionstate *es, replace_safestack_t *st);
 
 *******************************************************************************/
 
 asm_replacement_in:
-       /* a0 == executionstate *es */
+       /* a0 == executionstate *es      */
+       /* a1 == replace_safestack_t *st */
+
+       /* get arguments */
+       mr              s1,a1                       /* replace_safestack_t *st            */
+       mr              s2,a0                       /* executionstate *es == safe stack   */
+
+       /* switch to the safe stack */
+       mr              sp,s2
+
+       /* reserve linkage area */
+       addi    sp,sp,-(LA_SIZE_ALIGNED)
 
-       /* set new sp and pv */
-       lwz     sp,(offes_sp)(a0)
-       lwz     pv,(offes_pv)(a0)
+       /* call replace_build_execution_state(st) */
+       mr              a0,s1
+       bl              replace_build_execution_state
+
+       /* set new sp */
+       lwz             sp,(offes_sp)(s2)
+
+       /* build stack frame */
+       addi    sp,sp,-(sizeexecutionstate_ALIGNED)
+
+       /* call replace_free_safestack(st, tmpes), where tmpes is the address */
+       /* of the executionstate_t just allocated on the new stack            */
+       mr              a1,sp /* tmpes */
+       mr              a0,s1 /* st    */
+       addi    sp,sp,-(LA_SIZE_ALIGNED)  /* reserve linkage area */
+       bl              replace_free_safestack
+       addi    sp,sp,+(LA_SIZE_ALIGNED)  /* tear down linkage area */
+
+       /* set new pv */
+       lwz     pv,(offes_pv)(sp)
        
        /* copy registers from execution state */
-       lwz     r0 ,( 0*8+offes_intregs)(a0)
+       lwz     r0 ,( 0*4+offes_intregs)(sp)
        /* r1 is sp                       */
        /* r2 is reserved                 */
-       /* a0 is loaded below             */
-       lwz     r4 ,( 4*8+offes_intregs)(a0)
-       lwz     r5 ,( 5*8+offes_intregs)(a0)
-       lwz     r6 ,( 6*8+offes_intregs)(a0)
-       lwz     r7 ,( 7*8+offes_intregs)(a0)
-       lwz     r8 ,( 8*8+offes_intregs)(a0)
-       lwz     r9 ,( 9*8+offes_intregs)(a0)
-       lwz     r10,(10*8+offes_intregs)(a0)
-       lwz     r11,(11*8+offes_intregs)(a0)
-       lwz     r12,(12*8+offes_intregs)(a0)
+       lwz     a0 ,( 3*4+offes_intregs)(sp)
+       lwz     r4 ,( 4*4+offes_intregs)(sp)
+       lwz     r5 ,( 5*4+offes_intregs)(sp)
+       lwz     r6 ,( 6*4+offes_intregs)(sp)
+       lwz     r7 ,( 7*4+offes_intregs)(sp)
+       lwz     r8 ,( 8*4+offes_intregs)(sp)
+       lwz     r9 ,( 9*4+offes_intregs)(sp)
+       lwz     r10,(10*4+offes_intregs)(sp)
+       lwz     r11,(11*4+offes_intregs)(sp)
+       lwz     r12,(12*4+offes_intregs)(sp)
        /* r13 is pv                      */
-       lwz     r14,(14*8+offes_intregs)(a0)
-       lwz     r15,(15*8+offes_intregs)(a0)
-       lwz     r16,(16*8+offes_intregs)(a0) /* link register */
-       lwz     r17,(17*8+offes_intregs)(a0)
-       lwz     r18,(18*8+offes_intregs)(a0)
-       lwz     r19,(19*8+offes_intregs)(a0)
-       lwz     r20,(20*8+offes_intregs)(a0)
-       lwz     r21,(21*8+offes_intregs)(a0)
-       lwz     r22,(22*8+offes_intregs)(a0)
-       lwz     r23,(23*8+offes_intregs)(a0)
-       lwz     r24,(24*8+offes_intregs)(a0)
-       lwz     r25,(25*8+offes_intregs)(a0)
-       lwz     r26,(26*8+offes_intregs)(a0)
-       lwz     r27,(27*8+offes_intregs)(a0)
-       lwz     r28,(28*8+offes_intregs)(a0)
-       lwz     r29,(29*8+offes_intregs)(a0)
-       lwz     r30,(30*8+offes_intregs)(a0)
-       lwz     r31,(31*8+offes_intregs)(a0)
+       lwz     r14,(14*4+offes_intregs)(sp)
+       lwz     r15,(15*4+offes_intregs)(sp)
+       lwz     r16,(16*4+offes_intregs)(sp) /* itmp3, later to link register */
+       lwz     r17,(17*4+offes_intregs)(sp)
+       lwz     r18,(18*4+offes_intregs)(sp)
+       lwz     r19,(19*4+offes_intregs)(sp)
+       lwz     r20,(20*4+offes_intregs)(sp)
+       lwz     r21,(21*4+offes_intregs)(sp)
+       lwz     r22,(22*4+offes_intregs)(sp)
+       lwz     r23,(23*4+offes_intregs)(sp)
+       lwz     r24,(24*4+offes_intregs)(sp)
+       lwz     r25,(25*4+offes_intregs)(sp)
+       lwz     r26,(26*4+offes_intregs)(sp)
+       lwz     r27,(27*4+offes_intregs)(sp)
+       lwz     r28,(28*4+offes_intregs)(sp)
+       lwz     r29,(29*4+offes_intregs)(sp)
+       lwz     r30,(30*4+offes_intregs)(sp)
+       lwz     r31,(31*4+offes_intregs)(sp)
        
-       lfd     fr0 ,( 0*8+offes_fltregs)(a0)
-       lfd     fr1 ,( 1*8+offes_fltregs)(a0)
-       lfd     fr2 ,( 2*8+offes_fltregs)(a0)
-       lfd     fr3 ,( 3*8+offes_fltregs)(a0)
-       lfd     fr4 ,( 4*8+offes_fltregs)(a0)
-       lfd     fr5 ,( 5*8+offes_fltregs)(a0)
-       lfd     fr6 ,( 6*8+offes_fltregs)(a0)
-       lfd     fr7 ,( 7*8+offes_fltregs)(a0)
-       lfd     fr8 ,( 8*8+offes_fltregs)(a0)
-       lfd     fr9 ,( 9*8+offes_fltregs)(a0)
-       lfd     fr10,(10*8+offes_fltregs)(a0)
-       lfd     fr11,(11*8+offes_fltregs)(a0)
-       lfd     fr12,(12*8+offes_fltregs)(a0)
-       lfd     fr13,(13*8+offes_fltregs)(a0)
-       lfd     fr14,(14*8+offes_fltregs)(a0)
-       lfd     fr15,(15*8+offes_fltregs)(a0)
-       lfd     fr16,(16*8+offes_fltregs)(a0)
-       lfd     fr17,(17*8+offes_fltregs)(a0)
-       lfd     fr18,(18*8+offes_fltregs)(a0)
-       lfd     fr19,(19*8+offes_fltregs)(a0)
-       lfd     fr20,(20*8+offes_fltregs)(a0)
-       lfd     fr21,(21*8+offes_fltregs)(a0)
-       lfd     fr22,(22*8+offes_fltregs)(a0)
-       lfd     fr23,(23*8+offes_fltregs)(a0)
-       lfd     fr24,(24*8+offes_fltregs)(a0)
-       lfd     fr25,(25*8+offes_fltregs)(a0)
-       lfd     fr26,(26*8+offes_fltregs)(a0)
-       lfd     fr27,(27*8+offes_fltregs)(a0)
-       lfd     fr28,(28*8+offes_fltregs)(a0)
-       lfd     fr29,(29*8+offes_fltregs)(a0)
-       lfd     fr30,(30*8+offes_fltregs)(a0)
-       lfd     fr31,(31*8+offes_fltregs)(a0)
+       lfd     fr0 ,( 0*8+offes_fltregs)(sp)
+       lfd     fr1 ,( 1*8+offes_fltregs)(sp)
+       lfd     fr2 ,( 2*8+offes_fltregs)(sp)
+       lfd     fr3 ,( 3*8+offes_fltregs)(sp)
+       lfd     fr4 ,( 4*8+offes_fltregs)(sp)
+       lfd     fr5 ,( 5*8+offes_fltregs)(sp)
+       lfd     fr6 ,( 6*8+offes_fltregs)(sp)
+       lfd     fr7 ,( 7*8+offes_fltregs)(sp)
+       lfd     fr8 ,( 8*8+offes_fltregs)(sp)
+       lfd     fr9 ,( 9*8+offes_fltregs)(sp)
+       lfd     fr10,(10*8+offes_fltregs)(sp)
+       lfd     fr11,(11*8+offes_fltregs)(sp)
+       lfd     fr12,(12*8+offes_fltregs)(sp)
+       lfd     fr13,(13*8+offes_fltregs)(sp)
+       lfd     fr14,(14*8+offes_fltregs)(sp)
+       lfd     fr15,(15*8+offes_fltregs)(sp)
+       lfd     fr16,(16*8+offes_fltregs)(sp)
+       lfd     fr17,(17*8+offes_fltregs)(sp)
+       lfd     fr18,(18*8+offes_fltregs)(sp)
+       lfd     fr19,(19*8+offes_fltregs)(sp)
+       lfd     fr20,(20*8+offes_fltregs)(sp)
+       lfd     fr21,(21*8+offes_fltregs)(sp)
+       lfd     fr22,(22*8+offes_fltregs)(sp)
+       lfd     fr23,(23*8+offes_fltregs)(sp)
+       lfd     fr24,(24*8+offes_fltregs)(sp)
+       lfd     fr25,(25*8+offes_fltregs)(sp)
+       lfd     fr26,(26*8+offes_fltregs)(sp)
+       lfd     fr27,(27*8+offes_fltregs)(sp)
+       lfd     fr28,(28*8+offes_fltregs)(sp)
+       lfd     fr29,(29*8+offes_fltregs)(sp)
+       lfd     fr30,(30*8+offes_fltregs)(sp)
+       lfd     fr31,(31*8+offes_fltregs)(sp)
 
        /* restore link register */
 
-       mtlr    r16
+       mtlr    itmp3
        
        /* load new pc */
 
-       lwz     itmp3,offes_pc(a0)
+       lwz     itmp3,offes_pc(sp)
 
-       /* load a0 */
-       
-       lwz     a0,(3*8+offes_intregs)(a0)
+       /* remove stack frame */
+
+       addi    sp,sp,+(sizeexecutionstate_ALIGNED)
 
        /* jump to new code */
 
        mtctr   itmp3
        bctr
 
+#endif /* defined(ENABLE_REPLACEMENT) */
+
 /*********************************************************************/
 
 asm_cacheflush:
-       add     r4,r3,r4
-       rlwinm  r3,r3,0,0,26
-       addi    r4,r4,31
-       rlwinm  r4,r4,0,0,26
-       mr      r5,r3
+       add     a1,a0,a1
+       rlwinm  a0,a0,0,0,26
+       addi    a1,a1,31
+       rlwinm  a1,a1,0,0,26
+       mr      a2,a0
 1:
-       cmplw   r3,r4
+       cmplw   a0,a1
        bge     0f
-       dcbst   0,r3
-       addi    r3,r3,32
+       dcbst   0,a0
+       addi    a0,a0,32
        b       1b
 0:
        sync
 1:
-       cmplw   r5,r4
+       cmplw   a2,a1
        bge     0f
-       icbi    0,r5
-       addi    r5,r5,32
+       icbi    0,a2
+       addi    a2,a2,32
        b       1b
 0:
        sync
@@ -1372,16 +1398,45 @@ asm_cacheflush:
        blr
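
asm_cacheflush walks the given address range one 32-byte cache line at a time:
dcbst writes dirty data-cache lines back, a sync drains those writes, icbi then
invalidates the matching instruction-cache lines so freshly generated code is
fetched from memory.  A C sketch of the same loop structure, assuming 32-byte
lines and GCC inline assembly (illustration only):

    void cacheflush_sketch(char *addr, unsigned long len)
    {
        char *start = (char *) ((unsigned long) addr & ~31UL);                /* round start down */
        char *end   = (char *) (((unsigned long) (addr + len) + 31) & ~31UL); /* round end up     */
        char *p;

        for (p = start; p < end; p += 32)
            __asm__ volatile ("dcbst 0,%0" : : "r" (p) : "memory");  /* write back data   */
        __asm__ volatile ("sync" ::: "memory");

        for (p = start; p < end; p += 32)
            __asm__ volatile ("icbi 0,%0" : : "r" (p) : "memory");   /* invalidate icache */
        __asm__ volatile ("sync" ::: "memory");
    }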
 
 
+/* asm_compare_and_swap ********************************************************
+
+   Atomically compares the word at the address in the first argument with
+   oldval and, if equal, stores newval there.  The previous contents of the
+   word are returned in either case.
+
+*******************************************************************************/
+
+asm_compare_and_swap:
+1:  lwarx   a6,r0,a0 
+       subf.   r0,a6,a1 
+       bne-    2f 
+       or      r0,a2,a2 
+       stwcx.  r0,r0,a0 
+       bne-    1b 
+2: 
+       mr      a0,a6
+       blr
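+
+/* The lwarx/stwcx. loop above implements a word-sized compare-and-swap that
+   always returns the value it found in memory (arguments: a0 = address,
+   a1 = oldval, a2 = newval).  Atomicity aside, its observable behaviour in
+   plain C is roughly the following; the prototype is inferred from the
+   register usage, not taken from a header:
+
+       long compare_and_swap_model(volatile long *addr, long oldval, long newval)
+       {
+           long seen = *addr;        // lwarx: load and reserve
+           if (seen == oldval)
+               *addr = newval;       // stwcx.: store if still reserved
+           return seen;              // previous contents, matched or not
+       }
+*/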
+
+
+/* asm_memory_barrier **********************************************************
+
+   Issues a full memory barrier (sync): all storage accesses issued before
+   the call complete before any issued after it.
+
+*******************************************************************************/
+
+asm_memory_barrier:
+       sync
+       blr
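+
+/* asm_memory_barrier is simply a sync, i.e. a full barrier that orders every
+   earlier storage access before every later one.  A hypothetical usage
+   pattern (the publish/ready names are made up for illustration):
+
+       extern void asm_memory_barrier(void);
+
+       void publish(volatile int *data, volatile int *ready)
+       {
+           *data = 42;
+           asm_memory_barrier();   // data store becomes visible before the flag
+           *ready = 1;
+       }
+*/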
+
+
 asm_getclassvalues_atomic:
 _crit_restart:
 _crit_begin:
-       lwz     r6,offbaseval(r3)
-       lwz     r7,offdiffval(r3)
-       lwz     r8,offbaseval(r4)
+       lwz     a3,offbaseval(a0)
+       lwz     a4,offdiffval(a0)
+       lwz     a5,offbaseval(a1)
 _crit_end:
-       stw     r6,offcast_super_baseval(r5)
-       stw     r7,offcast_super_diffval(r5)
-       stw     r8,offcast_sub_baseval(r5)
+       stw     a3,offcast_super_baseval(a2)
+       stw     a4,offcast_super_diffval(a2)
+       stw     a5,offcast_sub_baseval(a2)
        blr
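
asm_getclassvalues_atomic copies the subtype-check values (baseval/diffval of
the super vftbl, baseval of the sub vftbl) into the result structure between
_crit_begin and _crit_end, so a concurrent class-hierarchy update can never be
observed half-done.  A rough C equivalent; the structure layouts below are
assumptions made for this sketch, the real offsets (offbaseval, offdiffval,
offcast_*) are defined elsewhere in CACAO:

    struct vftbl_sketch    { int baseval; int diffval; };
    struct castinfo_sketch { int super_baseval; int super_diffval; int sub_baseval; };

    void getclassvalues_atomic_sketch(struct vftbl_sketch *super,
                                      struct vftbl_sketch *sub,
                                      struct castinfo_sketch *out)
    {
        /* the three loads form the critical section */
        out->super_baseval = super->baseval;
        out->super_diffval = super->diffval;
        out->sub_baseval   = sub->baseval;
    }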
 
        .data
@@ -1417,6 +1472,26 @@ L_builtin_throw_exception$lazy_ptr:
        .long dyld_stub_binding_helper
 
 
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+       .align 2
+L_md_codegen_get_pv_from_pc$stub:
+       .indirect_symbol _md_codegen_get_pv_from_pc
+       mflr r0
+       bcl 20,31,L00$_md_codegen_get_pv_from_pc
+L00$_md_codegen_get_pv_from_pc:
+       mflr r11
+       addis r11,r11,ha16(L_md_codegen_get_pv_from_pc$lazy_ptr - L00$_md_codegen_get_pv_from_pc)
+       mtlr r0
+       lwzu r12,lo16(L_md_codegen_get_pv_from_pc$lazy_ptr - L00$_md_codegen_get_pv_from_pc)(r11)
+       mtctr r12
+       bctr
+.data
+.lazy_symbol_pointer
+L_md_codegen_get_pv_from_pc$lazy_ptr:
+       .indirect_symbol _md_codegen_get_pv_from_pc
+       .long dyld_stub_binding_helper
+
+
 .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
        .align 2
 L_exceptions_handle_exception$stub:
@@ -1537,6 +1612,26 @@ L_exceptions_asm_new_abstractmethoderror$lazy_ptr:
        .long dyld_stub_binding_helper
 
 
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+       .align 2
+L_patcher_wrapper$stub:
+       .indirect_symbol _patcher_wrapper
+       mflr r0
+       bcl 20,31,L00$_patcher_wrapper
+L00$_patcher_wrapper:
+       mflr r11
+       addis r11,r11,ha16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper)
+       mtlr r0
+       lwzu r12,lo16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper)(r11)
+       mtctr r12
+       bctr
+.data
+.lazy_symbol_pointer
+L_patcher_wrapper$lazy_ptr:
+       .indirect_symbol _patcher_wrapper
+       .long dyld_stub_binding_helper
+
+
 .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
        .align 2
 L_replace_me$stub:
@@ -1556,6 +1651,46 @@ L_replace_me$lazy_ptr:
        .indirect_symbol _replace_me
        .long dyld_stub_binding_helper
 
+
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+       .align 2
+L_replace_build_execution_state$stub:
+       .indirect_symbol _replace_build_execution_state
+       mflr r0
+       bcl 20,31,L00$_replace_build_execution_state
+L00$_replace_build_execution_state:
+       mflr r11
+       addis r11,r11,ha16(L_replace_build_execution_state$lazy_ptr - L00$_replace_build_execution_state)
+       mtlr r0
+       lwzu r12,lo16(L_replace_build_execution_state$lazy_ptr - L00$_replace_build_execution_state)(r11)
+       mtctr r12
+       bctr
+.data
+.lazy_symbol_pointer
+L_replace_build_execution_state$lazy_ptr:
+       .indirect_symbol _replace_build_execution_state
+       .long dyld_stub_binding_helper
+
+
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+       .align 2
+L_replace_free_safestack$stub:
+       .indirect_symbol _replace_free_safestack
+       mflr r0
+       bcl 20,31,L00$_replace_free_safestack
+L00$_replace_free_safestack:
+       mflr r11
+       addis r11,r11,ha16(L_replace_free_safestack$lazy_ptr - L00$_replace_free_safestack)
+       mtlr r0
+       lwzu r12,lo16(L_replace_free_safestack$lazy_ptr - L00$_replace_free_safestack)(r11)
+       mtctr r12
+       bctr
+.data
+.lazy_symbol_pointer
+L_replace_free_safestack$lazy_ptr:
+       .indirect_symbol _replace_free_safestack
+       .long dyld_stub_binding_helper
+
 #endif /* defined(__DARWIN__) */