X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=src%2Fvm%2Fjit%2Fpowerpc%2Fasmpart.S;h=a17fe5217c394ee5659ac43ae1a4cf76ab5fdf84;hb=9f859ad50d3d5d98c185d40b86b2179bc4dc9aeb;hp=e53423d6c31cb32160ff388f3e698551746cd832;hpb=b231165a5347337c61c17cf1e5460cf3ec2a2523;p=cacao.git diff --git a/src/vm/jit/powerpc/asmpart.S b/src/vm/jit/powerpc/asmpart.S index e53423d6c..a17fe5217 100644 --- a/src/vm/jit/powerpc/asmpart.S +++ b/src/vm/jit/powerpc/asmpart.S @@ -1,6 +1,6 @@ /* src/vm/jit/powerpc/asmpart.S - Java-C interface functions for PowerPC - Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel, + Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger, Institut f. Computersprachen - TU Wien @@ -22,17 +22,6 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - Contact: cacao@cacaojvm.org - - Authors: Andreas Krall - Reinhard Grafl - Stefan Ring - - Changes: Christian Thalinger - Edwin Steiner - - $Id: asmpart.S 5287 2006-09-04 14:21:11Z twisti $ - */ @@ -43,7 +32,6 @@ #include "vm/jit/abi-asm.h" #include "vm/jit/methodheader.h" -#include "vm/jit/powerpc/offsets.h" .text @@ -60,6 +48,7 @@ .globl asm_vm_call_method_double .globl asm_vm_call_method_exception_handler + .globl asm_vm_call_method_end .globl asm_call_jit_compiler @@ -68,14 +57,15 @@ .globl asm_abstractmethoderror - .globl asm_patcher_wrapper - +#if defined(ENABLE_REPLACEMENT) .globl asm_replacement_out .globl asm_replacement_in +#endif .globl asm_cacheflush - .globl asm_criticalsections - .globl asm_getclassvalues_atomic + + .globl asm_compare_and_swap + .globl asm_memory_barrier /* asm_vm_call_method ********************************************************** @@ -118,7 +108,7 @@ asm_vm_call_method_double: stwu sp,-40*4(sp) /* keep stack 16-byte aligned */ stw s0,8*4(sp) /* save used callee saved registers */ - stw a0,9*4(sp) /* save method pointer for compiler */ + stw a0,9*4(sp) /* save method PV */ #if defined(__DARWIN__) stw itmp1,10*4(sp) /* register r11 is callee saved */ @@ -145,248 +135,68 @@ asm_vm_call_method_double: stfd ft4,34*4(sp) stfd ft5,36*4(sp) #else - SAVE_TEMPORARY_REGISTERS(18) /* the offset has to be even */ + SAVE_TEMPORARY_REGISTERS(18) /* the offset has to be even */ #endif - mr itmp2,a1 /* arg count */ - mr itmp1,a2 /* pointer to arg block */ + mr t0,a1 /* address of data structure */ + mr t1,a2 /* stack argument count */ + + mr s0,sp /* save SP */ + + lwz a0,0*8+4(t0) /* we are on big-endian */ + lwz a1,1*8+4(t0) + lwz a2,2*8+4(t0) + lwz a3,3*8+4(t0) + lwz a4,4*8+4(t0) + lwz a5,5*8+4(t0) + lwz a6,6*8+4(t0) + lwz a7,7*8+4(t0) + + lfd fa0,8*8(t0) + lfd fa1,9*8(t0) + lfd fa2,10*8(t0) + lfd fa3,11*8(t0) + lfd fa4,12*8(t0) + lfd fa5,13*8(t0) + lfd fa6,14*8(t0) + lfd fa7,15*8(t0) - mr t4,itmp2 /* save argument count */ - mr t5,itmp1 /* save argument block pointer */ - - mr s0,sp /* save current sp to s0 */ - - addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */ - addi itmp2,itmp2,1 /* initialize argument count */ - li t0,0 /* initialize integer argument counter */ - li t1,0 /* initialize float argument counter */ - li t6,0 /* initialize integer register counter */ #if defined(__DARWIN__) - li t7,0 /* initialize stack slot counter */ + lfd fa8,16*8(t0) + lfd fa9,17*8(t0) + lfd fa10,18*8(t0) + lfd fa11,19*8(t0) + lfd fa12,20*8(t0) #endif - mflr r0 /* save link register (PIC code) */ - bl 
L_asm_vm_call_method_get_pc -L_asm_vm_call_method_get_pc: - mflr t3 /* t3 contains the current pc */ - mtlr r0 - -L_register_copy: - addi itmp1,itmp1,sizevmarg /* goto next argument block */ - addi itmp2,itmp2,-1 /* argument count - 1 */ - mr. itmp2,itmp2 - beq L_register_copy_done - - lwz itmp3,offvmargtype+4(itmp1) - andi. r0,itmp3,0x0002 /* is this a float/double type? */ - bne L_register_handle_float - -L_register_handle_int: - cmpwi t6,INT_ARG_CNT /* are we out of integer argument */ - beq L_register_copy /* registers? yes, next loop */ - - andi. r0,itmp3,0x0001 /* is this a 2-word type? */ - bne L_register_handle_long - -#if defined(__DARWIN__) - addis itmp3,t3,ha16(L_jumptable_int - L_asm_vm_call_method_get_pc) - la itmp3,lo16(L_jumptable_int - L_asm_vm_call_method_get_pc)(itmp3) -#else - lis itmp3,L_jumptable_int@ha - addi itmp3,itmp3,L_jumptable_int@l -#endif - - slwi t2,t6,2 /* multiple of 4-bytes */ - add itmp3,itmp3,t2 /* calculate address of jumptable */ - lwz itmp3,0(itmp3) /* load function address */ - mtctr itmp3 - addi t0,t0,1 /* integer argument counter */ - addi t6,t6,1 /* integer argument register counter */ -#if defined(__DARWIN__) - addi t7,t7,1 /* stack slot counter */ -#endif - bctr - -L_register_handle_long: -#if defined(__DARWIN__) - addis itmp3,t3,ha16(L_jumptable_long - L_asm_vm_call_method_get_pc) - la itmp3,lo16(L_jumptable_long - L_asm_vm_call_method_get_pc)(itmp3) -#else - lis itmp3,L_jumptable_long@ha - addi itmp3,itmp3,L_jumptable_long@l -#endif -#if !defined(__DARWIN__) - addi t6,t6,1 /* align to even numbers */ - andi. t6,t6,0xfffe -#endif - - cmpwi t6,(INT_ARG_CNT - 1) /* are we out of integer argument */ - blt L_register_handle_long_continue /* registers? */ - - li t6,INT_ARG_CNT /* yes, set integer argument register */ - b L_register_copy /* count to max and next loop */ - -L_register_handle_long_continue: - slwi t2,t6,2 /* multiple of 4-bytes */ - add itmp3,itmp3,t2 /* calculate address of jumptable */ - lwz itmp3,0(itmp3) /* load function address */ - mtctr itmp3 - addi t0,t0,1 /* integer argument counter */ - addi t6,t6,2 /* integer argument register counter */ -#if defined(__DARWIN__) - addi t7,t7,2 /* stack slot counter */ -#endif - bctr - -L_register_handle_float: - cmpwi t1,FLT_ARG_CNT /* are we out of float argument */ - beq L_register_copy /* registers? yes, next loop */ - - andi. r0,itmp3,0x0001 /* is this a 2-word type? */ - bne L_register_handle_double + mr. 
t1,t1 + beq L_asm_vm_call_method_stack_copy_done -#if defined(__DARWIN__) - addis itmp3,t3,ha16(L_jumptable_float - L_asm_vm_call_method_get_pc) - la itmp3,lo16(L_jumptable_float - L_asm_vm_call_method_get_pc)(itmp3) -#else - lis itmp3,L_jumptable_float@ha - addi itmp3,itmp3,L_jumptable_float@l -#endif + slwi t2,t1,3 /* calculate stackframe size (* 8) */ - slwi t2,t1,2 /* multiple of 4-bytes */ - add itmp3,itmp3,t2 /* calculate address of jumptable */ - lwz itmp3,0(itmp3) /* load function address */ - mtctr itmp3 - addi t1,t1,1 /* float argument counter */ -#if defined(__DARWIN__) - addi t7,t7,1 /* stack slot counter */ - addi t6,t6,1 /* skip 1 integer argument register */ -#endif - bctr + sub sp,sp,t2 /* create stackframe */ + mr t2,sp /* temporary stack pointer */ -L_register_handle_double: +L_asm_vm_call_method_stack_copy_loop: #if defined(__DARWIN__) - addis itmp3,t3,ha16(L_jumptable_double - L_asm_vm_call_method_get_pc) - la itmp3,lo16(L_jumptable_double - L_asm_vm_call_method_get_pc)(itmp3) + lwz t3,21*8+0(t0) /* load argument */ + lwz t4,21*8+4(t0) #else - lis itmp3,L_jumptable_double@ha - addi itmp3,itmp3,L_jumptable_double@l -#endif - - slwi t2,t1,2 /* multiple of 4-bytes */ - add itmp3,itmp3,t2 /* calculate address of jumptable */ - lwz itmp3,0(itmp3) /* load function address */ - mtctr itmp3 - addi t1,t1,1 /* float argument counter */ -#if defined(__DARWIN__) - addi t7,t7,2 /* stack slot counter */ - addi t6,t6,2 /* skip 2 integer argument registers */ -#endif - bctr - -L_register_copy_done: - /* calculate remaining arguments */ - sub itmp3,t4,t0 /* - integer arguments in registers */ - sub itmp3,itmp3,t1 /* - float arguments in registers */ - mr. itmp3,itmp3 - beq L_stack_copy_done - - mr itmp2,t4 /* restore argument count */ - mr itmp1,t5 /* restore argument block pointer */ - - slwi t4,itmp3,3 /* XXX use 8-bytes slots for now */ - addi t4,t4,LA_SIZE /* add size of linkage area */ - -#if defined(__DARWIN__) - slwi t5,t7,2 /* add stack space for arguments */ - add t4,t4,t5 -#endif - - sub sp,sp,t4 - - mr t6,sp /* use t6 as temporary sp */ - addi t6,t6,LA_SIZE /* skip linkage area */ -#if defined(__DARWIN__) - add t6,t6,t5 /* skip stack space for arguments */ + lwz t3,16*8+0(t0) /* load argument */ + lwz t4,16*8+4(t0) #endif + stw t3,0(t2) /* store argument on stack */ + stw t4,4(t2) - addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */ - addi itmp2,itmp2,1 /* initialize argument count */ - -L_stack_copy_loop: - addi itmp1,itmp1,sizevmarg /* goto next argument block */ - addi itmp2,itmp2,-1 /* argument count - 1 */ - mr. itmp2,itmp2 - beq L_stack_copy_done - - lwz itmp3,offvmargtype+4(itmp1) - andi. r0,itmp3,0x0002 /* is this a float/double type? */ - bne L_stack_handle_float - -L_stack_handle_int: - addi t0,t0,-1 /* arguments assigned to registers */ - mr. t0,t0 - bge L_stack_copy_loop - - andi. r0,itmp3,0x0001 /* is this a 2-word type? 
*/ - bne L_stack_handle_long - - lwz itmp3,offvmargdata+4(itmp1) /* get integer argument */ - stw itmp3,0(t6) /* and store it on the stack */ - addi t6,t6,4 /* increase temporary sp by 1 slot */ - b L_stack_copy_loop - -L_stack_handle_long: -#if !defined(__DARWIN__) - addi t6,t6,4 /* align stack to 8-bytes */ - rlwinm t6,t6,0,30,28 /* clear lower 4-bits */ -#endif - - lwz itmp3,offvmargdata+0(itmp1) /* get long argument */ - stw itmp3,0(t6) /* and store it on the stack */ - lwz itmp3,offvmargdata+4(itmp1) - stw itmp3,4(t6) - addi t6,t6,8 /* increase temporary sp by 2 slots */ - b L_stack_copy_loop - -L_stack_handle_float: - addi t1,t1,-1 /* arguments assigned to registers */ + addi t0,t0,8 /* load address of next argument */ + addi t2,t2,8 /* increase stack pointer */ + addi t1,t1,-1 /* subtract 1 argument */ mr. t1,t1 - bge L_stack_copy_loop - - andi. r0,itmp3,0x0001 /* is this a 2-word type? */ - bne L_stack_handle_double - - lfs ftmp3,offvmargdata(itmp1) /* get float argument */ - stfs ftmp3,0(t6) /* and store it on the stack */ - addi t6,t6,4 /* increase temporary sp by 1 slot */ - b L_stack_copy_loop - -L_stack_handle_double: -#if !defined(__DARWIN__) - addi t6,t6,4 /* align stack to 8-bytes */ - rlwinm t6,t6,0,30,28 /* clear lower 4-bits */ -#endif - - lfd ftmp3,offvmargdata(itmp1) /* get double argument */ - stfd ftmp3,0(t6) /* and store it on the stack */ - addi t6,t6,8 /* increase temporary sp by 2 slots */ - b L_stack_copy_loop - -L_stack_copy_done: - lwz itmp1,9*4(s0) /* pass method pointer via tmp1 */ - -#if defined(__DARWIN__) - addis mptr,t3,ha16(L_asm_call_jit_compiler - L_asm_vm_call_method_get_pc) - la mptr,lo16(L_asm_call_jit_compiler - L_asm_vm_call_method_get_pc)(mptr) -#else - lis mptr,L_asm_call_jit_compiler@ha - addi mptr,mptr,L_asm_call_jit_compiler@l -#endif - stw mptr,7*4(s0) - addi mptr,s0,7*4 + bgt L_asm_vm_call_method_stack_copy_loop - lwz pv,0*4(mptr) +L_asm_vm_call_method_stack_copy_done: + addi mptr,s0,9*4 /* get address of PV */ + lwz pv,0*4(mptr) /* load PV */ mtctr pv bctrl 1: @@ -398,17 +208,17 @@ L_stack_copy_done: #endif L_asm_vm_call_method_return: - mr sp,s0 /* restore the function's sp */ + mr sp,s0 /* restore the SP */ - lwz s0,8*4(sp) /* restore used callee saved registers */ + lwz s0,8*4(sp) /* restore used callee saved registers*/ #if defined(__DARWIN__) - lwz itmp1,10*4(sp) /* register r11 is callee saved */ + lwz itmp1,10*4(sp) /* register r11 is callee saved */ #endif - lwz pv,11*4(sp) /* save PV register */ + lwz pv,11*4(sp) /* save PV register */ lwz itmp3,12*4(sp) - lfd ftmp1,14*4(sp) /* registers f14-f31 are callee saved */ + lfd ftmp1,14*4(sp) /* registers f14-f31 are callee saved */ lfd ftmp2,16*4(sp) #if defined(__DARWIN__) @@ -440,247 +250,8 @@ asm_vm_call_method_exception_handler: bl builtin_throw_exception b L_asm_vm_call_method_return - - .data - .align 2 - -L_jumptable_int: - .long L_handle_a0 - .long L_handle_a1 - .long L_handle_a2 - .long L_handle_a3 - .long L_handle_a4 - .long L_handle_a5 - .long L_handle_a6 - .long L_handle_a7 - - .text - .align 2 - -L_handle_a0: - lwz a0,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a1: - lwz a1,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a2: - lwz a2,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a3: - lwz a3,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a4: - lwz a4,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a5: - lwz a5,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a6: - lwz a6,offvmargdata+4(itmp1) - b L_register_copy -L_handle_a7: - lwz 
a7,offvmargdata+4(itmp1) - b L_register_copy - - - .data - .align 2 - -L_jumptable_long: -#if defined(__DARWIN__) - .long L_handle_a0_a1 - .long L_handle_a1_a2 - .long L_handle_a2_a3 - .long L_handle_a3_a4 - .long L_handle_a4_a5 - .long L_handle_a5_a6 - .long L_handle_a6_a7 -#else - /* we have two entries here, so we get the even argument register - alignment for linux */ - - .long L_handle_a0_a1 - .long 0 - .long L_handle_a2_a3 - .long 0 - .long L_handle_a4_a5 - .long 0 - .long L_handle_a6_a7 -#endif - - .text - .align 2 - -L_handle_a0_a1: - lwz a0,offvmargdata+0(itmp1) - lwz a1,offvmargdata+4(itmp1) - b L_register_copy -#if defined(__DARWIN__) -L_handle_a1_a2: - lwz a1,offvmargdata+0(itmp1) - lwz a2,offvmargdata+4(itmp1) - b L_register_copy -#endif -L_handle_a2_a3: - lwz a2,offvmargdata+0(itmp1) - lwz a3,offvmargdata+4(itmp1) - b L_register_copy -#if defined(__DARWIN__) -L_handle_a3_a4: - lwz a3,offvmargdata+0(itmp1) - lwz a4,offvmargdata+4(itmp1) - b L_register_copy -#endif -L_handle_a4_a5: - lwz a4,offvmargdata+0(itmp1) - lwz a5,offvmargdata+4(itmp1) - b L_register_copy -#if defined(__DARWIN__) -L_handle_a5_a6: - lwz a5,offvmargdata+0(itmp1) - lwz a6,offvmargdata+4(itmp1) - b L_register_copy -#endif -L_handle_a6_a7: - lwz a6,offvmargdata+0(itmp1) - lwz a7,offvmargdata+4(itmp1) - b L_register_copy - - - .data - .align 2 - -L_jumptable_float: - .long L_handle_fa0 - .long L_handle_fa1 - .long L_handle_fa2 - .long L_handle_fa3 - .long L_handle_fa4 - .long L_handle_fa5 - .long L_handle_fa6 - .long L_handle_fa7 - -#if defined(__DARWIN__) - .long L_handle_fa8 - .long L_handle_fa9 - .long L_handle_fa10 - .long L_handle_fa11 - .long L_handle_fa12 -#endif - - .text - .align 2 - -L_handle_fa0: - lfs fa0,offvmargdata(itmp1) - b L_register_copy -L_handle_fa1: - lfs fa1,offvmargdata(itmp1) - b L_register_copy -L_handle_fa2: - lfs fa2,offvmargdata(itmp1) - b L_register_copy -L_handle_fa3: - lfs fa3,offvmargdata(itmp1) - b L_register_copy -L_handle_fa4: - lfs fa4,offvmargdata(itmp1) - b L_register_copy -L_handle_fa5: - lfs fa5,offvmargdata(itmp1) - b L_register_copy -L_handle_fa6: - lfs fa6,offvmargdata(itmp1) - b L_register_copy -L_handle_fa7: - lfs fa7,offvmargdata(itmp1) - b L_register_copy - -#if defined(__DARWIN__) -L_handle_fa8: - lfs fa8,offvmargdata(itmp1) - b L_register_copy -L_handle_fa9: - lfs fa9,offvmargdata(itmp1) - b L_register_copy -L_handle_fa10: - lfs fa10,offvmargdata(itmp1) - b L_register_copy -L_handle_fa11: - lfs fa11,offvmargdata(itmp1) - b L_register_copy -L_handle_fa12: - lfs fa12,offvmargdata(itmp1) - b L_register_copy -#endif - - - .data - .align 2 - -L_jumptable_double: - .long L_handle_fda0 - .long L_handle_fda1 - .long L_handle_fda2 - .long L_handle_fda3 - .long L_handle_fda4 - .long L_handle_fda5 - .long L_handle_fda6 - .long L_handle_fda7 - -#if defined(__DARWIN__) - .long L_handle_fda8 - .long L_handle_fda9 - .long L_handle_fda10 - .long L_handle_fda11 - .long L_handle_fda12 -#endif - - .text - .align 2 - -L_handle_fda0: - lfd fa0,offvmargdata(itmp1) - b L_register_copy -L_handle_fda1: - lfd fa1,offvmargdata(itmp1) - b L_register_copy -L_handle_fda2: - lfd fa2,offvmargdata(itmp1) - b L_register_copy -L_handle_fda3: - lfd fa3,offvmargdata(itmp1) - b L_register_copy -L_handle_fda4: - lfd fa4,offvmargdata(itmp1) - b L_register_copy -L_handle_fda5: - lfd fa5,offvmargdata(itmp1) - b L_register_copy -L_handle_fda6: - lfd fa6,offvmargdata(itmp1) - b L_register_copy -L_handle_fda7: - lfd fa7,offvmargdata(itmp1) - b L_register_copy - -#if defined(__DARWIN__) -L_handle_fda8: - 
lfd fa8,offvmargdata(itmp1) - b L_register_copy -L_handle_fda9: - lfd fa9,offvmargdata(itmp1) - b L_register_copy -L_handle_fda10: - lfd fa10,offvmargdata(itmp1) - b L_register_copy -L_handle_fda11: - lfd fa11,offvmargdata(itmp1) - b L_register_copy -L_handle_fda12: - lfd fa12,offvmargdata(itmp1) - b L_register_copy -#endif +asm_vm_call_method_end: + nop /* asm_call_jit_compiler ******************************************************* @@ -909,16 +480,16 @@ ex_int1: mtctr t3 bctr - lwz s0,-10*4(t1) - lwz s1,-9*4(t1) - lwz s2,-8*4(t1) - lwz s3,-7*4(t1) - lwz s4,-6*4(t1) - lwz s5,-5*4(t1) - lwz s6,-4*4(t1) - lwz s7,-3*4(t1) - lwz s8,-2*4(t1) - lwz s9,-1*4(t1) + lwz s0,-10*8(t1) + lwz s1,-9*8(t1) + lwz s2,-8*8(t1) + lwz s3,-7*8(t1) + lwz s4,-6*8(t1) + lwz s5,-5*8(t1) + lwz s6,-4*8(t1) + lwz s7,-3*8(t1) + lwz s8,-2*8(t1) + lwz s9,-1*8(t1) ex_int2: subf t1,t2,t1 /* t1 = t1 - register count * 4 */ @@ -978,152 +549,7 @@ asm_abstractmethoderror: b L_asm_handle_nat_exception -/* asm_patcher_wrapper ********************************************************* - - XXX - - Stack layout: - 20 return address into JIT code (patch position) - 16 pointer to virtual java_objectheader - 12 machine code (which is patched back later) - 8 unresolved class/method/field reference - 4 data segment displacement from load instructions - 0 patcher function pointer to call (pv is saved here afterwards) - -*******************************************************************************/ - -asm_patcher_wrapper: - mflr r0 /* get Java return address (leaf) */ - stw r0,6*4(sp) /* store it in the stub stackframe */ - /* keep stack 16-bytes aligned: 6+1+37 = 44 */ - stwu sp,-(LA_SIZE+(5+58)*4)(sp) - -#if defined(__DARWIN__) - stw a0,LA_SIZE+(5+0)*4(sp) /* save argument registers */ - stw a1,LA_SIZE+(5+1)*4(sp) /* preserve linkage area (24 bytes) */ - stw a2,LA_SIZE+(5+2)*4(sp) /* and 4 bytes for 4 argument */ - stw a3,LA_SIZE+(5+3)*4(sp) - stw a4,LA_SIZE+(5+4)*4(sp) - stw a5,LA_SIZE+(5+5)*4(sp) - stw a6,LA_SIZE+(5+6)*4(sp) - stw a7,LA_SIZE+(5+7)*4(sp) - - stfd fa0,LA_SIZE+(5+8)*4(sp) - stfd fa1,LA_SIZE+(5+10)*4(sp) - stfd fa2,LA_SIZE+(5+12)*4(sp) - stfd fa3,LA_SIZE+(5+14)*4(sp) - stfd fa4,LA_SIZE+(5+16)*4(sp) - stfd fa5,LA_SIZE+(5+18)*4(sp) - stfd fa6,LA_SIZE+(5+20)*4(sp) - stfd fa7,LA_SIZE+(5+22)*4(sp) - stfd fa8,LA_SIZE+(5+24)*4(sp) - stfd fa9,LA_SIZE+(5+26)*4(sp) - stfd fa10,LA_SIZE+(5+28)*4(sp) - stfd fa11,LA_SIZE+(5+30)*4(sp) - stfd fa12,LA_SIZE+(5+32)*4(sp) - - stw t0,LA_SIZE+(5+33)*4(sp) - stw t1,LA_SIZE+(5+34)*4(sp) - stw t2,LA_SIZE+(5+35)*4(sp) - stw t3,LA_SIZE+(5+36)*4(sp) - stw t4,LA_SIZE+(5+37)*4(sp) - stw t5,LA_SIZE+(5+38)*4(sp) - stw t6,LA_SIZE+(5+39)*4(sp) - stw t7,LA_SIZE+(5+40)*4(sp) - - stfd ft0,LA_SIZE+(5+42)*4(sp) - stfd ft1,LA_SIZE+(5+44)*4(sp) - stfd ft2,LA_SIZE+(5+46)*4(sp) - stfd ft3,LA_SIZE+(5+48)*4(sp) - stfd ft4,LA_SIZE+(5+50)*4(sp) - stfd ft5,LA_SIZE+(5+52)*4(sp) -#else - /* save 8 int/8 float arguments */ - SAVE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+1) - SAVE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+1+24) -#endif - - stw itmp1,LA_SIZE+(5+54)*4(sp) - stw itmp2,LA_SIZE+(5+55)*4(sp) - stw pv,LA_SIZE+(5+56)*4(sp) - - addi a0,sp,LA_SIZE+(5+58)*4 /* pass SP of patcher stub */ - mr a1,pv /* pass PV */ - mr a2,r0 /* pass RA (correct for leafs) */ - bl patcher_wrapper - stw v0,LA_SIZE+(5+57)*4(sp) /* save return value */ - -#if defined(__DARWIN__) - lwz a0,LA_SIZE+(5+0)*4(sp) - lwz a1,LA_SIZE+(5+1)*4(sp) - lwz a2,LA_SIZE+(5+2)*4(sp) - lwz a3,LA_SIZE+(5+3)*4(sp) - lwz a4,LA_SIZE+(5+4)*4(sp) - lwz 
a5,LA_SIZE+(5+5)*4(sp) - lwz a6,LA_SIZE+(5+6)*4(sp) - lwz a7,LA_SIZE+(5+7)*4(sp) - - lfd fa0,LA_SIZE+(5+8)*4(sp) - lfd fa1,LA_SIZE+(5+10)*4(sp) - lfd fa2,LA_SIZE+(5+12)*4(sp) - lfd fa3,LA_SIZE+(5+14)*4(sp) - lfd fa4,LA_SIZE+(5+16)*4(sp) - lfd fa5,LA_SIZE+(5+18)*4(sp) - lfd fa6,LA_SIZE+(5+20)*4(sp) - lfd fa7,LA_SIZE+(5+22)*4(sp) - lfd fa8,LA_SIZE+(5+24)*4(sp) - lfd fa9,LA_SIZE+(5+26)*4(sp) - lfd fa10,LA_SIZE+(5+28)*4(sp) - lfd fa11,LA_SIZE+(5+30)*4(sp) - lfd fa12,LA_SIZE+(5+32)*4(sp) - - lwz t0,LA_SIZE+(5+33)*4(sp) - lwz t1,LA_SIZE+(5+34)*4(sp) - lwz t2,LA_SIZE+(5+35)*4(sp) - lwz t3,LA_SIZE+(5+36)*4(sp) - lwz t4,LA_SIZE+(5+37)*4(sp) - lwz t5,LA_SIZE+(5+38)*4(sp) - lwz t6,LA_SIZE+(5+39)*4(sp) - lwz t7,LA_SIZE+(5+40)*4(sp) - - lfd ft0,LA_SIZE+(5+42)*4(sp) - lfd ft1,LA_SIZE+(5+44)*4(sp) - lfd ft2,LA_SIZE+(5+46)*4(sp) - lfd ft3,LA_SIZE+(5+48)*4(sp) - lfd ft4,LA_SIZE+(5+50)*4(sp) - lfd ft5,LA_SIZE+(5+52)*4(sp) -#else - /* restore 8 int/8 float arguments */ - RESTORE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+1) - RESTORE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+1+24) -#endif - - lwz itmp1,LA_SIZE+(5+54)*4(sp) - lwz itmp2,LA_SIZE+(5+55)*4(sp) - lwz pv,LA_SIZE+(5+56)*4(sp) - lwz itmp3,LA_SIZE+(5+57)*4(sp) /* restore return value into temp reg.*/ - - lwz r0,6*4+LA_SIZE+(5+58)*4(sp) /* restore RA */ - mtlr r0 - - mr. itmp3,itmp3 /* check for an exception */ - bne L_asm_patcher_wrapper_exception - - /* get return address (into JIT code) */ - lwz itmp3,5*4+LA_SIZE+(5+58)*4(sp) - - /* remove stack frame + patcher stub stack */ - addi sp,sp,8*4+LA_SIZE+(5+58)*4 - - mtctr itmp3 - bctr /* jump to new patched code */ - -L_asm_patcher_wrapper_exception: - mr xptr,itmp3 /* get exception */ - lwz xpc,5*4+LA_SIZE+(5+58)*4(sp) - addi sp,sp,8*4+LA_SIZE+(5+58)*4 - b L_asm_handle_exception - +#if defined(ENABLE_REPLACEMENT) /* asm_replacement_out ********************************************************* @@ -1146,46 +572,48 @@ L_asm_patcher_wrapper_exception: /* XXX we should find a cleaner solution here */ #define REPLACEMENT_ROOM 512 +#define sizeexecutionstate_ALIGNED ((sizeexecutionstate + 15) & ~15) + asm_replacement_out: /* create stack frame */ - addi sp,sp,-(sizeexecutionstate + REPLACEMENT_ROOM) /* XXX align */ + addi sp,sp,-(sizeexecutionstate_ALIGNED + REPLACEMENT_ROOM) /* save link register */ mflr itmp3 /* save registers in execution state */ - stw r0 ,( 0*8+offes_intregs)(sp) - stw r1 ,( 1*8+offes_intregs)(sp) - stw r2 ,( 2*8+offes_intregs)(sp) - stw r3 ,( 3*8+offes_intregs)(sp) - stw r4 ,( 4*8+offes_intregs)(sp) - stw r5 ,( 5*8+offes_intregs)(sp) - stw r6 ,( 6*8+offes_intregs)(sp) - stw r7 ,( 7*8+offes_intregs)(sp) - stw r8 ,( 8*8+offes_intregs)(sp) - stw r9 ,( 9*8+offes_intregs)(sp) - stw r10,(10*8+offes_intregs)(sp) - stw r11,(11*8+offes_intregs)(sp) - stw r12,(12*8+offes_intregs)(sp) - stw r13,(13*8+offes_intregs)(sp) - stw r14,(14*8+offes_intregs)(sp) - stw r15,(15*8+offes_intregs)(sp) - stw r16,(16*8+offes_intregs)(sp) /* link register */ - stw r17,(17*8+offes_intregs)(sp) - stw r18,(18*8+offes_intregs)(sp) - stw r19,(19*8+offes_intregs)(sp) - stw r20,(20*8+offes_intregs)(sp) - stw r21,(21*8+offes_intregs)(sp) - stw r22,(22*8+offes_intregs)(sp) - stw r23,(23*8+offes_intregs)(sp) - stw r24,(24*8+offes_intregs)(sp) - stw r25,(25*8+offes_intregs)(sp) - stw r26,(26*8+offes_intregs)(sp) - stw r27,(27*8+offes_intregs)(sp) - stw r28,(28*8+offes_intregs)(sp) - stw r29,(29*8+offes_intregs)(sp) - stw r30,(30*8+offes_intregs)(sp) - stw r31,(31*8+offes_intregs)(sp) + stw r0 ,( 0*4+offes_intregs)(sp) + stw r1 ,( 
1*4+offes_intregs)(sp) + stw r2 ,( 2*4+offes_intregs)(sp) + stw r3 ,( 3*4+offes_intregs)(sp) + stw r4 ,( 4*4+offes_intregs)(sp) + stw r5 ,( 5*4+offes_intregs)(sp) + stw r6 ,( 6*4+offes_intregs)(sp) + stw r7 ,( 7*4+offes_intregs)(sp) + stw r8 ,( 8*4+offes_intregs)(sp) + stw r9 ,( 9*4+offes_intregs)(sp) + stw r10,(10*4+offes_intregs)(sp) + stw r11,(11*4+offes_intregs)(sp) + stw r12,(12*4+offes_intregs)(sp) + stw r13,(13*4+offes_intregs)(sp) + stw r14,(14*4+offes_intregs)(sp) + stw r15,(15*4+offes_intregs)(sp) + stw r16,(16*4+offes_intregs)(sp) /* link register stored as itmp3 */ + stw r17,(17*4+offes_intregs)(sp) + stw r18,(18*4+offes_intregs)(sp) + stw r19,(19*4+offes_intregs)(sp) + stw r20,(20*4+offes_intregs)(sp) + stw r21,(21*4+offes_intregs)(sp) + stw r22,(22*4+offes_intregs)(sp) + stw r23,(23*4+offes_intregs)(sp) + stw r24,(24*4+offes_intregs)(sp) + stw r25,(25*4+offes_intregs)(sp) + stw r26,(26*4+offes_intregs)(sp) + stw r27,(27*4+offes_intregs)(sp) + stw r28,(28*4+offes_intregs)(sp) + stw r29,(29*4+offes_intregs)(sp) + stw r30,(30*4+offes_intregs)(sp) + stw r31,(31*4+offes_intregs)(sp) stfd fr0 ,( 0*8+offes_fltregs)(sp) stfd fr1 ,( 1*8+offes_fltregs)(sp) @@ -1221,7 +649,7 @@ asm_replacement_out: stfd fr31,(31*8+offes_fltregs)(sp) /* calculate sp of method */ - addi itmp1,sp,(sizeexecutionstate + REPLACEMENT_ROOM + 4*4) + addi itmp1,sp,(sizeexecutionstate_ALIGNED + REPLACEMENT_ROOM + 4*4) stw itmp1,(offes_sp)(sp) /* store pv */ @@ -1243,83 +671,110 @@ asm_replacement_out: NOTE: itmp3 is not restored! C prototype: - void asm_replacement_in(executionstate *es); + void asm_replacement_in(executionstate *es, replace_safestack_t *st); *******************************************************************************/ asm_replacement_in: - /* a0 == executionstate *es */ + /* a0 == executionstate *es */ + /* a1 == replace_safestack_t *st */ + + /* get arguments */ + mr s1,a1 /* replace_safestack_t *st */ + mr s2,a0 /* executionstate *es == safe stack */ - /* set new sp and pv */ - lwz sp,(offes_sp)(a0) - lwz pv,(offes_pv)(a0) + /* switch to the safe stack */ + mr sp,s2 + + /* reserve linkage area */ + addi sp,sp,-(LA_SIZE_ALIGNED) + + /* call replace_build_execution_state(st) */ + mr a0,s1 + bl replace_build_execution_state + + /* set new sp */ + lwz sp,(offes_sp)(s2) + + /* build stack frame */ + addi sp,sp,-(sizeexecutionstate_ALIGNED) + + /* call replace_free_safestack(st,& of allocated executionstate_t) */ + mr a1,sp /* tmpes */ + mr a0,s1 /* st */ + addi sp,sp,-(LA_SIZE_ALIGNED) /* reserve linkage area */ + bl replace_free_safestack + addi sp,sp,+(LA_SIZE_ALIGNED) /* tear down linkage area */ + + /* set new pv */ + lwz pv,(offes_pv)(sp) /* copy registers from execution state */ - lwz r0 ,( 0*8+offes_intregs)(a0) + lwz r0 ,( 0*4+offes_intregs)(sp) /* r1 is sp */ /* r2 is reserved */ - /* a0 is loaded below */ - lwz r4 ,( 4*8+offes_intregs)(a0) - lwz r5 ,( 5*8+offes_intregs)(a0) - lwz r6 ,( 6*8+offes_intregs)(a0) - lwz r7 ,( 7*8+offes_intregs)(a0) - lwz r8 ,( 8*8+offes_intregs)(a0) - lwz r9 ,( 9*8+offes_intregs)(a0) - lwz r10,(10*8+offes_intregs)(a0) - lwz r11,(11*8+offes_intregs)(a0) - lwz r12,(12*8+offes_intregs)(a0) + lwz a0 ,( 3*4+offes_intregs)(sp) + lwz r4 ,( 4*4+offes_intregs)(sp) + lwz r5 ,( 5*4+offes_intregs)(sp) + lwz r6 ,( 6*4+offes_intregs)(sp) + lwz r7 ,( 7*4+offes_intregs)(sp) + lwz r8 ,( 8*4+offes_intregs)(sp) + lwz r9 ,( 9*4+offes_intregs)(sp) + lwz r10,(10*4+offes_intregs)(sp) + lwz r11,(11*4+offes_intregs)(sp) + lwz r12,(12*4+offes_intregs)(sp) /* r13 is pv */ - lwz 
r14,(14*8+offes_intregs)(a0) - lwz r15,(15*8+offes_intregs)(a0) - lwz r16,(16*8+offes_intregs)(a0) /* link register */ - lwz r17,(17*8+offes_intregs)(a0) - lwz r18,(18*8+offes_intregs)(a0) - lwz r19,(19*8+offes_intregs)(a0) - lwz r20,(20*8+offes_intregs)(a0) - lwz r21,(21*8+offes_intregs)(a0) - lwz r22,(22*8+offes_intregs)(a0) - lwz r23,(23*8+offes_intregs)(a0) - lwz r24,(24*8+offes_intregs)(a0) - lwz r25,(25*8+offes_intregs)(a0) - lwz r26,(26*8+offes_intregs)(a0) - lwz r27,(27*8+offes_intregs)(a0) - lwz r28,(28*8+offes_intregs)(a0) - lwz r29,(29*8+offes_intregs)(a0) - lwz r30,(30*8+offes_intregs)(a0) - lwz r31,(31*8+offes_intregs)(a0) + lwz r14,(14*4+offes_intregs)(sp) + lwz r15,(15*4+offes_intregs)(sp) + lwz r16,(16*4+offes_intregs)(sp) /* itmp3, later to link register */ + lwz r17,(17*4+offes_intregs)(sp) + lwz r18,(18*4+offes_intregs)(sp) + lwz r19,(19*4+offes_intregs)(sp) + lwz r20,(20*4+offes_intregs)(sp) + lwz r21,(21*4+offes_intregs)(sp) + lwz r22,(22*4+offes_intregs)(sp) + lwz r23,(23*4+offes_intregs)(sp) + lwz r24,(24*4+offes_intregs)(sp) + lwz r25,(25*4+offes_intregs)(sp) + lwz r26,(26*4+offes_intregs)(sp) + lwz r27,(27*4+offes_intregs)(sp) + lwz r28,(28*4+offes_intregs)(sp) + lwz r29,(29*4+offes_intregs)(sp) + lwz r30,(30*4+offes_intregs)(sp) + lwz r31,(31*4+offes_intregs)(sp) - lfd fr0 ,( 0*8+offes_fltregs)(a0) - lfd fr1 ,( 1*8+offes_fltregs)(a0) - lfd fr2 ,( 2*8+offes_fltregs)(a0) - lfd fr3 ,( 3*8+offes_fltregs)(a0) - lfd fr4 ,( 4*8+offes_fltregs)(a0) - lfd fr5 ,( 5*8+offes_fltregs)(a0) - lfd fr6 ,( 6*8+offes_fltregs)(a0) - lfd fr7 ,( 7*8+offes_fltregs)(a0) - lfd fr8 ,( 8*8+offes_fltregs)(a0) - lfd fr9 ,( 9*8+offes_fltregs)(a0) - lfd fr10,(10*8+offes_fltregs)(a0) - lfd fr11,(11*8+offes_fltregs)(a0) - lfd fr12,(12*8+offes_fltregs)(a0) - lfd fr13,(13*8+offes_fltregs)(a0) - lfd fr14,(14*8+offes_fltregs)(a0) - lfd fr15,(15*8+offes_fltregs)(a0) - lfd fr16,(16*8+offes_fltregs)(a0) - lfd fr17,(17*8+offes_fltregs)(a0) - lfd fr18,(18*8+offes_fltregs)(a0) - lfd fr19,(19*8+offes_fltregs)(a0) - lfd fr20,(20*8+offes_fltregs)(a0) - lfd fr21,(21*8+offes_fltregs)(a0) - lfd fr22,(22*8+offes_fltregs)(a0) - lfd fr23,(23*8+offes_fltregs)(a0) - lfd fr24,(24*8+offes_fltregs)(a0) - lfd fr25,(25*8+offes_fltregs)(a0) - lfd fr26,(26*8+offes_fltregs)(a0) - lfd fr27,(27*8+offes_fltregs)(a0) - lfd fr28,(28*8+offes_fltregs)(a0) - lfd fr29,(29*8+offes_fltregs)(a0) - lfd fr30,(30*8+offes_fltregs)(a0) - lfd fr31,(31*8+offes_fltregs)(a0) + lfd fr0 ,( 0*8+offes_fltregs)(sp) + lfd fr1 ,( 1*8+offes_fltregs)(sp) + lfd fr2 ,( 2*8+offes_fltregs)(sp) + lfd fr3 ,( 3*8+offes_fltregs)(sp) + lfd fr4 ,( 4*8+offes_fltregs)(sp) + lfd fr5 ,( 5*8+offes_fltregs)(sp) + lfd fr6 ,( 6*8+offes_fltregs)(sp) + lfd fr7 ,( 7*8+offes_fltregs)(sp) + lfd fr8 ,( 8*8+offes_fltregs)(sp) + lfd fr9 ,( 9*8+offes_fltregs)(sp) + lfd fr10,(10*8+offes_fltregs)(sp) + lfd fr11,(11*8+offes_fltregs)(sp) + lfd fr12,(12*8+offes_fltregs)(sp) + lfd fr13,(13*8+offes_fltregs)(sp) + lfd fr14,(14*8+offes_fltregs)(sp) + lfd fr15,(15*8+offes_fltregs)(sp) + lfd fr16,(16*8+offes_fltregs)(sp) + lfd fr17,(17*8+offes_fltregs)(sp) + lfd fr18,(18*8+offes_fltregs)(sp) + lfd fr19,(19*8+offes_fltregs)(sp) + lfd fr20,(20*8+offes_fltregs)(sp) + lfd fr21,(21*8+offes_fltregs)(sp) + lfd fr22,(22*8+offes_fltregs)(sp) + lfd fr23,(23*8+offes_fltregs)(sp) + lfd fr24,(24*8+offes_fltregs)(sp) + lfd fr25,(25*8+offes_fltregs)(sp) + lfd fr26,(26*8+offes_fltregs)(sp) + lfd fr27,(27*8+offes_fltregs)(sp) + lfd fr28,(28*8+offes_fltregs)(sp) + lfd fr29,(29*8+offes_fltregs)(sp) + lfd 
fr30,(30*8+offes_fltregs)(sp) + lfd fr31,(31*8+offes_fltregs)(sp) /* restore link register */ @@ -1327,17 +782,19 @@ asm_replacement_in: /* load new pc */ - lwz itmp3,offes_pc(a0) + lwz itmp3,offes_pc(sp) - /* load a0 */ - - lwz a0,(3*8+offes_intregs)(a0) + /* remove stack frame */ + + addi sp,sp,+(sizeexecutionstate_ALIGNED) /* jump to new code */ mtctr itmp3 bctr +#endif /* defined(ENABLE_REPLACEMENT) */ + /*********************************************************************/ asm_cacheflush: @@ -1366,27 +823,34 @@ asm_cacheflush: blr -asm_getclassvalues_atomic: -_crit_restart: -_crit_begin: - lwz a3,offbaseval(a0) - lwz a4,offdiffval(a0) - lwz a5,offbaseval(a1) -_crit_end: - stw a3,offcast_super_baseval(a2) - stw a4,offcast_super_diffval(a2) - stw a5,offcast_sub_baseval(a2) +/* asm_compare_and_swap ******************************************************** + + XXX + +*******************************************************************************/ + +asm_compare_and_swap: +1: + lwarx a6,0,a0 + subf. r0,a6,a1 + bne- 2f + or r0,a2,a2 + stwcx. r0,0,a0 + bne- 1b +2: + mr a0,a6 blr - .data -asm_criticalsections: -#if defined(ENABLE_THREADS) - .long _crit_begin - .long _crit_end - .long _crit_restart -#endif - .long 0 +/* asm_memory_barrier ********************************************************** + + XXX + +*******************************************************************************/ + +asm_memory_barrier: + sync + blr #if defined(__DARWIN__) @@ -1411,6 +875,26 @@ L_builtin_throw_exception$lazy_ptr: .long dyld_stub_binding_helper +.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 2 +L_md_codegen_get_pv_from_pc$stub: + .indirect_symbol _md_codegen_get_pv_from_pc + mflr r0 + bcl 20,31,L00$_md_codegen_get_pv_from_pc +L00$_md_codegen_get_pv_from_pc: + mflr r11 + addis r11,r11,ha16(L_md_codegen_get_pv_from_pc$lazy_ptr - L00$_md_codegen_get_pv_from_pc) + mtlr r0 + lwzu r12,lo16(L_md_codegen_get_pv_from_pc$lazy_ptr - L00$_md_codegen_get_pv_from_pc)(r11) + mtctr r12 + bctr +.data +.lazy_symbol_pointer +L_md_codegen_get_pv_from_pc$lazy_ptr: + .indirect_symbol _md_codegen_get_pv_from_pc + .long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 .align 2 L_exceptions_handle_exception$stub: @@ -1531,52 +1015,76 @@ L_exceptions_asm_new_abstractmethoderror$lazy_ptr: .long dyld_stub_binding_helper +# if defined(ENABLE_REPLACEMENT) + +.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 2 +L_replace_me$stub: + .indirect_symbol _replace_me + mflr r0 + bcl 20,31,L00$_replace_me +L00$_replace_me: + mflr r11 + addis r11,r11,ha16(L_replace_me$lazy_ptr - L00$_replace_me) + mtlr r0 + lwzu r12,lo16(L_replace_me$lazy_ptr - L00$_replace_me)(r11) + mtctr r12 + bctr +.data +.lazy_symbol_pointer +L_replace_me$lazy_ptr: + .indirect_symbol _replace_me + .long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 .align 2 -L_patcher_wrapper$stub: - .indirect_symbol _patcher_wrapper +L_replace_build_execution_state$stub: + .indirect_symbol _replace_build_execution_state mflr r0 - bcl 20,31,L00$_patcher_wrapper -L00$_patcher_wrapper: + bcl 20,31,L00$_replace_build_execution_state +L00$_replace_build_execution_state: mflr r11 - addis r11,r11,ha16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper) + addis r11,r11,ha16(L_replace_build_execution_state$lazy_ptr - L00$_replace_build_execution_state) mtlr r0 - lwzu r12,lo16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper)(r11) + lwzu 
r12,lo16(L_replace_build_execution_state$lazy_ptr - L00$_replace_build_execution_state)(r11) mtctr r12 bctr .data .lazy_symbol_pointer -L_patcher_wrapper$lazy_ptr: - .indirect_symbol _patcher_wrapper +L_replace_build_execution_state$lazy_ptr: + .indirect_symbol _replace_build_execution_state .long dyld_stub_binding_helper .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 .align 2 -L_replace_me$stub: - .indirect_symbol _replace_me +L_replace_free_safestack$stub: + .indirect_symbol _replace_free_safestack mflr r0 - bcl 20,31,L00$_replace_me -L00$_replace_me: + bcl 20,31,L00$_replace_free_safestack +L00$_replace_free_safestack: mflr r11 - addis r11,r11,ha16(L_replace_me$lazy_ptr - L00$_replace_me) + addis r11,r11,ha16(L_replace_free_safestack$lazy_ptr - L00$_replace_free_safestack) mtlr r0 - lwzu r12,lo16(L_replace_me$lazy_ptr - L00$_replace_me)(r11) + lwzu r12,lo16(L_replace_free_safestack$lazy_ptr - L00$_replace_free_safestack)(r11) mtctr r12 bctr .data .lazy_symbol_pointer -L_replace_me$lazy_ptr: - .indirect_symbol _replace_me +L_replace_free_safestack$lazy_ptr: + .indirect_symbol _replace_free_safestack .long dyld_stub_binding_helper +# endif /* ENABLE_REPLACEMENT */ + #endif /* defined(__DARWIN__) */ -/* Disable exec-stacks, required for Gentoo ***********************************/ +/* disable exec-stacks ********************************************************/ -#if defined(__GCC__) && defined(__ELF__) - .section .note.GNU-stack,"",@progbits +#if defined(__linux__) && defined(__ELF__) + .section .note.GNU-stack,"",%progbits #endif
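
The rewritten asm_vm_call_method in this patch no longer walks a vmarg array through per-type jump tables. The caller now passes the address of a flat block of 8-byte slots plus an explicit stack-argument count; the stub loads slots 0-7 into the integer argument registers (low word at offset +4, since the target is big-endian), slots 8-15 (8-20 on Darwin) into the float argument registers, and copies any remaining slots onto the stack. The C sketch below is inferred only from those load offsets; the authoritative structure is defined in CACAO's C headers, and all names in it are illustrative.

/* Sketch of the argument block consumed by asm_vm_call_method, as
   inferred from the load offsets in the assembly: 8-byte slots, integer
   arguments in the (big-endian) low word of slots 0-7, float/double
   arguments in slots 8-15 (8-20 on Darwin), remaining slots copied to
   the stack.  Illustrative only; not the CACAO header definition. */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VM_CALL_INT_SLOTS 8
#if defined(__APPLE__)
# define VM_CALL_FLT_SLOTS 13                 /* fa0-fa12 on Darwin      */
#else
# define VM_CALL_FLT_SLOTS 8                  /* fa0-fa7 on Linux        */
#endif

typedef union vm_call_slot {
	uint64_t l;                           /* integer/reference argument */
	double   d;                           /* float/double argument      */
} vm_call_slot;

/* pack one integer argument, one double argument and one stack argument */
static void pack_example(vm_call_slot *slots, int32_t i, double d,
                         int64_t stackarg)
{
	memset(slots, 0,
	       (VM_CALL_INT_SLOTS + VM_CALL_FLT_SLOTS + 1) * sizeof(*slots));

	slots[0].l = (uint32_t) i;            /* read by `lwz a0,0*8+4(t0)`  */
	slots[VM_CALL_INT_SLOTS].d = d;       /* read by `lfd fa0,8*8(t0)`   */
	slots[VM_CALL_INT_SLOTS + VM_CALL_FLT_SLOTS].l = (uint64_t) stackarg;
	                                      /* first copied stack slot     */
}

int main(void)
{
	vm_call_slot slots[VM_CALL_INT_SLOTS + VM_CALL_FLT_SLOTS + 1];

	pack_example(slots, 42, 3.5, 7);
	printf("slot 0 holds %llu\n", (unsigned long long) slots[0].l);
	return 0;
}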
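
The patch also retires asm_criticalsections/asm_getclassvalues_atomic in favour of two small synchronization primitives: asm_compare_and_swap, a lwarx/stwcx. retry loop that stores the new value only if the word still holds the expected one and returns the previously stored value, and asm_memory_barrier, a bare sync. The standalone C illustration below only mirrors those semantics; the prototypes are inferred from the register usage (a0 = word address, a1 = expected value, a2 = new value), and GCC __sync builtins stand in for the assembly so the example compiles without linking asmpart.S.

/* Emulation of the semantics exported by asm_compare_and_swap and
   asm_memory_barrier; the real routines are the PowerPC stubs above.
   Prototype shape is an assumption based on the register usage.       */

#include <stdio.h>

typedef unsigned long ptrint_t;          /* stand-in for CACAO's ptrint */

/* mirrors the lwarx/stwcx. loop: if *p still holds oldval, replace it
   with newval; in either case return what *p contained before         */
static ptrint_t compare_and_swap(volatile ptrint_t *p,
                                 ptrint_t oldval, ptrint_t newval)
{
	return __sync_val_compare_and_swap(p, oldval, newval);
}

/* mirrors the bare `sync` instruction: a full memory barrier          */
static void memory_barrier(void)
{
	__sync_synchronize();
}

int main(void)
{
	volatile ptrint_t lock = 0;

	if (compare_and_swap(&lock, 0, 1) == 0)   /* try to take a spin lock */
		puts("lock acquired");

	memory_barrier();                     /* order the critical section  */
	lock = 0;                             /* release */
	memory_barrier();

	return 0;
}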