-/* src/vm/jit/powerpc64/asmpart.S - Java-C interface functions for PowerPC
+/* src/vm/jit/powerpc64/asmpart.S - Java-C interface functions for PowerPC64
- Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
J. Wenninger, Institut f. Computersprachen - TU Wien
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Contact: cacao@cacaojvm.org
-
- Authors: Andreas Krall
- Reinhard Grafl
- Stefan Ring
-
- Changes: Christian Thalinger
- Edwin Steiner
-
- $Id: asmpart.S 5282 2006-08-28 19:31:37Z tbfg $
-
*/
#include "config.h"
+#define __ASSEMBLY__
+
#include "md-abi.h"
#include "md-asm.h"
#include "vm/jit/abi-asm.h"
#include "vm/jit/methodheader.h"
-#include "vm/jit/powerpc64/offsets.h"
-
-.section ".toc","aw"
-.section ".text"
-
- .align 2
/* export functions ***********************************************************/
-#ifdef ENABLE_LIBJVM
- .globl asm_vm_call_method
- .globl asm_vm_call_method_int
- .globl asm_vm_call_method_long
- .globl asm_vm_call_method_float
- .globl asm_vm_call_method_double
-#else
- .globl .asm_vm_call_method
- .globl .asm_vm_call_method_int
- .globl .asm_vm_call_method_long
- .globl .asm_vm_call_method_float
- .globl .asm_vm_call_method_double
-#endif
.globl asm_vm_call_method_exception_handler
+ .globl asm_vm_call_method_end
.globl asm_call_jit_compiler
.globl asm_patcher_wrapper
+#if defined(ENABLE_REPLACEMENT)
.globl asm_replacement_out
.globl .asm_replacement_in
+#endif
- .globl .asm_cacheflush /* no function descriptor needed, only called direct */
- .globl asm_criticalsections
- .globl .asm_getclassvalues_atomic
+ .globl asm_cacheflush
/* asm_vm_call_method **********************************************************
* void *arg1, void *arg2, void *arg3, void *arg4); *
* *
*******************************************************************************/
-
- .align 2
-
- .long 0 /* catch type all */
- .long 0 /* exception handler pc */
- .long 0 /* end pc */
- .long 0 /* start pc */
- .long 1 /* extable size */
- .long 0 /* line number table start */
- .long 0 /* line number table size */
- .long 0 /* fltsave */
- .long 0 /* intsave */
- .long 0 /* isleaf */
- .long 0 /* IsSync */
- .long 0 /* frame size */
- .long 0 /* codeinfo pointer */
+	/* this is the method header; see src/vm/jit/methodheader.h */
+
+ .align 3
+
+ .quad 0 /* catch type all */
+ .quad 0 /* handler pc */
+ .quad 0 /* end pc */
+ .quad 0 /* start pc */
+ .long 1 /* extable size */
+ .long 0 /* ALIGNMENT PADDING */
+ .quad 0 /* line number table start */
+ .quad 0 /* line number table size */
+ .long 0 /* ALIGNMENT PADDING */
+ .long 0 /* fltsave */
+ .long 0 /* intsave */
+ .long 0 /* isleaf */
+ .long 0 /* IsSync */
+ .long 0 /* frame size */
+ .quad 0 /* codeinfo pointer */
#ifdef ENABLE_LIBJVM
+
+ .globl asm_vm_call_method
+ .globl asm_vm_call_method_int
+ .globl asm_vm_call_method_long
+ .globl asm_vm_call_method_float
+ .globl asm_vm_call_method_double
.section ".opd","aw"
.align 3
#else
asm_vm_call_method:
.globl asm_vm_call_method
+ asm_vm_call_method_int:
+ .globl asm_vm_call_method_int
+ asm_vm_call_method_long:
+ .globl asm_vm_call_method_long
+ asm_vm_call_method_float:
+ .globl asm_vm_call_method_float
+ asm_vm_call_method_double:
+ .globl asm_vm_call_method_double
#endif
.asm_vm_call_method:
std s0,8*8(sp) /* save used callee saved registers */
std a0,9*8(sp) /* save method pointer for compiler */
-#if defined(__DARWIN__)
- std itmp1,10*8(sp) /* register r11 is callee saved */
-#endif
std pv,11*8(sp) /* save PV register */
std itmp3,12*8(sp) /* registers r14-r31 are callee saved */
stfd ftmp1,13*8(sp) /* registers f14-f31 are callee saved */
stfd ftmp2,14*8(sp)
-#if defined(__DARWIN__)
- std t1,15*8(r1)
- std t2,16*8(r1)
- std t3,17*8(r1)
- std t4,18*8(r1)
- std t5,19*8(r1)
- std t6,20*8(r1)
- std t7,21*8(r1)
-
- stfd ft0,22*8(r1)
- stfd ft1,23*8(r1)
- stfd ft2,24*8(r1)
- stfd ft3,25*8(r1)
- stfd ft4,26*8(r1)
- stfd ft5,27*8(r1)
-#else
- SAVE_TEMPORARY_REGISTERS(15) /* the offset has to be even */
-#endif
-
- mr itmp2,a1 /* arg count */
- mr itmp1,a2 /* pointer to arg block */
- mr t4,itmp2 /* save argument count */
- mr t5,itmp1 /* save argument block pointer */
+ SAVE_TEMPORARY_REGISTERS(15)
+ mr s0, r1 /* save stack pointer */
- mr s0,sp /* save current sp to s0 */
-
- addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */
- addi itmp2,itmp2,1 /* initialize argument count */
- li t0,0 /* initialize integer argument counter */
- li t1,0 /* initialize float argument counter */
- li t6,0 /* initialize integer register counter */
-#if defined(__DARWIN__)
- li t7,0 /* initialize stack slot counter */
-#endif
-
- mflr r0 /* save link register (PIC code) */
- bl L_asm_vm_call_method_get_pc
-L_asm_vm_call_method_get_pc:
- mflr t3 /* t3 contains the current pc */
- mtlr r0
+	/* a1 contains a pointer to a uint64_t structure filled with all INT_ARG_REG,
+	   followed by ADR_ARG_CNT and FLT_ARG_CNT; after those come any values that
+	   still need to be copied onto the stack.
+	   a2 contains the number of additional stack slots to be copied
+	*/
L_register_copy:
- addi itmp1,itmp1,sizevmarg /* goto next argument block */
- addi itmp2,itmp2,-1 /* argument count - 1 */
- mr. itmp2,itmp2
- beq L_register_copy_done
-
- lwz itmp3,offvmargtype+4(itmp1)
- andi. r0,itmp3,0x0002 /* is this a float/double type? */
- bne L_register_handle_float
-
-L_register_handle_int:
- cmpwi t6,INT_ARG_CNT /* are we out of integer argument */
- beq L_register_copy /* registers? yes, next loop */
-
- andi. r0,itmp3,0x0001 /* is this a 2-word type? */
- bne L_register_handle_long
+ mr t1, a1
+ mr t2, a2
+
+ ld a0 , 0*8(t1)
+ ld a1 , 1*8(t1)
+ ld a2 , 2*8(t1)
+ ld a3 , 3*8(t1)
+ ld a4 , 4*8(t1)
+ ld a5 , 5*8(t1)
+ ld a6 , 6*8(t1)
+ ld a7 , 7*8(t1)
+
+ lfd fa0 , 8*8(t1)
+ lfd fa1 , 9*8(t1)
+ lfd fa2 ,10*8(t1)
+ lfd fa3 ,11*8(t1)
+ lfd fa4 ,12*8(t1)
+ lfd fa5 ,13*8(t1)
+ lfd fa6 ,14*8(t1)
+ lfd fa7 ,15*8(t1)
+ lfd fa8 ,16*8(t1)
+ lfd fa9 ,17*8(t1)
+ lfd fa10,18*8(t1)
+ lfd fa11,19*8(t1)
+ lfd fa12,20*8(t1)
+
+ mr. t2,t2
+ beq L_stack_copy_done
+
+L_stack_copy:
+ addi t1,t1,20*8 /* before first possible stack slot arg */
+ mr t3,t2 /* argument counter */
+	sldi	t2,t2,8			/* stack size; NOTE(review): <<8 scales by 256 per slot, 8-byte slots suggest <<3 -- confirm */
+ sub sp,sp,t2 /* increase the stack */
+ mr t2,sp /* t2 points to bottom of stack now */
-#if defined(__DARWIN__)
- addis itmp3,t3,ha16(L_jumptable_int - L_asm_vm_call_method_get_pc)
- la itmp3,lo16(L_jumptable_int - L_asm_vm_call_method_get_pc)(itmp3)
-#else
- lis itmp3,L_jumptable_int@highest /* load 64bit address */
- ori itmp3,itmp3,L_jumptable_int@higher
- rldicr itmp3,itmp3,32,31
- oris itmp3,itmp3,L_jumptable_int@h
- ori itmp3,itmp3,L_jumptable_int@l
-#endif
-
- slwi t2,t6,2 /* multiple of 4-bytes */
- add itmp3,itmp3,t2 /* calculate address of jumptable */
- ld itmp3,0(itmp3) /* load function address */
- mtctr itmp3
- addi t0,t0,1 /* integer argument counter */
- addi t6,t6,1 /* integer argument register counter */
-#if defined(__DARWIN__)
- addi t7,t7,1 /* stack slot counter */
-#endif
- bctr
-
-L_register_handle_long:
-#if defined(__DARWIN__)
- addis itmp3,t3,ha16(L_jumptable_long - L_asm_vm_call_method_get_pc)
- la itmp3,lo16(L_jumptable_long - L_asm_vm_call_method_get_pc)(itmp3)
-#else
- lis itmp3,L_jumptable_long@ha
- addi itmp3,itmp3,L_jumptable_long@l
-#endif
-#if !defined(__DARWIN__)
- addi t6,t6,1 /* align to even numbers */
- andi. t6,t6,0xfffe
-#endif
-
- cmpwi t6,(INT_ARG_CNT - 1) /* are we out of integer argument */
- blt L_register_handle_long_continue /* registers? */
-
- li t6,INT_ARG_CNT /* yes, set integer argument register */
- b L_register_copy /* count to max and next loop */
-
-L_register_handle_long_continue:
- slwi t2,t6,2 /* multiple of 4-bytes */
- add itmp3,itmp3,t2 /* calculate address of jumptable */
- lwz itmp3,0(itmp3) /* load function address */
- mtctr itmp3
- addi t0,t0,1 /* integer argument counter */
- addi t6,t6,2 /* integer argument register counter */
-#if defined(__DARWIN__)
- addi t7,t7,2 /* stack slot counter */
-#endif
- bctr
-
-L_register_handle_float:
- cmpwi t1,FLT_ARG_CNT /* are we out of float argument */
- beq L_register_copy /* registers? yes, next loop */
-
- andi. r0,itmp3,0x0001 /* is this a 2-word type? */
- bne L_register_handle_double
-
-#if defined(__DARWIN__)
- addis itmp3,t3,ha16(L_jumptable_float - L_asm_vm_call_method_get_pc)
- la itmp3,lo16(L_jumptable_float - L_asm_vm_call_method_get_pc)(itmp3)
-#else
- lis itmp3,L_jumptable_float@ha
- addi itmp3,itmp3,L_jumptable_float@l
-#endif
-
- slwi t2,t1,2 /* multiple of 4-bytes */
- add itmp3,itmp3,t2 /* calculate address of jumptable */
- lwz itmp3,0(itmp3) /* load function address */
- mtctr itmp3
- addi t1,t1,1 /* float argument counter */
-#if defined(__DARWIN__)
- addi t7,t7,1 /* stack slot counter */
- addi t6,t6,1 /* skip 1 integer argument register */
-#endif
- bctr
-
-L_register_handle_double:
-#if defined(__DARWIN__)
- addis itmp3,t3,ha16(L_jumptable_double - L_asm_vm_call_method_get_pc)
- la itmp3,lo16(L_jumptable_double - L_asm_vm_call_method_get_pc)(itmp3)
-#else
- lis itmp3,L_jumptable_double@ha
- addi itmp3,itmp3,L_jumptable_double@l
-#endif
-
- slwi t2,t1,2 /* multiple of 4-bytes */
- add itmp3,itmp3,t2 /* calculate address of jumptable */
- lwz itmp3,0(itmp3) /* load function address */
- mtctr itmp3
- addi t1,t1,1 /* float argument counter */
-#if defined(__DARWIN__)
- addi t7,t7,2 /* stack slot counter */
- addi t6,t6,2 /* skip 2 integer argument registers */
-#endif
- bctr
-
-L_register_copy_done:
- /* calculate remaining arguments */
- sub itmp3,t4,t0 /* - integer arguments in registers */
- sub itmp3,itmp3,t1 /* - float arguments in registers */
- mr. itmp3,itmp3
- beq L_stack_copy_done
-
- mr itmp2,t4 /* restore argument count */
- mr itmp1,t5 /* restore argument block pointer */
-
- slwi t4,itmp3,3 /* XXX use 8-bytes slots for now */
- addi t4,t4,LA_SIZE /* add size of linkage area */
-
-#if defined(__DARWIN__)
- slwi t5,t7,2 /* add stack space for arguments */
- add t4,t4,t5
-#endif
-
- sub sp,sp,t4
-
- mr t6,sp /* use t6 as temporary sp */
- addi t6,t6,LA_SIZE /* skip linkage area */
-#if defined(__DARWIN__)
- add t6,t6,t5 /* skip stack space for arguments */
-#endif
-
- addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */
- addi itmp2,itmp2,1 /* initialize argument count */
-
L_stack_copy_loop:
- addi itmp1,itmp1,sizevmarg /* goto next argument block */
- addi itmp2,itmp2,-1 /* argument count - 1 */
- mr. itmp2,itmp2
- beq L_stack_copy_done
-
- lwz itmp3,offvmargtype+4(itmp1)
- andi. r0,itmp3,0x0002 /* is this a float/double type? */
- bne L_stack_handle_float
-
-L_stack_handle_int:
- addi t0,t0,-1 /* arguments assigned to registers */
- mr. t0,t0
- bge L_stack_copy_loop
-
- andi. r0,itmp3,0x0001 /* is this a 2-word type? */
- bne L_stack_handle_long
-
- lwz itmp3,offvmargdata+4(itmp1) /* get integer argument */
- stw itmp3,0(t6) /* and store it on the stack */
- addi t6,t6,4 /* increase temporary sp by 1 slot */
- b L_stack_copy_loop
-
-L_stack_handle_long:
-#if !defined(__DARWIN__)
- addi t6,t6,4 /* align stack to 8-bytes */
- rlwinm t6,t6,0,30,28 /* clear lower 4-bits */
-#endif
-
- lwz itmp3,offvmargdata+0(itmp1) /* get long argument */
- stw itmp3,0(t6) /* and store it on the stack */
- lwz itmp3,offvmargdata+4(itmp1)
- stw itmp3,4(t6)
- addi t6,t6,8 /* increase temporary sp by 2 slots */
- b L_stack_copy_loop
-
-L_stack_handle_float:
- addi t1,t1,-1 /* arguments assigned to registers */
- mr. t1,t1
- bge L_stack_copy_loop
-
- andi. r0,itmp3,0x0001 /* is this a 2-word type? */
- bne L_stack_handle_double
-
- lfs ftmp3,offvmargdata(itmp1) /* get float argument */
- stfs ftmp3,0(t6) /* and store it on the stack */
- addi t6,t6,4 /* increase temporary sp by 1 slot */
- b L_stack_copy_loop
-
-L_stack_handle_double:
-#if !defined(__DARWIN__)
- addi t6,t6,4 /* align stack to 8-bytes */
- rlwinm t6,t6,0,30,28 /* clear lower 4-bits */
-#endif
-
- lfd ftmp3,offvmargdata(itmp1) /* get double argument */
- stfd ftmp3,0(t6) /* and store it on the stack */
- addi t6,t6,8 /* increase temporary sp by 2 slots */
- b L_stack_copy_loop
+ addi t1,t1,8 /* next possible stack slot to copy */
+ mr. t3,t3 /* more stack slots to copy ? */
+ beq L_stack_copy_done
+ ld itmp3, 0(t1)
+ std itmp3, 0(t2)
+ addi t2,t2,8
+ addi t3,t3,-1
+ b L_stack_copy_loop
L_stack_copy_done:
- ld itmp1,9*8(s0) /* pass method pointer via tmp1 */
-
-#if defined(__DARWIN__)
- addis mptr,t3,ha16(L_asm_call_jit_compiler - L_asm_vm_call_method_get_pc)
- la mptr,lo16(L_asm_call_jit_compiler - L_asm_vm_call_method_get_pc)(mptr)
-#else
- lis mptr,L_asm_call_jit_compiler@highest /* load 64bit address */
- ori mptr,mptr,L_asm_call_jit_compiler@higher
- rldicr mptr,mptr,32,31
- oris mptr,mptr,L_asm_call_jit_compiler@h
- ori mptr,mptr,L_asm_call_jit_compiler@l
-#endif
- std mptr,7*8(s0)
- addi mptr,s0,7*8
-
- ld pv,0*8(mptr)
+ mr itmp1, s0 /* fake invokevirtual invocation */
+	addi	itmp1, itmp1, 9*8	/* address of method's pv */
+ ld pv,0*8(itmp1)
mtctr pv
bctrl
1:
mflr itmp1
-#if defined(__DARWIN__)
- addi pv,itmp1,lo16(.asm_vm_call_method - 1b)
-#else
addi pv,itmp1,(.asm_vm_call_method - 1b)@l
-#endif
L_asm_vm_call_method_return:
mr sp,s0 /* restore the function's sp */
ld s0,8*8(sp) /* restore used callee saved registers */
-#if defined(__DARWIN__)
- lwz itmp1,10*4(sp) /* register r11 is callee saved */
-#endif
ld pv,11*8(sp) /* save PV register */
ld itmp3,12*8(sp)
- lfd ftmp1,14*8(sp) /* registers f14-f31 are callee saved */
- lfd ftmp2,16*8(sp)
-
-#if defined(__DARWIN__)
- lwz t1,18*4(r1)
- lwz t2,19*4(r1)
- lwz t3,20*4(r1)
- lwz t4,21*4(r1)
- lwz t5,22*4(r1)
- lwz t6,23*4(r1)
- lwz t7,24*4(r1)
-
- lfd ft0,26*4(r1)
- lfd ft1,28*4(r1)
- lfd ft2,30*4(r1)
- lfd ft3,32*4(r1)
- lfd ft4,34*4(r1)
- lfd ft5,36*4(r1)
-#else
- RESTORE_TEMPORARY_REGISTERS(18) /* the offset has to be even */
-#endif
+ lfd ftmp1,13*8(sp) /* registers f14-f31 are callee saved */
+ lfd ftmp2,14*8(sp)
+
+ RESTORE_TEMPORARY_REGISTERS(15)
ld r0,40*8+LA_LR_OFFSET(r1)
- mtlr r0
- addi r1,r1,40*8
+ mtlr r0
+ addi r1,r1,40*8
blr
asm_vm_call_method_exception_handler:
bl builtin_throw_exception
b L_asm_vm_call_method_return
-
- .data
- .align 4
-
-L_jumptable_int:
- .quad L_handle_a0
- .quad L_handle_a1
- .quad L_handle_a2
- .quad L_handle_a3
- .quad L_handle_a4
- .quad L_handle_a5
- .quad L_handle_a6
- .quad L_handle_a7
-
- .text
- .align 4
-
-L_handle_a0:
- lwz a0,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a1:
- lwz a1,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a2:
- lwz a2,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a3:
- lwz a3,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a4:
- lwz a4,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a5:
- lwz a5,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a6:
- lwz a6,offvmargdata+4(itmp1)
- b L_register_copy
-L_handle_a7:
- lwz a7,offvmargdata+4(itmp1)
- b L_register_copy
-
-
- .data
- .align 2
-
-L_jumptable_long:
-#if defined(__DARWIN__)
- .quad L_handle_a0_a1
- .quad L_handle_a1_a2
- .quad L_handle_a2_a3
- .quad L_handle_a3_a4
- .quad L_handle_a4_a5
- .quad L_handle_a5_a6
- .quad L_handle_a6_a7
-#else
- /* we have two entries here, so we get the even argument register
- alignment for linux */
-
- .quad L_handle_a0_a1
- .quad 0
- .quad L_handle_a2_a3
- .quad 0
- .quad L_handle_a4_a5
- .quad 0
- .quad L_handle_a6_a7
-#endif
-
- .text
- .align 2
-
-L_handle_a0_a1:
- lwz a0,offvmargdata+0(itmp1)
- lwz a1,offvmargdata+4(itmp1)
- b L_register_copy
-#if defined(__DARWIN__)
-L_handle_a1_a2:
- lwz a1,offvmargdata+0(itmp1)
- lwz a2,offvmargdata+4(itmp1)
- b L_register_copy
-#endif
-L_handle_a2_a3:
- lwz a2,offvmargdata+0(itmp1)
- lwz a3,offvmargdata+4(itmp1)
- b L_register_copy
-#if defined(__DARWIN__)
-L_handle_a3_a4:
- lwz a3,offvmargdata+0(itmp1)
- lwz a4,offvmargdata+4(itmp1)
- b L_register_copy
-#endif
-L_handle_a4_a5:
- lwz a4,offvmargdata+0(itmp1)
- lwz a5,offvmargdata+4(itmp1)
- b L_register_copy
-#if defined(__DARWIN__)
-L_handle_a5_a6:
- lwz a5,offvmargdata+0(itmp1)
- lwz a6,offvmargdata+4(itmp1)
- b L_register_copy
-#endif
-L_handle_a6_a7:
- lwz a6,offvmargdata+0(itmp1)
- lwz a7,offvmargdata+4(itmp1)
- b L_register_copy
-
-
- .data
- .align 2
-
-L_jumptable_float:
- .quad L_handle_fa0
- .quad L_handle_fa1
- .quad L_handle_fa2
- .quad L_handle_fa3
- .quad L_handle_fa4
- .quad L_handle_fa5
- .quad L_handle_fa6
- .quad L_handle_fa7
-
-#if defined(__DARWIN__)
- .quad L_handle_fa8
- .quad L_handle_fa9
- .quad L_handle_fa10
- .quad L_handle_fa11
- .quad L_handle_fa12
-#endif
-
- .text
- .align 2
-
-L_handle_fa0:
- lfs fa0,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa1:
- lfs fa1,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa2:
- lfs fa2,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa3:
- lfs fa3,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa4:
- lfs fa4,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa5:
- lfs fa5,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa6:
- lfs fa6,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa7:
- lfs fa7,offvmargdata(itmp1)
- b L_register_copy
-
-#if defined(__DARWIN__)
-L_handle_fa8:
- lfs fa8,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa9:
- lfs fa9,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa10:
- lfs fa10,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa11:
- lfs fa11,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fa12:
- lfs fa12,offvmargdata(itmp1)
- b L_register_copy
-#endif
-
-
- .data
- .align 2
-
-L_jumptable_double:
- .quad L_handle_fda0
- .quad L_handle_fda1
- .quad L_handle_fda2
- .quad L_handle_fda3
- .quad L_handle_fda4
- .quad L_handle_fda5
- .quad L_handle_fda6
- .quad L_handle_fda7
-
-#if defined(__DARWIN__)
- .quad L_handle_fda8
- .quad L_handle_fda9
- .quad L_handle_fda10
- .quad L_handle_fda11
- .quad L_handle_fda12
-#endif
-
- .text
- .align 2
-
-L_handle_fda0:
- lfd fa0,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda1:
- lfd fa1,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda2:
- lfd fa2,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda3:
- lfd fa3,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda4:
- lfd fa4,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda5:
- lfd fa5,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda6:
- lfd fa6,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda7:
- lfd fa7,offvmargdata(itmp1)
- b L_register_copy
-
-#if defined(__DARWIN__)
-L_handle_fda8:
- lfd fa8,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda9:
- lfd fa9,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda10:
- lfd fa10,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda11:
- lfd fa11,offvmargdata(itmp1)
- b L_register_copy
-L_handle_fda12:
- lfd fa12,offvmargdata(itmp1)
- b L_register_copy
-#endif
-
+asm_vm_call_method_end:
+ nop
/* asm_call_jit_compiler *******************************************************
std r0,LA_LR_OFFSET(sp) /* save return address */
stdu r1,-(LA_SIZE+PA_SIZE+ARG_CNT*8)(sp)
-#if defined(__DARWIN__)
- stw a0,LA_SIZE+(5+0)*8(r1)
- stw a1,LA_SIZE+(5+1)*8(r1)
- stw a2,LA_SIZE+(5+2)*8(r1)
- stw a3,LA_SIZE+(5+3)*8(r1)
- stw a4,LA_SIZE+(5+4)*8(r1)
- stw a5,LA_SIZE+(5+5)*8(r1)
- stw a6,LA_SIZE+(5+6)*8(r1)
- stw a7,LA_SIZE+(5+7)*8(r1)
-
- stfd fa0,LA_SIZE+(5+8)*8(r1)
- stfd fa1,LA_SIZE+(5+10)*8(r1)
- stfd fa2,LA_SIZE+(5+12)*8(r1)
- stfd fa3,LA_SIZE+(5+14)*8(r1)
- stfd fa4,LA_SIZE+(5+16)*8(r1)
- stfd fa5,LA_SIZE+(5+18)*8(r1)
- stfd fa6,LA_SIZE+(5+20)*8(r1)
- stfd fa7,LA_SIZE+(5+22)*8(r1)
- stfd fa8,LA_SIZE+(5+24)*8(r1)
- stfd fa9,LA_SIZE+(5+26)*8(r1)
- stfd fa10,LA_SIZE+(5+28)*8(r1)
- stfd fa11,LA_SIZE+(5+30)*8(r1)
- stfd fa12,LA_SIZE+(5+32)*8(r1)
-#else
SAVE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS)
-#endif
mr a0,itmp1
mr a1,mptr
ori r0,r0,0 /* nop needed after jump to function desc. */
mr pv,v0 /* move address to pv register */
-#if defined(__DARWIN__)
- lwz a0,LA_SIZE+(+5+0)*8(r1)
- lwz a1,LA_SIZE+(+5+1)*8(r1)
- lwz a2,LA_SIZE+(+5+2)*8(r1)
- lwz a3,LA_SIZE+(+5+3)*8(r1)
- lwz a4,LA_SIZE+(+5+4)*8(r1)
- lwz a5,LA_SIZE+(+5+5)*8(r1)
- lwz a6,LA_SIZE+(+5+6)*8(r1)
- lwz a7,LA_SIZE+(+5+7)*8(r1)
-
- lfd fa0,LA_SIZE+(+5+8)*8(r1)
- lfd fa1,LA_SIZE+(+5+10)*8(r1)
- lfd fa2,LA_SIZE+(+5+12)*8(r1)
- lfd fa3,LA_SIZE+(+5+14)*8(r1)
- lfd fa4,LA_SIZE+(+5+16)*8(r1)
- lfd fa5,LA_SIZE+(+5+18)*8(r1)
- lfd fa6,LA_SIZE+(+5+20)*8(r1)
- lfd fa7,LA_SIZE+(+5+22)*8(r1)
- lfd fa8,LA_SIZE+(+5+24)*8(r1)
- lfd fa9,LA_SIZE+(+5+26)*8(r1)
- lfd fa10,LA_SIZE+(+5+28)*8(r1)
- lfd fa11,LA_SIZE+(+5+30)*8(r1)
- lfd fa12,LA_SIZE+(+5+32)*8(r1)
-#else
RESTORE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS)
-#endif
ld itmp1,(LA_SIZE + PA_SIZE + ARG_CNT*8)+LA_LR_OFFSET(sp)
mtlr itmp1
L_asm_call_jit_compiler_exception:
mflr r0
- stw r0,LA_LR_OFFSET(sp)
- stwu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
+ std r0,LA_LR_OFFSET(sp)
+ stdu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
bl exceptions_get_and_clear_exception
- lwz xpc,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp)
+ ld xpc,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp)
mtlr xpc
addi sp,sp,LA_SIZE_ALIGNED
asm_handle_nat_exception:
L_asm_handle_nat_exception: /* required for PIC code */
- mflr r9
- lwz itmp3,4(r9)
- extsh itmp3,itmp3
- add pv,itmp3,r9
- lwz itmp3,8(r9)
- srwi itmp3,itmp3,16
- cmpwi itmp3,0x3dad
- bne L_asm_handle_exception
- lwz itmp3,8(r9)
- slwi itmp3,itmp3,16
- add pv,pv,itmp3
+L_asm_handle_exception_stack_loop:
+ mflr r0
+ addi sp,sp,-(LA_SIZE+PA_SIZE+((4+6)*8)) /* allocate stack (+4 for darwin) */
+ std xptr,LA_SIZE+PA_SIZE+(4+0)*8(sp) /* save exception pointer */
+ std xpc,LA_SIZE+PA_SIZE+(4+1)*8(sp) /* save exception pc */
+ std r0,LA_SIZE+PA_SIZE+(4+3)*8(sp) /* save return address */
+ li itmp3,0
+ std itmp3,LA_SIZE+PA_SIZE+(4+4)*8(sp) /* save maybe-leaf flag (cleared) */
+
+ mr a0,r0 /* pass return address */
+ bl md_codegen_get_pv_from_pc /* get PV from RA */
+ std v0,LA_SIZE+PA_SIZE+(4+2)*8(sp) /* save data segment pointer */
+
+ ld a0,LA_SIZE+PA_SIZE+(4+0)*8(sp) /* pass xptr */
+ ld a1,LA_SIZE+PA_SIZE+(4+1)*8(sp) /* pass xpc */
+ ld a2,LA_SIZE+PA_SIZE+(4+2)*8(sp) /* pass PV (v0 == a0) */
+ addi a3,sp,LA_SIZE+PA_SIZE+((4+6)*8) /* pass Java SP */
+
+ b L_asm_handle_exception_continue
+
asm_handle_exception:
L_asm_handle_exception: /* required for PIC code */
addi sp,sp,-(ARG_CNT+TMP_CNT)*8 /* create maybe-leaf stackframe */
-#if defined(__DARWIN__)
-#else
SAVE_ARGUMENT_REGISTERS(0) /* we save arg and temp registers in */
SAVE_TEMPORARY_REGISTERS(ARG_CNT) /* case this is a leaf method */
-#endif
-
- li a3,(ARG_CNT+TMP_CNT)*8 /* prepare a3 for handle_exception */
- li a4,1 /* set maybe-leaf flag */
-
-L_asm_handle_exception_stack_loop:
- addi sp,sp,-(LA_SIZE+PA_SIZE+4*8) /* allocate stack */
- std xptr,LA_SIZE+PA_SIZE+1*8(sp) /* save exception pointer */
- std pv,LA_SIZE+PA_SIZE+2*8(sp) /* save data segment pointer */
- mflr r0 /* save return address */
- std r0,LA_SIZE+PA_SIZE+3*8(sp) /* XXX */
- add a3,a3,sp /* calculate Java sp into a3... */
- addi a3,a3,LA_SIZE+PA_SIZE+4*8
- std a4,LA_SIZE+PA_SIZE+4*8(sp) /* save maybe-leaf flag */
+ addi sp,sp,-(LA_SIZE+PA_SIZE+(4+6)*8) /* allocate stack */
+ std xptr,LA_SIZE+PA_SIZE+(4+0)*8(sp) /* save exception pointer */
+ std pv,LA_SIZE+PA_SIZE+(4+2)*8(sp) /* save data segment pointer */
+ mflr r0 /* save return address */
+ std r0,LA_SIZE+PA_SIZE+(4+3)*8(sp)
+ li t0, 1
+ std t0, LA_SIZE+PA_SIZE+(4+4)*8(sp) /* maybe-leaf flag */
+
mr a0,xptr /* pass exception pointer */
mr a1,xpc /* pass exception pc */
mr a2,pv /* pass data segment pointer */
- /* a3 is still set */
+ addi a3,sp,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+(4+6)*8
+
+
+L_asm_handle_exception_continue:
bl exceptions_handle_exception
mr. v0,v0
beq L_asm_handle_exception_not_catched
- mr xpc,v0 /* move handlerpc into xpc */
- ld xptr,LA_SIZE+PA_SIZE+1*8(sp) /* restore exception pointer */
- ld pv,LA_SIZE+PA_SIZE+2*8(sp) /* restore data segment pointer */
- ld r0,LA_SIZE+PA_SIZE+3*8(sp) /* restore return address */
+ mr xpc,v0 /* move handlerpc into xpc */
+ ld xptr,LA_SIZE+PA_SIZE+(4+0)*8(sp) /* restore exception pointer */
+ ld pv,LA_SIZE+PA_SIZE+(4+2)*8(sp) /* restore data segment pointer */
+ ld r0,LA_SIZE+PA_SIZE+(4+3)*8(sp) /* restore return address */
mtlr r0
- ld a4,LA_SIZE+PA_SIZE+4*8(sp) /* get maybe-leaf flag */
- addi sp,sp,LA_SIZE+PA_SIZE+4*8 /* free stack frame */
+ ld t0,LA_SIZE+PA_SIZE+(4+4)*8(sp) /* get maybe-leaf flag */
+ addi sp,sp,LA_SIZE+PA_SIZE+(4+6)*8 /* free stack frame */
- mr. a4,a4
+ mr. t0,t0
beq L_asm_handle_exception_no_leaf
-#if defined(__DARWIN__)
-#else
RESTORE_ARGUMENT_REGISTERS(0) /* if this is a leaf method, we have */
RESTORE_TEMPORARY_REGISTERS(ARG_CNT)/* to restore arg and temp registers */
-#endif
addi sp,sp,(ARG_CNT+TMP_CNT)*8 /* remove maybe-leaf stackframe */
bctr
L_asm_handle_exception_not_catched:
- ld xptr,LA_SIZE+PA_SIZE+1*8(sp) /* restore exception pointer */
- ld pv,LA_SIZE+PA_SIZE+2*8(sp) /* restore data segment pointer */
- ld r0,LA_SIZE+PA_SIZE+3*8(sp) /* restore return address */
+ ld xptr,LA_SIZE+PA_SIZE+(4+0)*8(sp) /* restore exception pointer */
+ ld pv,LA_SIZE+PA_SIZE+(4+2)*8(sp) /* restore data segment pointer */
+ ld r0,LA_SIZE+PA_SIZE+(4+3)*8(sp) /* restore return address */
mtlr r0
- ld a4,LA_SIZE+PA_SIZE+4*8(sp) /* get maybe-leaf flag */
- addi sp,sp,LA_SIZE+PA_SIZE+4*8 /* free stack frame */
+ ld t0,LA_SIZE+PA_SIZE+(4+4)*8(sp) /* get maybe-leaf flag */
+ addi sp,sp,LA_SIZE+PA_SIZE+(4+6)*8 /* free stack frame */
- mr. a4,a4
+ mr. t0,t0
beq L_asm_handle_exception_no_leaf_stack
addi sp,sp,(ARG_CNT+TMP_CNT)*8 /* remove maybe-leaf stackframe */
- li a4,0 /* clear the maybe-leaf flag */
+ li t0,0 /* clear the maybe-leaf flag */
L_asm_handle_exception_no_leaf_stack:
- ld t0,FrameSize(pv) /* get frame size */
- add t0,sp,t0 /* pointer to save area */
+ lwz t1,FrameSize(pv) /* get frame size */
+ add t1,sp,t1 /* pointer to save area */
- ld t1,IsLeaf(pv) /* is leaf procedure */
- mr. t1,t1
+ lwz t2,IsLeaf(pv) /* is leaf procedure */
+ mr. t2,t2
bne L_asm_handle_exception_no_ra_restore
- ld r0,LA_LR_OFFSET(t0) /* restore ra */
+ ld r0,LA_LR_OFFSET(t1) /* restore ra */
mtlr r0
L_asm_handle_exception_no_ra_restore:
mflr xpc /* the new xpc is ra */
- ld t1,IntSave(pv) /* t1 = saved int register count */
+ mr t4,xpc /* save RA */
+	lwz	t2,IntSave(pv)		/* t2 = saved int register count */
bl ex_int1
ex_int1:
- mflr t2 /* t2 = current pc */
-#if defined(__DARWIN__)
- addi t2,t2,lo16(ex_int2-ex_int1)
-#else
- addi t2,t2,(ex_int2-ex_int1)@l
-#endif
- slwi t1,t1,3 /* t1 = register count * 8 */
- subf t2,t1,t2 /* t2 = IntSave - t1 */
- mtctr t2
+ mflr t3 /* t3 = current pc */
+ addi t3,t3,(ex_int2-ex_int1)@l
+ slwi t2,t2,2 /* t2 = register count * 4 */
+ subf t3,t2,t3 /* t3 = IntSave - t2 */
+ mtctr t3
bctr
- ld s0,-10*8(t0)
- ld s1,-9*8(t0)
- ld s2,-8*8(t0)
- ld s3,-7*8(t0)
- ld s4,-6*8(t0)
- ld s5,-5*8(t0)
- ld s6,-4*8(t0)
- ld s7,-3*8(t0)
- ld s8,-2*8(t0)
- /*lwz s9,-1*4(t0) XXX */
+ ld s0,-9*8(t1)
+ ld s1,-8*8(t1)
+ ld s2,-7*8(t1)
+ ld s3,-6*8(t1)
+ ld s4,-5*8(t1)
+ ld s5,-4*8(t1)
+ ld s6,-3*8(t1)
+ ld s7,-2*8(t1)
+ ld s8,-1*8(t1)
ex_int2:
- subf t0,t1,t0 /* t0 = t0 - register count * 8 */
-
- ld t1,FltSave(pv)
+ subf t1,t2,t1 /* t1 = t1 - register count * 4 */
+ lwz t2,FltSave(pv)
bl ex_flt1
ex_flt1:
- mflr t2
-#if defined(__DARWIN__)
- addi t2,t2,lo16(ex_flt2-ex_flt1)
-#else
- addi t2,t2,(ex_flt2-ex_flt1)@l
-#endif
- slwi t1,t1,3 /* t1 = register count * 8 */
- subf t2,t1,t2 /* t2 = FltSave - t1 */
- mtctr t2
+ mflr t3
+ addi t3,t3,(ex_flt2-ex_flt1)@l
+ slwi t2,t2,2 /* t2 = register count * 4 */
+ subf t3,t2,t3 /* t3 = FltSave - t2 */
+ mtctr t3
bctr
- lfd fs0,-10*8(t0)
- lfd fs1,-9*8(t0)
- lfd fs2,-8*8(t0)
- lfd fs3,-7*8(t0)
- lfd fs4,-6*8(t0)
- lfd fs5,-5*8(t0)
- lfd fs6,-4*8(t0)
- lfd fs7,-3*8(t0)
- lfd fs8,-2*8(t0)
- lfd fs9,-1*8(t0)
+ lfd fs0,-10*8(t1)
+ lfd fs1,-9*8(t1)
+ lfd fs2,-8*8(t1)
+ lfd fs3,-7*8(t1)
+ lfd fs4,-6*8(t1)
+ lfd fs5,-5*8(t1)
+ lfd fs6,-4*8(t1)
+ lfd fs7,-3*8(t1)
+ lfd fs8,-2*8(t1)
+ lfd fs9,-1*8(t1)
ex_flt2:
- ld t0,FrameSize(pv) /* get frame size */
- add sp,sp,t0 /* unwind stack */
- li a3,0 /* prepare a3 for handle_exception */
-
- mtlr xpc
- ld itmp3,8(xpc)
- extsh itmp3,itmp3
- add pv,itmp3,xpc
- ld itmp3,16(xpc)
- srwi itmp3,itmp3,16
- cmpwi itmp3,0x3dad
- bne L_asm_handle_exception_stack_loop
- ld itmp3,16(xpc)
- slwi itmp3,itmp3,16
- add pv,pv,itmp3
-
+ mtlr t4 /* restore RA */
+ lwz t1,FrameSize(pv)
+ add sp,sp,t1 /* unwind stack */
b L_asm_handle_exception_stack_loop
asm_abstractmethoderror:
mflr r0
- stw r0,LA_LR_OFFSET(sp)
- stwu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
+ std r0,LA_LR_OFFSET(sp)
+ stdu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
addi a0,sp,LA_SIZE_ALIGNED /* pass java sp */
mr a1,r0 /* pass exception address */
bl exceptions_asm_new_abstractmethoderror
- lwz r0,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp)
+ ld r0,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp)
mtlr r0 /* restore return address */
addi sp,sp,LA_SIZE_ALIGNED
/* keep stack 16-bytes aligned: 6+1+37 = 44 */
stdu sp,-(LA_SIZE+PA_SIZE+ARG_CNT*8+TMP_CNT*8+4*8)(sp)
-#if defined(__DARWIN__)
- stw a0,LA_SIZE+(5+0)*8(r1) /* save argument registers */
- stw a1,LA_SIZE+(5+1)*8(r1) /* preserve linkage area (24 bytes) */
- stw a2,LA_SIZE+(5+2)*8(r1) /* and 4 bytes for 4 argument */
- stw a3,LA_SIZE+(5+3)*8(r1)
- stw a4,LA_SIZE+(5+4)*8(r1)
- stw a5,LA_SIZE+(5+5)*8(r1)
- stw a6,LA_SIZE+(5+6)*8(r1)
- stw a7,LA_SIZE+(5+7)*8(r1)
-
- stfd fa0,LA_SIZE+(5+8)*8(sp)
- stfd fa1,LA_SIZE+(5+10)*8(sp)
- stfd fa2,LA_SIZE+(5+12)*8(sp)
- stfd fa3,LA_SIZE+(5+14)*8(sp)
- stfd fa4,LA_SIZE+(5+16)*8(sp)
- stfd fa5,LA_SIZE+(5+18)*8(sp)
- stfd fa6,LA_SIZE+(5+20)*8(sp)
- stfd fa7,LA_SIZE+(5+22)*8(sp)
- stfd fa8,LA_SIZE+(5+24)*8(sp)
- stfd fa9,LA_SIZE+(5+26)*8(sp)
- stfd fa10,LA_SIZE+(5+28)*8(sp)
- stfd fa11,LA_SIZE+(5+30)*8(sp)
- stfd fa12,LA_SIZE+(5+32)*8(sp) /* XXX */
-
- stw t0,LA_SIZE+(+5+33)*8(r1)
- stw t1,LA_SIZE+(+5+34)*8(r1)
- stw t2,LA_SIZE+(+5+35)*8(r1)
- stw t3,LA_SIZE+(+5+36)*8(r1)
- stw t4,LA_SIZE+(+5+37)*8(r1)
- stw t5,LA_SIZE+(+5+38)*8(r1)
- stw t6,LA_SIZE+(+5+39)*8(r1)
- stw t7,LA_SIZE+(+5+40)*8(r1)
-
- stfd ft0,LA_SIZE+(+5+42)*8(r1)
- stfd ft1,LA_SIZE+(+5+44)*8(r1)
- stfd ft2,LA_SIZE+(+5+46)*8(r1)
- stfd ft3,LA_SIZE+(+5+48)*8(r1)
- stfd ft4,LA_SIZE+(+5+50)*8(r1)
- stfd ft5,LA_SIZE+(+5+52)*8(r1)
-#else
SAVE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS) /* save 8 int/8 float arguments */
SAVE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS+ARG_CNT)
-#endif
std itmp1,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+1*8(sp)
std itmp2,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+2*8(sp)
bl patcher_wrapper
std v0,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+4*8(sp) /* save return value */
-#if defined(__DARWIN__)
- lwz a0,LA_SIZE+(5+0)*8(r1)
- lwz a1,LA_SIZE+(5+1)*8(r1)
- lwz a2,LA_SIZE+(5+2)*8(r1)
- lwz a3,LA_SIZE+(5+3)*8(r1)
- lwz a4,LA_SIZE+(5+4)*8(r1)
- lwz a5,LA_SIZE+(5+5)*8(r1)
- lwz a6,LA_SIZE+(5+6)*8(r1)
- lwz a7,LA_SIZE+(5+7)*8(r1)
-
- lfd fa0,LA_SIZE+(5+8)*8(sp)
- lfd fa1,LA_SIZE+(5+10)*8(sp)
- lfd fa2,LA_SIZE+(5+12)*8(sp)
- lfd fa3,LA_SIZE+(5+14)*8(sp)
- lfd fa4,LA_SIZE+(5+16)*8(sp)
- lfd fa5,LA_SIZE+(5+18)*8(sp)
- lfd fa6,LA_SIZE+(5+20)*8(sp)
- lfd fa7,LA_SIZE+(5+22)*8(sp)
- lfd fa8,LA_SIZE+(5+24)*8(sp)
- lfd fa9,LA_SIZE+(5+26)*8(sp)
- lfd fa10,LA_SIZE+(5+28)*8(sp)
- lfd fa11,LA_SIZE+(5+30)*8(sp)
- lfd fa12,LA_SIZE+(5+32)*8(sp)
-
- lwz t0,LA_SIZE+(+5+33)*8(r1)
- lwz t1,LA_SIZE+(+5+34)*8(r1)
- lwz t2,LA_SIZE+(+5+35)*8(r1)
- lwz t3,LA_SIZE+(+5+36)*8(r1)
- lwz t4,LA_SIZE+(+5+37)*8(r1)
- lwz t5,LA_SIZE+(+5+38)*8(r1)
- lwz t6,LA_SIZE+(+5+39)*8(r1)
- lwz t7,LA_SIZE+(+5+40)*8(r1)
-
- lfd ft0,LA_SIZE+(+5+42)*8(r1)
- lfd ft1,LA_SIZE+(+5+44)*8(r1)
- lfd ft2,LA_SIZE+(+5+46)*8(r1)
- lfd ft3,LA_SIZE+(+5+48)*8(r1)
- lfd ft4,LA_SIZE+(+5+50)*8(r1)
- lfd ft5,LA_SIZE+(+5+52)*8(r1)
-#else
+
RESTORE_ARGUMENT_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS) /* restore 8 int/8 float args */
RESTORE_TEMPORARY_REGISTERS(LA_SIZE_IN_POINTERS+PA_SIZE_IN_POINTERS+ARG_CNT)
-#endif
ld itmp1,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+1*8(sp)
ld itmp2,LA_SIZE+PA_SIZE+(ARG_CNT+TMP_CNT)*8+2*8(sp)
addi sp,sp,LA_SIZE+PA_SIZE+ARG_CNT*8+TMP_CNT*8+4*8+8*8
b L_asm_handle_exception
+#if defined(ENABLE_REPLACEMENT)
/* asm_replacement_out *********************************************************
mflr r16
/* save registers in execution state */
- stw r0 ,( 0*8+offes_intregs)(sp)
- stw r1 ,( 1*8+offes_intregs)(sp)
- stw r2 ,( 2*8+offes_intregs)(sp)
- stw r3 ,( 3*8+offes_intregs)(sp)
- stw r4 ,( 4*8+offes_intregs)(sp)
- stw r5 ,( 5*8+offes_intregs)(sp)
- stw r6 ,( 6*8+offes_intregs)(sp)
- stw r7 ,( 7*8+offes_intregs)(sp)
- stw r8 ,( 8*8+offes_intregs)(sp)
- stw r9 ,( 9*8+offes_intregs)(sp)
- stw r10,(10*8+offes_intregs)(sp)
- stw r11,(11*8+offes_intregs)(sp)
- stw r12,(12*8+offes_intregs)(sp)
- stw r13,(13*8+offes_intregs)(sp)
- stw r14,(14*8+offes_intregs)(sp)
- stw r15,(15*8+offes_intregs)(sp)
- stw r16,(16*8+offes_intregs)(sp) /* link register */
- stw r17,(17*8+offes_intregs)(sp)
- stw r18,(18*8+offes_intregs)(sp)
- stw r19,(19*8+offes_intregs)(sp)
- stw r20,(20*8+offes_intregs)(sp)
- stw r21,(21*8+offes_intregs)(sp)
- stw r22,(22*8+offes_intregs)(sp)
- stw r23,(23*8+offes_intregs)(sp)
- stw r24,(24*8+offes_intregs)(sp)
- stw r25,(25*8+offes_intregs)(sp)
- stw r26,(26*8+offes_intregs)(sp)
- stw r27,(27*8+offes_intregs)(sp)
- stw r28,(28*8+offes_intregs)(sp)
- stw r29,(29*8+offes_intregs)(sp)
- stw r30,(30*8+offes_intregs)(sp)
- stw r31,(31*8+offes_intregs)(sp)
+ std r0 ,( 0*8+offes_intregs)(sp)
+ std r1 ,( 1*8+offes_intregs)(sp)
+ std r2 ,( 2*8+offes_intregs)(sp)
+ std r3 ,( 3*8+offes_intregs)(sp)
+ std r4 ,( 4*8+offes_intregs)(sp)
+ std r5 ,( 5*8+offes_intregs)(sp)
+ std r6 ,( 6*8+offes_intregs)(sp)
+ std r7 ,( 7*8+offes_intregs)(sp)
+ std r8 ,( 8*8+offes_intregs)(sp)
+ std r9 ,( 9*8+offes_intregs)(sp)
+ std r10,(10*8+offes_intregs)(sp)
+ std r11,(11*8+offes_intregs)(sp)
+ std r12,(12*8+offes_intregs)(sp)
+ std r13,(13*8+offes_intregs)(sp)
+ std r14,(14*8+offes_intregs)(sp)
+ std r15,(15*8+offes_intregs)(sp)
+ std r16,(16*8+offes_intregs)(sp) /* link register */
+ std r17,(17*8+offes_intregs)(sp)
+ std r18,(18*8+offes_intregs)(sp)
+ std r19,(19*8+offes_intregs)(sp)
+ std r20,(20*8+offes_intregs)(sp)
+ std r21,(21*8+offes_intregs)(sp)
+ std r22,(22*8+offes_intregs)(sp)
+ std r23,(23*8+offes_intregs)(sp)
+ std r24,(24*8+offes_intregs)(sp)
+ std r25,(25*8+offes_intregs)(sp)
+ std r26,(26*8+offes_intregs)(sp)
+ std r27,(27*8+offes_intregs)(sp)
+ std r28,(28*8+offes_intregs)(sp)
+ std r29,(29*8+offes_intregs)(sp)
+ std r30,(30*8+offes_intregs)(sp)
+ std r31,(31*8+offes_intregs)(sp)
stfd fr0 ,( 0*8+offes_fltregs)(sp)
stfd fr1 ,( 1*8+offes_fltregs)(sp)
/* a0 == executionstate *es */
/* set new sp and pv */
- lwz sp,(offes_sp)(a0)
- lwz pv,(offes_pv)(a0)
+ ld sp,(offes_sp)(a0)
+ ld pv,(offes_pv)(a0)
/* copy registers from execution state */
- lwz r0 ,( 0*8+offes_intregs)(a0)
+ ld r0 ,( 0*8+offes_intregs)(a0)
/* r1 is sp */
/* r2 is reserved */
/* a0 is loaded below */
- lwz r4 ,( 4*8+offes_intregs)(a0)
- lwz r5 ,( 5*8+offes_intregs)(a0)
- lwz r6 ,( 6*8+offes_intregs)(a0)
- lwz r7 ,( 7*8+offes_intregs)(a0)
- lwz r8 ,( 8*8+offes_intregs)(a0)
- lwz r9 ,( 9*8+offes_intregs)(a0)
- lwz r10,(10*8+offes_intregs)(a0)
- lwz r11,(11*8+offes_intregs)(a0)
- lwz r12,(12*8+offes_intregs)(a0)
+ ld r4 ,( 4*8+offes_intregs)(a0)
+ ld r5 ,( 5*8+offes_intregs)(a0)
+ ld r6 ,( 6*8+offes_intregs)(a0)
+ ld r7 ,( 7*8+offes_intregs)(a0)
+ ld r8 ,( 8*8+offes_intregs)(a0)
+ ld r9 ,( 9*8+offes_intregs)(a0)
+ ld r10,(10*8+offes_intregs)(a0)
+ ld r11,(11*8+offes_intregs)(a0)
+ ld r12,(12*8+offes_intregs)(a0)
/* r13 is pv */
- lwz r14,(14*8+offes_intregs)(a0)
- lwz r15,(15*8+offes_intregs)(a0)
- lwz r16,(16*8+offes_intregs)(a0) /* link register */
- lwz r17,(17*8+offes_intregs)(a0)
- lwz r18,(18*8+offes_intregs)(a0)
- lwz r19,(19*8+offes_intregs)(a0)
- lwz r20,(20*8+offes_intregs)(a0)
- lwz r21,(21*8+offes_intregs)(a0)
- lwz r22,(22*8+offes_intregs)(a0)
- lwz r23,(23*8+offes_intregs)(a0)
- lwz r24,(24*8+offes_intregs)(a0)
- lwz r25,(25*8+offes_intregs)(a0)
- lwz r26,(26*8+offes_intregs)(a0)
- lwz r27,(27*8+offes_intregs)(a0)
- lwz r28,(28*8+offes_intregs)(a0)
- lwz r29,(29*8+offes_intregs)(a0)
- lwz r30,(30*8+offes_intregs)(a0)
- lwz r31,(31*8+offes_intregs)(a0)
+ ld r14,(14*8+offes_intregs)(a0)
+ ld r15,(15*8+offes_intregs)(a0)
+ ld r16,(16*8+offes_intregs)(a0) /* link register */
+ ld r17,(17*8+offes_intregs)(a0)
+ ld r18,(18*8+offes_intregs)(a0)
+ ld r19,(19*8+offes_intregs)(a0)
+ ld r20,(20*8+offes_intregs)(a0)
+ ld r21,(21*8+offes_intregs)(a0)
+ ld r22,(22*8+offes_intregs)(a0)
+ ld r23,(23*8+offes_intregs)(a0)
+ ld r24,(24*8+offes_intregs)(a0)
+ ld r25,(25*8+offes_intregs)(a0)
+ ld r26,(26*8+offes_intregs)(a0)
+ ld r27,(27*8+offes_intregs)(a0)
+ ld r28,(28*8+offes_intregs)(a0)
+ ld r29,(29*8+offes_intregs)(a0)
+ ld r30,(30*8+offes_intregs)(a0)
+ ld r31,(31*8+offes_intregs)(a0)
lfd fr0 ,( 0*8+offes_fltregs)(a0)
lfd fr1 ,( 1*8+offes_fltregs)(a0)
/* load new pc */
- lwz itmp3,offes_pc(a0)
+ ld itmp3,offes_pc(a0)
/* load a0 */
- lwz a0,(3*8+offes_intregs)(a0)
+ ld a0,(3*8+offes_intregs)(a0)
/* jump to new code */
mtctr itmp3
bctr
-/*********************************************************************/
-/*
+#endif /* defined(ENABLE_REPLACEMENT) */
+
+/* asm_cacheflush **************************************************************
+
+   Flush the data cache and invalidate the instruction cache over the given
+   address range.  Adapted from linux/arch/ppc64/kernel/vdso64/cacheflush.S.
+   Assumes a 128-byte cache line size.  Callers must treat every register
+   used here as clobbered.
+
+*******************************************************************************/
+
+ .section ".opd","aw"
+ .align 3
asm_cacheflush:
- .quad .asm_cacheflush,.TOC.@tocbase,0
- .previous
- .size asm_cacheflush,24
- .type .asm_cacheflush,@function
- .globl .asm_cacheflush
-*/
+ .quad .asm_cacheflush,.TOC.@tocbase,0
+ .previous
+ .size asm_cacheflush, 24
+ .type .asm_cacheflush,@function
+ .globl .asm_cacheflush
.asm_cacheflush:
+	/* build the mask 0xffffffffffffff80 (li sign-extends 0x8000, ori fills
+	   the low halfword): AND-ing with it rounds an address down to a
+	   128-byte cache-line boundary */
+ li r6, 0xffffffffffff8000
+ ori r6,r6,0x000000000000ff80
+
add r4,r3,r4
- rldimi r3,r3,0,26
- addi r4,r4,31
- rldimi r4,r4,0,26
+ and. r3,r3,r6
+ addi r4,r4,127
+ and. r4,r4,r6
mr r5,r3
1:
cmpld r3,r4
bge 0f
dcbst 0,r3
- addi r3,r3,32
+ addi r3,r3,128
b 1b
0:
sync
cmpld r5,r4
bge 0f
icbi 0,r5
- addi r5,r5,32
+ addi r5,r5,128
b 1b
0:
sync
blr
-.asm_getclassvalues_atomic:
-_crit_restart:
-_crit_begin:
- lwz r6,offbaseval(r3)
- lwz r7,offdiffval(r3)
- lwz r8,offbaseval(r4)
-_crit_end:
- stw r6,offcast_super_baseval(r5)
- stw r7,offcast_super_diffval(r5)
- stw r8,offcast_sub_baseval(r5)
- blr
-
- .data
-
-asm_criticalsections:
-#if defined(ENABLE_THREADS)
- .quad _crit_begin
- .quad _crit_end
- .quad _crit_restart
-#endif
- .quad 0
-
-
-#if defined(__DARWIN__)
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_builtin_throw_exception$stub:
- .indirect_symbol _builtin_throw_exception
- mflr r0
- bcl 20,31,L00$_builtin_throw_exception
-L00$_builtin_throw_exception:
- mflr r11
- addis r11,r11,ha16(L_builtin_throw_exception$lazy_ptr - L00$_builtin_throw_exception)
- mtlr r0
- lwzu r12,lo16(L_builtin_throw_exception$lazy_ptr - L00$_builtin_throw_exception)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_builtin_throw_exception$lazy_ptr:
- .indirect_symbol _builtin_throw_exception
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_exceptions_handle_exception$stub:
- .indirect_symbol _exceptions_handle_exception
- mflr r0
- bcl 20,31,L00$_exceptions_handle_exception
-L00$_exceptions_handle_exception:
- mflr r11
- addis r11,r11,ha16(L_exceptions_handle_exception$lazy_ptr - L00$_exceptions_handle_exception)
- mtlr r0
- lwzu r12,lo16(L_exceptions_handle_exception$lazy_ptr - L00$_exceptions_handle_exception)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_exceptions_handle_exception$lazy_ptr:
- .indirect_symbol _exceptions_handle_exception
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_stacktrace_create_extern_stackframeinfo$stub:
- .indirect_symbol _stacktrace_create_extern_stackframeinfo
- mflr r0
- bcl 20,31,L00$_stacktrace_create_extern_stackframeinfo
-L00$_stacktrace_create_extern_stackframeinfo:
- mflr r11
- addis r11,r11,ha16(L_stacktrace_create_extern_stackframeinfo$lazy_ptr - L00$_stacktrace_create_extern_stackframeinfo)
- mtlr r0
- lwzu r12,lo16(L_stacktrace_create_extern_stackframeinfo$lazy_ptr - L00$_stacktrace_create_extern_stackframeinfo)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_stacktrace_create_extern_stackframeinfo$lazy_ptr:
- .indirect_symbol _stacktrace_create_extern_stackframeinfo
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_jit_asm_compile$stub:
- .indirect_symbol _jit_asm_compile
- mflr r0
- bcl 20,31,L00$_jit_asm_compile
-L00$_jit_asm_compile:
- mflr r11
- addis r11,r11,ha16(L_jit_asm_compile$lazy_ptr - L00$_jit_asm_compile)
- mtlr r0
- lwzu r12,lo16(L_jit_asm_compile$lazy_ptr - L00$_jit_asm_compile)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_jit_asm_compile$lazy_ptr:
- .indirect_symbol _jit_asm_compile
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_stacktrace_remove_stackframeinfo$stub:
- .indirect_symbol _stacktrace_remove_stackframeinfo
- mflr r0
- bcl 20,31,L00$_stacktrace_remove_stackframeinfo
-L00$_stacktrace_remove_stackframeinfo:
- mflr r11
- addis r11,r11,ha16(L_stacktrace_remove_stackframeinfo$lazy_ptr - L00$_stacktrace_remove_stackframeinfo)
- mtlr r0
- lwzu r12,lo16(L_stacktrace_remove_stackframeinfo$lazy_ptr - L00$_stacktrace_remove_stackframeinfo)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_stacktrace_remove_stackframeinfo$lazy_ptr:
- .indirect_symbol _stacktrace_remove_stackframeinfo
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_exceptions_get_and_clear_exception$stub:
- .indirect_symbol _exceptions_get_and_clear_exception
- mflr r0
- bcl 20,31,L00$_exceptions_get_and_clear_exception
-L00$_exceptions_get_and_clear_exception:
- mflr r11
- addis r11,r11,ha16(L_exceptions_get_and_clear_exception$lazy_ptr - L00$_exceptions_get_and_clear_exception)
- mtlr r0
- lwzu r12,lo16(L_exceptions_get_and_clear_exception$lazy_ptr - L00$_exceptions_get_and_clear_exception)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_exceptions_get_and_clear_exception$lazy_ptr:
- .indirect_symbol _exceptions_get_and_clear_exception
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_exceptions_asm_new_abstractmethoderror$stub:
- .indirect_symbol _exceptions_asm_new_abstractmethoderror
- mflr r0
- bcl 20,31,L00$_exceptions_asm_new_abstractmethoderror
-L00$_exceptions_asm_new_abstractmethoderror:
- mflr r11
- addis r11,r11,ha16(L_exceptions_asm_new_abstractmethoderror$lazy_ptr - L00$_exceptions_asm_new_abstractmethoderror)
- mtlr r0
- lwzu r12,lo16(L_exceptions_asm_new_abstractmethoderror$lazy_ptr - L00$_exceptions_asm_new_abstractmethoderror)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_exceptions_asm_new_abstractmethoderror$lazy_ptr:
- .indirect_symbol _exceptions_asm_new_abstractmethoderror
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_patcher_wrapper$stub:
- .indirect_symbol _patcher_wrapper
- mflr r0
- bcl 20,31,L00$_patcher_wrapper
-L00$_patcher_wrapper:
- mflr r11
- addis r11,r11,ha16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper)
- mtlr r0
- lwzu r12,lo16(L_patcher_wrapper$lazy_ptr - L00$_patcher_wrapper)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_patcher_wrapper$lazy_ptr:
- .indirect_symbol _patcher_wrapper
- .long dyld_stub_binding_helper
-
-
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align 2
-L_replace_me$stub:
- .indirect_symbol _replace_me
- mflr r0
- bcl 20,31,L00$_replace_me
-L00$_replace_me:
- mflr r11
- addis r11,r11,ha16(L_replace_me$lazy_ptr - L00$_replace_me)
- mtlr r0
- lwzu r12,lo16(L_replace_me$lazy_ptr - L00$_replace_me)(r11)
- mtctr r12
- bctr
-.data
-.lazy_symbol_pointer
-L_replace_me$lazy_ptr:
- .indirect_symbol _replace_me
- .long dyld_stub_binding_helper
-
-#endif /* defined(__DARWIN__) */
-
-
-/* Disable exec-stacks, required for Gentoo ***********************************/
+/* disable exec-stacks ********************************************************/
-#if defined(__GCC__) && defined(__ELF__)
- .section .note.GNU-stack,"",@progbits
+#if defined(__linux__) && defined(__ELF__)
+ .section .note.GNU-stack,"",%progbits
#endif