/* src/vm/jit/powerpc/asmpart.S - Java-C interface functions for PowerPC
- Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
- R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
- C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
- Institut f. Computersprachen - TU Wien
+ Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
+ E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
+ J. Wenninger, Institut f. Computersprachen - TU Wien
This file is part of CACAO.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA.
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
- Contact: cacao@complang.tuwien.ac.at
+ Contact: cacao@cacaojvm.org
Authors: Andreas Krall
Reinhard Grafl
Stefan Ring
Changes: Christian Thalinger
+ Edwin Steiner
- $Id: asmpart.S 2806 2005-06-23 13:51:06Z twisti $
+ $Id: asmpart.S 4654 2006-03-19 19:46:11Z edwin $
*/
#include "config.h"
+
#include "md-abi.h"
#include "md-asm.h"
+#include "vm/jit/abi.h"
+#include "vm/jit/methodheader.h"
#include "vm/jit/powerpc/offsets.h"
-#include "vm/jit/powerpc/asmoffsets.h"
.text
.align 2
- .globl asm_calljavafunction
- .globl asm_calljavafunction_int
- .globl asm_calljavafunction2
- .globl asm_calljavafunction2int
- .globl asm_calljavafunction2long
- .globl asm_calljavafunction2float
- .globl asm_calljavafunction2double
+/* exported functions and variables *******************************************/
+
+ .globl asm_vm_call_method
+ .globl asm_vm_call_method_int
+ .globl asm_vm_call_method_long
+ .globl asm_vm_call_method_float
+ .globl asm_vm_call_method_double
.globl asm_call_jit_compiler
.globl asm_wrapper_patcher
- .globl asm_builtin_arraycheckcast
- .globl asm_builtin_aastore
-
- .globl asm_builtin_idiv
- .globl asm_builtin_irem
- .globl asm_builtin_ldiv
- .globl asm_builtin_lrem
+ .globl asm_replacement_out
+ .globl asm_replacement_in
.globl asm_cacheflush
.globl asm_initialize_thread_stack
.globl asm_getclassvalues_atomic
-/********************* function asm_calljavafunction ***************************
+/* asm_vm_call_method **********************************************************
* *
* This function calls a Java-method (which possibly needs compilation) *
* with up to 4 address parameters. *
* This functions calls the JIT-compiler which eventually translates the *
* method into machine code. *
* *
-* C-prototype: *
+* C-prototype: *
* javaobject_header *asm_calljavamethod (methodinfo *m, *
-* void *arg1, void *arg2, void *arg3, void *arg4); *
+* void *arg1, void *arg2, void *arg3, void *arg4); *
* *
*******************************************************************************/
.align 2
.long 0 /* catch type all */
- .long calljava_xhandler /* handler pc */
- .long calljava_xhandler /* end pc */
- .long asm_calljavafunction /* start pc */
+ .long calljava_xhandler2 /* handler pc */
+ .long calljava_xhandler2 /* end pc */
+ .long L_asm_vm_call_method /* start pc */
.long 1 /* extable size */
+ .long 0 /* line number table start */
+ .long 0 /* line number table size */
.long 0 /* fltsave */
.long 0 /* intsave */
.long 0 /* isleaf */
.long 0 /* IsSync */
- .long 24 /* frame size */
+ .long 0 /* frame size */
.long 0 /* method pointer (pointer to name) */
- .long 0 /* padding */
-asm_calljavafunction:
-asm_calljavafunction_int:
+asm_vm_call_method:
+asm_vm_call_method_int:
+asm_vm_call_method_long:
+asm_vm_call_method_float:
+asm_vm_call_method_double:
+L_asm_vm_call_method: /* required for PIC code */
mflr r0
- stw r31,-4(r1)
-/* stw r30,-8(r1)*/
- stw pv,-12(r1)
stw r0,LA_LR_OFFSET(r1)
- stwu r1,-148(r1)
-
-#if defined(__DARWIN__)
- bl 0f
-0:
- mflr r31
-#endif
-
- stw t0,40(r1)
- stw t1,44(r1)
- stw t2,48(r1)
- stw t3,52(r1)
- stw t4,56(r1)
- stw t5,60(r1)
- stw t6,64(r1)
- stw t7,68(r1)
-
- stfd ftmp1,72(r1)
- stfd ftmp2,80(r1)
- stfd ft0,88(r1)
- stfd ft1,96(r1)
- stfd ft2,104(r1)
- stfd ft3,112(r1)
- stfd ft4,120(r1)
- stfd ft5,128(r1)
-
- stw a0,36(r1)
- addi itmp1,r1,36
- mr a0,a1
- mr a1,a2
- mr a2,a3
- mr a3,a4
+ stwu r1,-40*4(r1)
#if defined(__DARWIN__)
-/* addis mptr,r31,ha16(_asm_call_jit_compiler-0b)*/
- addi mptr,r31,lo16(asm_call_jit_compiler-0b)
-#else
-/* addi mptr,r31,(asm_call_jit_compiler-0b)@l*/
- lis mptr,asm_call_jit_compiler@ha
- addi mptr,mptr,asm_call_jit_compiler@l
+ stw itmp1,10*4(sp) /* register r11 is callee saved */
#endif
- stw mptr,32(r1)
- addi mptr,r1,28
+ stw pv,11*4(sp) /* save PV register */
- lwz pv,4(mptr)
- mtctr pv
- bctrl
+ stw itmp3,12*4(sp) /* registers r14-r31 are callee saved */
+ stfd ftmp1,14*4(sp) /* registers f14-f31 are callee saved */
+ stfd ftmp2,16*4(sp)
-1:
- mflr itmp1
#if defined(__DARWIN__)
- addi pv,itmp1,lo16(asm_calljavafunction-1b)
+ stw t1,18*4(r1)
+ stw t2,19*4(r1)
+ stw t3,20*4(r1)
+ stw t4,21*4(r1)
+ stw t5,22*4(r1)
+ stw t6,23*4(r1)
+ stw t7,24*4(r1)
+
+ stfd ft0,26*4(r1)
+ stfd ft1,28*4(r1)
+ stfd ft2,30*4(r1)
+ stfd ft3,32*4(r1)
+ stfd ft4,34*4(r1)
+ stfd ft5,36*4(r1)
#else
- addi pv,itmp1,(asm_calljavafunction-1b)@l
+ SAVE_TEMPORARY_REGISTERS(18) /* the offset has to be even */
#endif
-calljava_regrestore:
- lwz t0,40(r1)
- lwz t1,44(r1)
- lwz t2,48(r1)
- lwz t3,52(r1)
- lwz t4,56(r1)
- lwz t5,60(r1)
- lwz t6,64(r1)
- lwz t7,68(r1)
-
- lfd ftmp1,72(r1)
- lfd ftmp2,80(r1)
- lfd ft0,88(r1)
- lfd ft1,96(r1)
- lfd ft2,104(r1)
- lfd ft3,112(r1)
- lfd ft4,120(r1)
- lfd ft5,128(r1)
-
- lwz r0,148+LA_LR_OFFSET(r1)
- mtlr r0
- addi r1,r1,148
- lwz pv,-12(r1)
-/* lwz r30,-8(r1)*/
- lwz r31,-4(r1)
- blr
-
-calljava_xhandler:
- mr r3,itmp1
- bl builtin_throw_exception
- li v0,0 /* return NULL */
- b calljava_regrestore
-
+ stw a0,9*4(r1) /* save method pointer for compiler */
+ mr itmp1,r5 /* pointer to arg block */
+ mr itmp2,r4 /* arg count */
+ addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */
+ addi itmp2,itmp2,1 /* initialize argument count */
+ li r17,0 /* initialize integer argument counter */
+ li r18,0 /* initialize float argument counter */
- .align 2
+L_register_copy:
+ addi itmp1,itmp1,sizevmarg /* goto next argument block */
+ addi itmp2,itmp2,-1 /* argument count - 1 */
+ mr. itmp2,itmp2
+ beq L_register_copy_done
- .long 0 /* catch type all */
- .long calljava_xhandler2 /* handler pc */
- .long calljava_xhandler2 /* end pc */
- .long asm_calljavafunction2 /* start pc */
- .long 1 /* extable size */
- .long 0 /* fltsave */
- .long 0 /* intsave */
- .long 0 /* isleaf */
- .long 0 /* IsSync */
- .long 24 /* frame size */
- .long 0 /* method pointer (pointer to name) */
- .long 0 /* padding */
+#if WORDS_BIGENDIAN == 1
+ lwz itmp3,offvmargtype+4(itmp1)
+#else
+#error XXX
+#endif
+ andi. r0,itmp3,0x0002 /* is this a float/double type? */
+ bne L_register_handle_float
-asm_calljavafunction2:
-asm_calljavafunction2int:
-asm_calljavafunction2long:
-asm_calljavafunction2float:
-asm_calljavafunction2double:
- mflr r0
- stw r31,-4(r1)
-/* stw r30,-8(r1)*/
- stw pv,-12(r1)
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-148(r1)
- bl 0f
-0:
- mflr r31
-
- stw r16,40(r1)
- stw r17,44(r1)
- stw r18,48(r1)
- stw r19,52(r1)
- stw r20,56(r1)
- stw r21,60(r1)
- stw r22,64(r1)
- stw r23,68(r1)
- stfd fr16,72(r1)
- stfd fr17,80(r1)
- stfd fr18,88(r1)
- stfd fr19,96(r1)
- stfd fr20,104(r1)
- stfd fr21,112(r1)
- stfd fr22,120(r1)
- stfd fr23,128(r1)
-
- stw r3,36(r1) /* save method pointer for compiler */
- mr itmp1,r6 /* pointer to arg block */
- mr itmp2,r4 /* arg count */
+ cmpwi r17,INT_ARG_CNT /* are we out of integer argument */
+ beq L_register_copy /* registers? yes, next loop */
- mr. itmp2,itmp2
- ble calljava_argsloaded
+ andi. r0,itmp3,0x0001 /* is this a long type? */
+ bne L_register_handle_long
- addi itmp2,itmp2,-1
- lwz r3,offjniitem+4(itmp1)
- mr. itmp2,itmp2
- ble calljava_argsloaded
+L_register_handle_int:
+#if defined(__DARWIN__)
+ lis itmp3,ha16(jumptable_int)
+ addi itmp3,itmp3,lo16(jumptable_int)
+#else
+ lis itmp3,jumptable_int@ha
+ addi itmp3,itmp3,jumptable_int@l
+#endif
+ slwi r19,r17,2 /* multiple of 4-bytes */
+ add itmp3,itmp3,r19 /* calculate address of jumptable */
+ lwz itmp3,0(itmp3) /* load function address */
+ addi r17,r17,1 /* integer argument counter + 1 */
+ mtctr itmp3
+ bctr
- addi itmp2,itmp2,-1
- lwz r4,offjniitem+sizejniblock*1+4(itmp1)
- mr. itmp2,itmp2
- ble calljava_argsloaded
- addi itmp2,itmp2,-1
+L_register_handle_long:
+#if defined(__DARWIN__)
+ lis itmp3,ha16(jumptable_long)
+ addi itmp3,itmp3,lo16(jumptable_long)
+#else
+ lis itmp3,jumptable_long@ha
+ addi itmp3,itmp3,jumptable_long@l
+#endif
+	addi	r19,r17,1                     /* align to even numbers */
+	srwi	r19,r19,1
+	slwi	r19,r19,1
+	addi	r17,r19,2                     /* integer argument counter + 2 */
+	slwi	r19,r19,2                     /* multiple of 4-bytes */
+	add	itmp3,itmp3,r19               /* calculate address of jumptable */
+	lwz	itmp3,0(itmp3)                /* load function address */
+	mtctr	itmp3
+	bctr
- addi itmp2,itmp2,-1
- lwz r5,offjniitem+sizejniblock*2+4(itmp1)
- mr. itmp2,itmp2
- ble calljava_argsloaded
- addi itmp2,itmp2,-1
+L_register_handle_float:
+L_register_copy_done:
- addi itmp2,itmp2,-1
- lwz r6,offjniitem+sizejniblock*3+4(itmp1)
- mr. itmp2,itmp2
- ble calljava_argsloaded
- addi itmp2,itmp2,-1
+L_stack_copy_done:
+ lwz itmp1,9*4(sp) /* pass method pointer via tmp1 */
-calljava_argsloaded:
- addi itmp1,r1,36
#if defined(__DARWIN__)
-/* addis mptr,r31,ha16(_asm_call_jit_compiler-0b)*/
- addi mptr,r31,lo16(asm_call_jit_compiler-0b)
+ lis mptr,ha16(L_asm_call_jit_compiler)
+ addi mptr,mptr,lo16(L_asm_call_jit_compiler)
#else
- addi mptr,r31,(asm_call_jit_compiler-0b)@l
+ lis mptr,L_asm_call_jit_compiler@ha
+ addi mptr,mptr,L_asm_call_jit_compiler@l
#endif
- stw mptr,32(r1)
- addi mptr,r1,28
+ stw mptr,8*4(r1)
+ addi mptr,r1,7*4
- lwz pv,4(mptr)
+ lwz pv,1*4(mptr)
mtctr pv
bctrl
1:
mflr itmp1
#if defined(__DARWIN__)
- addi pv,itmp1,lo16(asm_calljavafunction2-1b)
+ addi pv,itmp1,lo16(L_asm_vm_call_method-1b)
+#else
+ addi pv,itmp1,(L_asm_vm_call_method-1b)@l
+#endif
+
+L_asm_vm_call_method_return:
+#if defined(__DARWIN__)
+ lwz itmp1,10*4(sp) /* register r11 is callee saved */
+#endif
+	lwz	pv,11*4(sp)                   /* restore PV register */
+
+ lwz itmp3,12*4(sp)
+ lfd ftmp1,14*4(sp) /* registers f14-f31 are callee saved */
+ lfd ftmp2,16*4(sp)
+
+#if defined(__DARWIN__)
+ lwz t1,18*4(r1)
+ lwz t2,19*4(r1)
+ lwz t3,20*4(r1)
+ lwz t4,21*4(r1)
+ lwz t5,22*4(r1)
+ lwz t6,23*4(r1)
+ lwz t7,24*4(r1)
+
+ lfd ft0,26*4(r1)
+ lfd ft1,28*4(r1)
+ lfd ft2,30*4(r1)
+ lfd ft3,32*4(r1)
+ lfd ft4,34*4(r1)
+ lfd ft5,36*4(r1)
#else
- addi pv,itmp1,(asm_calljavafunction2-1b)@l
+ RESTORE_TEMPORARY_REGISTERS(18) /* the offset has to be even */
#endif
-calljava_regrestore2:
- lwz r16,40(r1)
- lwz r17,44(r1)
- lwz r18,48(r1)
- lwz r19,52(r1)
- lwz r20,56(r1)
- lwz r21,60(r1)
- lwz r22,64(r1)
- lwz r23,68(r1)
- lfd fr16,72(r1)
- lfd fr17,80(r1)
- lfd fr18,88(r1)
- lfd fr19,96(r1)
- lfd fr20,104(r1)
- lfd fr21,112(r1)
- lfd fr22,120(r1)
- lfd fr23,128(r1)
-
- lwz r0,148+LA_LR_OFFSET(r1)
+ lwz r0,40*4+LA_LR_OFFSET(r1)
mtlr r0
- addi r1,r1,148
- lwz pv,-12(r1)
-/* lwz r30,-8(r1)*/
- lwz r31,-4(r1)
+ addi r1,r1,40*4
blr
calljava_xhandler2:
mr r3,itmp1
bl builtin_throw_exception
li v0,0 /* return NULL */
- b calljava_regrestore2
+ b L_asm_vm_call_method_return
+
+
+jumptable_int:
+ .long L_handle_a0
+ .long L_handle_a1
+ .long L_handle_a2
+ .long L_handle_a3
+ .long L_handle_a4
+ .long L_handle_a5
+ .long L_handle_a6
+ .long L_handle_a7
+
+L_handle_a0:
+ lwz a0,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a1:
+ lwz a1,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a2:
+ lwz a2,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a3:
+ lwz a3,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a4:
+ lwz a4,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a5:
+ lwz a5,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a6:
+ lwz a6,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a7:
+ lwz a7,offvmargdata+4(itmp1)
+ b L_register_copy
+
+
+jumptable_long:
+#if defined(__DARWIN__)
+#else
+ /* we have two entries here, so we get the even argument register
+ alignment for linux */
+
+ .long L_handle_a0_a1
+ .long 0
+ .long L_handle_a2_a3
+ .long 0
+ .long L_handle_a4_a5
+ .long 0
+ .long L_handle_a6_a7
+ .long 0
+#endif
+
+L_handle_a0_a1:
+ lwz a0,offvmargdata+0(itmp1)
+ lwz a1,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a2_a3:
+ lwz a2,offvmargdata+0(itmp1)
+ lwz a3,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a4_a5:
+ lwz a4,offvmargdata+0(itmp1)
+ lwz a5,offvmargdata+4(itmp1)
+ b L_register_copy
+L_handle_a6_a7:
+ lwz a6,offvmargdata+0(itmp1)
+ lwz a7,offvmargdata+4(itmp1)
+ b L_register_copy
/* asm_call_jit_compiler *******************************************************
*******************************************************************************/
asm_call_jit_compiler:
- stw itmp1,-8(r1)
- mflr itmp1
- stw r31,-4(r1)
- stw r29,-12(r1)
- stw itmp1,LA_LR_OFFSET(r1)
- stwu r1,-(LA_SIZE + 1*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)(r1)
- mr r31,pv
+L_asm_call_jit_compiler: /* required for PIC code */
+ mflr r0
+ stw r0,LA_LR_OFFSET(r1) /* save return address */
+ stwu r1,-((LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo)(r1)
+ stw itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 1*4)(r1)
+ mr itmp1,r0 /* save return address to other reg. */
lwz itmp3,-12(itmp1)
srwi itmp3,itmp3,16
andi. itmp3,itmp3,31
add mptr,mptr,itmp3
noregchange:
- lwz itmp1,(LA_SIZE + 1*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 1*4)(r1)
- mr r29,mptr
-
-#if 1
- stw a0,28(r1)
- stw a1,32(r1)
- stw a2,36(r1)
- stw a3,40(r1)
- stw a4,44(r1)
- stw a5,48(r1)
- stw a6,52(r1)
-
- stfd fa0,56(r1)
- stfd fa1,64(r1)
- stfd fa2,72(r1)
- stfd fa3,80(r1)
- stfd fa4,88(r1)
- stfd fa5,96(r1)
- stfd fa6,104(r1)
- stfd fa7,112(r1)
- stfd fa8,120(r1)
- stfd fa9,128(r1)
- stfd fa10,136(r1)
- stfd fa11,144(r1)
- stfd fa12,152(r1)
-
- stw r10,160(r1)
+ stw mptr,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 2*4)(r1)
+
+#if defined(__DARWIN__)
+ stw a0,(LA_WORD_SIZE+5+0)*4(r1)
+ stw a1,(LA_WORD_SIZE+5+1)*4(r1)
+ stw a2,(LA_WORD_SIZE+5+2)*4(r1)
+ stw a3,(LA_WORD_SIZE+5+3)*4(r1)
+ stw a4,(LA_WORD_SIZE+5+4)*4(r1)
+ stw a5,(LA_WORD_SIZE+5+5)*4(r1)
+ stw a6,(LA_WORD_SIZE+5+6)*4(r1)
+ stw a7,(LA_WORD_SIZE+5+7)*4(r1)
+
+ stfd fa0,(LA_WORD_SIZE+5+8)*4(r1)
+ stfd fa1,(LA_WORD_SIZE+5+10)*4(r1)
+ stfd fa2,(LA_WORD_SIZE+5+12)*4(r1)
+ stfd fa3,(LA_WORD_SIZE+5+14)*4(r1)
+ stfd fa4,(LA_WORD_SIZE+5+16)*4(r1)
+ stfd fa5,(LA_WORD_SIZE+5+18)*4(r1)
+ stfd fa6,(LA_WORD_SIZE+5+20)*4(r1)
+ stfd fa7,(LA_WORD_SIZE+5+22)*4(r1)
+ stfd fa8,(LA_WORD_SIZE+5+24)*4(r1)
+ stfd fa9,(LA_WORD_SIZE+5+26)*4(r1)
+ stfd fa10,(LA_WORD_SIZE+5+28)*4(r1)
+ stfd fa11,(LA_WORD_SIZE+5+30)*4(r1)
+ stfd fa12,(LA_WORD_SIZE+5+32)*4(r1)
#else
- SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE + 1)
+ SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
#endif
- lwz r3,0(itmp1)
- bl jit_compile
- mr pv,r3
-
- mr mptr,r29
-
-#if 1
- lwz a0,28(r1)
- lwz a1,32(r1)
- lwz a2,36(r1)
- lwz a3,40(r1)
- lwz a4,44(r1)
- lwz a5,48(r1)
- lwz a6,52(r1)
-
- lfd fa0,56(r1)
- lfd fa1,64(r1)
- lfd fa2,72(r1)
- lfd fa3,80(r1)
- lfd fa4,88(r1)
- lfd fa5,96(r1)
- lfd fa6,104(r1)
- lfd fa7,112(r1)
- lfd fa8,120(r1)
- lfd fa9,128(r1)
- lfd fa10,136(r1)
- lfd fa11,144(r1)
- lfd fa12,152(r1)
-
- lwz r10,160(r1)
+ addi a0,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)
+ li a1,0 /* we don't have pv handy */
+ addi a2,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo
+ lwz a3,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo+LA_LR_OFFSET(sp)
+ mr a4,a3 /* xpc is equal to ra */
+ bl stacktrace_create_extern_stackframeinfo
+
+ lwz a0,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 1*4)(r1)
+ bl jit_compile /* compile the Java method */
+ mr pv,r3 /* move address to pv register */
+
+ addi a0,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)
+ bl stacktrace_remove_stackframeinfo
+
+#if defined(__DARWIN__)
+ lwz a0,(LA_WORD_SIZE+5+0)*4(r1)
+ lwz a1,(LA_WORD_SIZE+5+1)*4(r1)
+ lwz a2,(LA_WORD_SIZE+5+2)*4(r1)
+ lwz a3,(LA_WORD_SIZE+5+3)*4(r1)
+ lwz a4,(LA_WORD_SIZE+5+4)*4(r1)
+ lwz a5,(LA_WORD_SIZE+5+5)*4(r1)
+ lwz a6,(LA_WORD_SIZE+5+6)*4(r1)
+ lwz a7,(LA_WORD_SIZE+5+7)*4(r1)
+
+ lfd fa0,(LA_WORD_SIZE+5+8)*4(r1)
+ lfd fa1,(LA_WORD_SIZE+5+10)*4(r1)
+ lfd fa2,(LA_WORD_SIZE+5+12)*4(r1)
+ lfd fa3,(LA_WORD_SIZE+5+14)*4(r1)
+ lfd fa4,(LA_WORD_SIZE+5+16)*4(r1)
+ lfd fa5,(LA_WORD_SIZE+5+18)*4(r1)
+ lfd fa6,(LA_WORD_SIZE+5+20)*4(r1)
+ lfd fa7,(LA_WORD_SIZE+5+22)*4(r1)
+ lfd fa8,(LA_WORD_SIZE+5+24)*4(r1)
+ lfd fa9,(LA_WORD_SIZE+5+26)*4(r1)
+ lfd fa10,(LA_WORD_SIZE+5+28)*4(r1)
+ lfd fa11,(LA_WORD_SIZE+5+30)*4(r1)
+ lfd fa12,(LA_WORD_SIZE+5+32)*4(r1)
#else
- RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE + 1)
+ RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
#endif
- lwz itmp1,(LA_SIZE + 1*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+LA_LR_OFFSET(r1)
+ lwz mptr,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 2*4)(r1)
+
+ lwz itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo+LA_LR_OFFSET(r1)
+ mtlr itmp1
+ addi r1,r1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo
+
+ mr. pv,pv /* test for exception */
+ beq L_asm_call_jit_compiler_exception
+
lwz itmp3,-12(itmp1)
extsh itmp3,itmp3
add mptr,mptr,itmp3
- stw pv,0(mptr)
+ stw pv,0(mptr) /* store method address */
- mtctr pv
+ mtctr pv /* move method address to control reg */
+ bctr /* and call the Java method */
- lwz r0,(LA_SIZE + 1*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+LA_LR_OFFSET(r1)
- mtlr r0
- addi r1,r1,(LA_SIZE + 1*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)
- lwz r29,-12(r1)
-/* lwz pv,-8(r1)*/
- lwz r31,-4(r1)
- bctr
+L_asm_call_jit_compiler_exception:
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ mflr r0
+ stw r0,LA_LR_OFFSET(sp)
+ stwu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
+ bl builtin_asm_get_exceptionptrptr
+ lwz r0,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp)
+ mtlr r0
+ addi sp,sp,LA_SIZE_ALIGNED
+#else
+# if defined(__DARWIN__)
+ lwz v0,lo16(_no_threads_exceptionptr-0b)(pv)
+# else
+ lis v0,_no_threads_exceptionptr@ha
+ addi v0,v0,_no_threads_exceptionptr@l
+# endif
+#endif
+ lwz xptr,0(v0) /* get the exception pointer */
+ li itmp3,0
+ stw itmp3,0(v0) /* clear the exception pointer */
+
+ mflr xpc
+ addi xpc,xpc,-4
+ b L_asm_handle_nat_exception
/********************* function asm_handle_exception ***************************
*******************************************************************************/
asm_handle_nat_exception:
+L_asm_handle_nat_exception: /* required for PIC code */
mflr r9
lwz itmp3,4(r9)
extsh itmp3,itmp3
lwz itmp3,8(r9)
srwi itmp3,itmp3,16
cmpwi itmp3,0x3dad
- bne asm_handle_exception
+ bne L_asm_handle_exception
lwz itmp3,8(r9)
slwi itmp3,itmp3,16
add pv,pv,itmp3
asm_handle_exception:
- addi r1,r1,-18*4
- stw r0,0*4(r1)
- stw r2,1*4(r1)
- stw r3,2*4(r1)
- stw r4,3*4(r1)
- stw r5,4*4(r1)
- stw r6,5*4(r1)
- stw r7,6*4(r1)
- stw r8,7*4(r1)
- stw r9,8*4(r1)
- stw r10,9*4(r1)
- stw r16,10*4(r1)
- stw r17,11*4(r1)
- stw r18,12*4(r1)
- stw r19,13*4(r1)
- stw r20,14*4(r1)
- stw r21,15*4(r1)
- stw r22,16*4(r1)
- stw r23,17*4(r1)
-
- li r9,1
-ex_stack_loop:
- addi r1,r1,-4*4 /* allocate stack */
- stw xptr,0*4(r1) /* save used register */
- stw xpc,1*4(r1)
- mflr xptr
- stw xptr,2*4(r1)
- stw r9,3*4(r1)
-
- lwz r3,0*4(r1) /* exception pointer */
- lwz r4,MethodPointer(pv) /* method pointer */
- mr r5,xpc /* exception pc */
-/* mr r6,r9 */
- li r6,0 /* line number */
- li r7,4 /* set no unwind flag */
-
- /* XXX no valid stack frame chaining here */
- addi r1,r1,-(24+5*4) /* 24 linkage area + 5 argument * 4 */
- bl builtin_trace_exception
- addi r1,r1,(24+5*4)
-
- lwz xptr,2*4(r1)
- mtlr xptr
- lwz xptr,0*4(r1) /* restore xptr */
- lwz xpc,1*4(r1)
- lwz r9,3*4(r1)
- addi r1,r1,4*4
-
- lwz r3,ExTableSize(pv) /* r3 = exception table size */
- mr. r3,r3 /* if empty table skip */
- beq empty_table
-
- addi r4,pv,ExTableStart /* r4 = start of exception table */
-
-ex_table_loop:
- lwz r5,ExStartPC(r4) /* r5 = exception start pc */
- cmplw r5,xpc /* (startpc <= xpc) */
- bgt ex_table_cont
- lwz r5,ExEndPC(r4) /* r5 = exception end pc */
- cmplw xpc,r5 /* (xpc < endpc) */
- bge ex_table_cont
- lwz r7,ExCatchType(r4) /* r7 = exception catch type */
- mr. r7,r7
- beq ex_handle_it
-
- lwz itmp3,offclassloaded(r7)
- mr. itmp3,itmp3
- bne L_class_loaded
-
- /* XXX no valid stack frame chaining here */
- addi r1,r1,-16*4 /* allocate stack */
- stw r3,7*4(r1) /* save used registers */
- stw r4,8*4(r1) /* 6*4 (linkage) + 1*4 (arg1) + 7*4 (save) */
- stw r9,9*4(r1)
- stw xptr,10*4(r1)
- stw xpc,11*4(r1)
- mflr xptr
- stw xptr,12*4(r1)
- stw r7,13*4(r1)
-
- mr r3,r7 /* arg1 = exceptionclass */
- bl load_class_bootstrap
-
- lwz r3,7*4(r1)
- lwz r4,8*4(r1)
- lwz r9,9*4(r1)
- lwz xptr,10*4(r1)
- lwz xpc,11*4(r1)
- lwz itmp3,12*4(r1)
- mtlr itmp3
- lwz r7,13*4(r1)
- addi r1,r1,16*4
-
-L_class_loaded:
- lwz itmp3,offclasslinked(r7)
- mr. itmp3,itmp3
- /* XXX no valid stack frame chaining here */
- addi r1,r1,-16*4 /* allocate stack */
- stw r7,13*4(r1)
- bne L_class_linked
-
- stw r3,7*4(r1) /* save used registers */
- stw r4,8*4(r1) /* 6*4 (linkage) + 1*4 (arg1) + 7*4 (save) */
- stw r9,9*4(r1)
- stw xptr,10*4(r1)
- stw xpc,11*4(r1)
- mflr xptr
- stw xptr,12*4(r1)
-
- mr r3,r7 /* arg1 = exceptionclass */
- bl link_class
-
- lwz r3,7*4(r1)
- lwz r4,8*4(r1)
- lwz r9,9*4(r1)
- lwz xptr,10*4(r1)
- lwz xpc,11*4(r1)
- lwz itmp3,12*4(r1)
- mtlr itmp3
-
-L_class_linked:
-_crit_restart1:
- lwz r7,13*4(r1)
-_crit_begin1:
- lwz r6,offobjvftbl(xptr) /* r6 = vftblptr(xptr) */
- lwz r7,offclassvftbl(r7) /* r7 = vftblptr(catchtype) class (not obj) */
- lwz r6,offbaseval(r6) /* r6 = baseval(xptr) */
- lwz r8,offbaseval(r7) /* r8 = baseval(catchtype) */
- lwz r7,offdiffval(r7) /* r7 = diffval(catchtype) */
-_crit_end1:
- subf r6,r8,r6 /* r6 = baseval(xptr) - baseval(catchtype) */
- cmplw r6,r7 /* xptr is instanceof catchtype */
- addi r1,r1,16*4
- bgt ex_table_cont /* if (false) continue */
-
-ex_handle_it:
- lwz xpc,ExHandlerPC(r4) /* xpc = exception handler pc */
- mr. r9,r9
- beq ex_jump
-
- lwz r0,0*4(r1)
- lwz r2,1*4(r1)
- lwz r3,2*4(r1)
- lwz r4,3*4(r1)
- lwz r5,4*4(r1)
- lwz r6,5*4(r1)
- lwz r7,6*4(r1)
- lwz r8,7*4(r1)
- lwz r9,8*4(r1)
- lwz r10,9*4(r1)
- lwz r16,10*4(r1)
- lwz r17,11*4(r1)
- lwz r18,12*4(r1)
- lwz r19,13*4(r1)
- lwz r20,14*4(r1)
- lwz r21,15*4(r1)
- lwz r22,16*4(r1)
- lwz r23,17*4(r1)
- addi r1,r1,18*4
-
-ex_jump:
- mtctr xpc
- bctr
+L_asm_handle_exception: /* required for PIC code */
+ addi sp,sp,-(ARG_CNT+TMP_CNT)*8 /* create maybe-leaf stackframe */
+
+#if defined(__DARWIN__)
+#else
+ SAVE_ARGUMENT_REGISTERS(0) /* we save arg and temp registers in */
+ SAVE_TEMPORARY_REGISTERS(ARG_CNT) /* case this is a leaf method */
+#endif
+
+ li a3,(ARG_CNT+TMP_CNT)*8 /* prepare a3 for handle_exception */
+ li a4,1 /* set maybe-leaf flag */
+
+L_asm_handle_exception_stack_loop:
+ addi sp,sp,-(LA_WORD_SIZE+4+5)*4 /* allocate stack */
+ stw xptr,LA_SIZE+4*4(sp) /* save exception pointer */
+ stw xpc,LA_SIZE+5*4(sp) /* save exception pc */
+ stw pv,LA_SIZE+6*4(sp) /* save data segment pointer */
+ mflr r0 /* save return address */
+	stw	r0,LA_SIZE+7*4(sp)
+ add a3,a3,sp /* calculate Java sp into a3... */
+ addi a3,a3,(LA_WORD_SIZE+4+5)*4
+ stw a4,LA_SIZE+8*4(sp) /* save maybe-leaf flag */
+
+ mr a0,xptr /* pass exception pointer */
+ mr a1,xpc /* pass exception pc */
+ mr a2,pv /* pass data segment pointer */
+ /* a3 is still set */
+ bl exceptions_handle_exception
+
+ mr. v0,v0
+ beq L_asm_handle_exception_not_catched
+
+ mr xpc,v0 /* move handlerpc into xpc */
+ lwz xptr,LA_SIZE+4*4(sp) /* restore exception pointer */
+ lwz pv,LA_SIZE+6*4(sp) /* restore data segment pointer */
+	lwz	r0,LA_SIZE+7*4(sp)            /* restore return address */
+ mtlr r0
+ lwz a4,LA_SIZE+8*4(sp) /* get maybe-leaf flag */
+ addi sp,sp,(LA_WORD_SIZE+4+5)*4 /* free stack frame */
+
+ mr. a4,a4
+ beq L_asm_handle_exception_no_leaf
-ex_table_cont:
- addi r4,r4,ExEntrySize /* next exception table entry */
- addic. r3,r3,-1 /* decrement entry counter */
- bgt ex_table_loop /* if (t0 > 0) next entry */
-
-empty_table:
- mr. r9,r9 /* if here the first time, then */
- beq ex_already_cleared
- addi r1,r1,18*4 /* deallocate stack and */
- li r9,0 /* clear the no unwind flag */
-ex_already_cleared:
- lwz r3,IsSync(pv) /* t0 = SyncOffset */
- mr. r3,r3
- beq no_monitor_exit /* if zero no monitorexit */
-
-#if defined(USE_THREADS)
- add r3,r1,r3
- lwz r6,-4(r3)
-
- addi r1,r1,-6*4
- stw r3,0*4(r1)
- stw r4,1*4(r1)
- stw r9,2*4(r1)
- stw xptr,3*4(r1)
- stw xpc,4*4(r1)
- mflr xptr
- stw xptr,5*4(r1)
-
- mr r3,r6
- /* XXX no valid stack frame chaining here */
- addi r1,r1,-40
- bl builtin_monitorexit
- addi r1,r1,40
-
- lwz xptr,5*4(r1)
- mtlr xptr
- lwz r3,0*4(r1)
- lwz r4,1*4(r1)
- lwz r9,2*4(r1)
- lwz xptr,3*4(r1)
- lwz xpc,4*4(r1)
- addi r1,r1,6*4
+#if defined(__DARWIN__)
+#else
+ RESTORE_ARGUMENT_REGISTERS(0) /* if this is a leaf method, we have */
+ RESTORE_TEMPORARY_REGISTERS(ARG_CNT)/* to restore arg and temp registers */
#endif
-no_monitor_exit:
- lwz r3,FrameSize(pv) /* r3 = frame size */
- add r1,r1,r3 /* unwind stack */
- mr r3,r1 /* r3 = pointer to save area */
- lwz r4,IsLeaf(pv) /* r4 = is leaf procedure */
- mr. r4,r4
- bne ex_no_restore /* if (leaf) skip */
- lwz r4,LA_LR_OFFSET(r3) /* restore ra */
- mtlr r4 /* t0-- */
-ex_no_restore:
- mflr r4 /* the new xpc is ra */
- mr xpc,r4
- lwz r4,IntSave(pv) /* r4 = saved int register count */
+ addi sp,sp,(ARG_CNT+TMP_CNT)*8 /* remove maybe-leaf stackframe */
+
+L_asm_handle_exception_no_leaf:
+ mtctr xpc /* jump to the handler */
+ bctr
+
+L_asm_handle_exception_not_catched:
+ lwz xptr,LA_SIZE+4*4(sp) /* restore exception pointer */
+ lwz pv,LA_SIZE+6*4(sp) /* restore data segment pointer */
+	lwz	r0,LA_SIZE+7*4(sp)            /* restore return address */
+ mtlr r0
+ lwz a4,LA_SIZE+8*4(sp) /* get maybe-leaf flag */
+ addi sp,sp,(LA_WORD_SIZE+4+5)*4 /* free stack frame */
+
+ mr. a4,a4
+ beq L_asm_handle_exception_no_leaf_stack
+
+ addi sp,sp,(ARG_CNT+TMP_CNT)*8 /* remove maybe-leaf stackframe */
+ li a4,0 /* clear the maybe-leaf flag */
+
+L_asm_handle_exception_no_leaf_stack:
+ lwz t0,FrameSize(pv) /* get frame size */
+ add t0,sp,t0 /* pointer to save area */
+
+ lwz t1,IsLeaf(pv) /* is leaf procedure */
+ mr. t1,t1
+ bne L_asm_handle_exception_no_ra_restore
+
+ lwz r0,LA_LR_OFFSET(t0) /* restore ra */
+ mtlr r0
+
+L_asm_handle_exception_no_ra_restore:
+ mflr xpc /* the new xpc is ra */
+ lwz t1,IntSave(pv) /* t1 = saved int register count */
bl ex_int1
ex_int1:
- mflr r5
+ mflr t2 /* t2 = current pc */
#if defined(__DARWIN__)
- addi r5,r5,lo16(ex_int2-ex_int1)
+ addi t2,t2,lo16(ex_int2-ex_int1)
#else
- addi r5,r5,(ex_int2-ex_int1)@l
+ addi t2,t2,(ex_int2-ex_int1)@l
#endif
- slwi r4,r4,2
- subf r5,r4,r5
- mtctr r5
+ slwi t1,t1,2 /* t1 = register count * 4 */
+ subf t2,t1,t2 /* t2 = IntSave - t1 */
+ mtctr t2
bctr
- lwz s0,-40(r3)
- lwz s1,-36(r3)
- lwz s2,-32(r3)
- lwz s3,-28(r3)
- lwz s4,-24(r3)
- lwz s5,-20(r3)
- lwz s6,-16(r3)
- lwz s7,-12(r3)
- lwz s8,-8(r3)
- lwz s9,-4(r3)
+
+ lwz s0,-10*4(t0)
+ lwz s1,-9*4(t0)
+ lwz s2,-8*4(t0)
+ lwz s3,-7*4(t0)
+ lwz s4,-6*4(t0)
+ lwz s5,-5*4(t0)
+ lwz s6,-4*4(t0)
+ lwz s7,-3*4(t0)
+ lwz s8,-2*4(t0)
+ lwz s9,-1*4(t0)
ex_int2:
- subf r3,r4,r3
+ subf t0,t1,t0 /* t0 = t0 - register count * 4 */
- lwz r4,FltSave(pv)
+ lwz t1,FltSave(pv)
bl ex_flt1
ex_flt1:
- mflr r5
+ mflr t2
#if defined(__DARWIN__)
- addi r5,r5,lo16(ex_flt2-ex_flt1)
+ addi t2,t2,lo16(ex_flt2-ex_flt1)
#else
- addi r5,r5,(ex_flt2-ex_flt1)@l
+ addi t2,t2,(ex_flt2-ex_flt1)@l
#endif
- slwi r4,r4,2
- subf r5,r4,r5
- mtctr r5
+ slwi t1,t1,2 /* t1 = register count * 4 */
+ subf t2,t1,t2 /* t2 = FltSave - t1 */
+ mtctr t2
bctr
- lfd fs0,-80(r3)
- lfd fs1,-72(r3)
- lfd fs2,-64(r3)
- lfd fs3,-56(r3)
- lfd fs4,-48(r3)
- lfd fs5,-40(r3)
- lfd fs6,-32(r3)
- lfd fs7,-24(r3)
- lfd fs8,-16(r3)
- lfd fs9,-8(r3)
+
+ lfd fs0,-10*8(t0)
+ lfd fs1,-9*8(t0)
+ lfd fs2,-8*8(t0)
+ lfd fs3,-7*8(t0)
+ lfd fs4,-6*8(t0)
+ lfd fs5,-5*8(t0)
+ lfd fs6,-4*8(t0)
+ lfd fs7,-3*8(t0)
+ lfd fs8,-2*8(t0)
+ lfd fs9,-1*8(t0)
ex_flt2:
+ lwz t0,FrameSize(pv) /* get frame size */
+ add sp,sp,t0 /* unwind stack */
+ li a3,0 /* prepare a3 for handle_exception */
+
mtlr xpc
lwz itmp3,4(xpc)
extsh itmp3,itmp3
lwz itmp3,8(xpc)
srwi itmp3,itmp3,16
cmpwi itmp3,0x3dad
- bne ex_stack_loop
+ bne L_asm_handle_exception_stack_loop
lwz itmp3,8(xpc)
slwi itmp3,itmp3,16
add pv,pv,itmp3
- b ex_stack_loop
+
+ b L_asm_handle_exception_stack_loop
/* asm_wrapper_patcher *********************************************************
XXX
Stack layout:
- 16 return address into JIT code (patch position)
- 12 pointer to virtual java_objectheader
- 8 machine code (which is patched back later)
- 4 unresolved class/method/field reference
- 0 patcher function pointer to call
+ 20 return address into JIT code (patch position)
+ 16 pointer to virtual java_objectheader
+ 12 machine code (which is patched back later)
+ 8 unresolved class/method/field reference
+ 4 data segment displacement from load instructions
+ 0 patcher function pointer to call (pv is saved here afterwards)
*******************************************************************************/
asm_wrapper_patcher:
- mflr r0
- stw r0,8*4+LA_LR_OFFSET(r1) /* skip stack frame of patcher stub */
- stwu r1,-((6+1+37)*4)(r1) /* keep stack 16-bytes aligned: 6+1+37 = 44 */
-
-#if 1
- stw a0,(6+1+0)*4(r1) /* save argument registers */
- stw a1,(6+1+1)*4(r1) /* preserve linkage area (24 bytes) */
- stw a2,(6+1+2)*4(r1) /* and 4 bytes for 1 argument */
- stw a3,(6+1+3)*4(r1)
- stw a4,(6+1+4)*4(r1)
- stw a5,(6+1+5)*4(r1)
- stw a6,(6+1+6)*4(r1)
- stw a7,(6+1+7)*4(r1)
-
- stfd fa0,(6+1+8)*4(sp)
- stfd fa1,(6+1+10)*4(sp)
- stfd fa2,(6+1+12)*4(sp)
- stfd fa3,(6+1+14)*4(sp)
- stfd fa4,(6+1+16)*4(sp)
- stfd fa5,(6+1+18)*4(sp)
- stfd fa6,(6+1+20)*4(sp)
- stfd fa7,(6+1+22)*4(sp)
- stfd fa8,(6+1+24)*4(sp)
- stfd fa9,(6+1+26)*4(sp)
- stfd fa10,(6+1+28)*4(sp)
- stfd fa11,(6+1+30)*4(sp)
- stfd fa12,(6+1+32)*4(sp)
+ mflr r0 /* get Java return address (leaf) */
+ stw r0,6*4(sp) /* store it in the stub stackframe */
+ /* keep stack 16-bytes aligned: 6+1+37 = 44 */
+ stwu sp,-(LA_SIZE+(5+58)*4+sizestackframeinfo)(sp)
+
+#if defined(__DARWIN__)
+ stw a0,LA_SIZE+(5+0)*4(r1) /* save argument registers */
+ stw a1,LA_SIZE+(5+1)*4(r1) /* preserve linkage area (24 bytes) */
+ stw a2,LA_SIZE+(5+2)*4(r1) /* and 4 bytes for 4 argument */
+ stw a3,LA_SIZE+(5+3)*4(r1)
+ stw a4,LA_SIZE+(5+4)*4(r1)
+ stw a5,LA_SIZE+(5+5)*4(r1)
+ stw a6,LA_SIZE+(5+6)*4(r1)
+ stw a7,LA_SIZE+(5+7)*4(r1)
+
+ stfd fa0,LA_SIZE+(5+8)*4(sp)
+ stfd fa1,LA_SIZE+(5+10)*4(sp)
+ stfd fa2,LA_SIZE+(5+12)*4(sp)
+ stfd fa3,LA_SIZE+(5+14)*4(sp)
+ stfd fa4,LA_SIZE+(5+16)*4(sp)
+ stfd fa5,LA_SIZE+(5+18)*4(sp)
+ stfd fa6,LA_SIZE+(5+20)*4(sp)
+ stfd fa7,LA_SIZE+(5+22)*4(sp)
+ stfd fa8,LA_SIZE+(5+24)*4(sp)
+ stfd fa9,LA_SIZE+(5+26)*4(sp)
+ stfd fa10,LA_SIZE+(5+28)*4(sp)
+ stfd fa11,LA_SIZE+(5+30)*4(sp)
+ stfd fa12,LA_SIZE+(5+32)*4(sp)
+
+ stw t0,(LA_WORD_SIZE+5+33)*4(r1)
+ stw t1,(LA_WORD_SIZE+5+34)*4(r1)
+ stw t2,(LA_WORD_SIZE+5+35)*4(r1)
+ stw t3,(LA_WORD_SIZE+5+36)*4(r1)
+ stw t4,(LA_WORD_SIZE+5+37)*4(r1)
+ stw t5,(LA_WORD_SIZE+5+38)*4(r1)
+ stw t6,(LA_WORD_SIZE+5+39)*4(r1)
+ stw t7,(LA_WORD_SIZE+5+40)*4(r1)
+
+ stfd ft0,(LA_WORD_SIZE+5+42)*4(r1)
+ stfd ft1,(LA_WORD_SIZE+5+44)*4(r1)
+ stfd ft2,(LA_WORD_SIZE+5+46)*4(r1)
+ stfd ft3,(LA_WORD_SIZE+5+48)*4(r1)
+ stfd ft4,(LA_WORD_SIZE+5+50)*4(r1)
+ stfd ft5,(LA_WORD_SIZE+5+52)*4(r1)
#else
- SAVE_ARGUMENT_REGISTERS(6+1) /* save 8 int/13 float argument registers */
+ SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1) /* save 8 int/8 float arguments */
+ SAVE_TEMPORARY_REGISTERS(LA_WORD_SIZE+1+24)
#endif
-#if 0
- stw r2,25*8(r1)
- stw r16,26*8(r1)
- stw r17,27*8(r1)
- stw r18,28*8(r1)
- stw r19,29*8(r1)
- stw r20,30*8(r1)
- stw r21,31*8(r1)
- stw r22,32*8(r1)
- stw r23,33*8(r1)
-#endif
+ stw itmp1,LA_SIZE+(5+54)*4(sp)
+ stw itmp2,LA_SIZE+(5+55)*4(sp)
+ stw pv,LA_SIZE+(5+56)*4(sp)
+
+ addi a0,sp,LA_SIZE+(5+58)*4 /* create stackframe info */
+ mr a1,pv
+ addi a2,sp,(8+LA_WORD_SIZE+5+58)*4+sizestackframeinfo
+	mr      a3,r0                       /* this is correct for leaf methods */
+ lwz a4,((5+LA_WORD_SIZE+5+58)*4+sizestackframeinfo)(sp) /* pass xpc */
+ bl stacktrace_create_extern_stackframeinfo
+
+ addi a0,sp,(0+LA_WORD_SIZE+5+58)*4+sizestackframeinfo /* pass sp */
+ lwz pv,(0+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp) /* get function */
+ lwz itmp1,LA_SIZE+(5+56)*4(sp) /* move pv to position of fp */
+ stw itmp1,(0+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp)
+ mtctr pv /* call the patcher function */
+ bctrl
+ stw v0,LA_SIZE+(5+57)*4(sp) /* save return value */
- stw itmp1,(6+1+34)*4(sp)
- stw itmp2,(6+1+35)*4(sp)
- stw pv,(6+1+36)*4(sp)
+ addi a0,sp,LA_SIZE+(5+58)*4
+ bl stacktrace_remove_stackframeinfo /* remove stackframe info */
- addi a0,sp,(1+6+1+37)*4 /* pass sp, skip patcher function pointer */
- lwz pv,(0+6+1+37)*4(sp) /* get function pointer */
- mtctr pv /* call the patcher function */
- bctrl
- mr itmp3,r3 /* save return value in temp register */
-
-#if 1
- lwz a0,(6+1+0)*4(r1)
- lwz a1,(6+1+1)*4(r1)
- lwz a2,(6+1+2)*4(r1)
- lwz a3,(6+1+3)*4(r1)
- lwz a4,(6+1+4)*4(r1)
- lwz a5,(6+1+5)*4(r1)
- lwz a6,(6+1+6)*4(r1)
- lwz a7,(6+1+7)*4(r1)
-
- lfd fa0,(6+1+8)*4(sp)
- lfd fa1,(6+1+10)*4(sp)
- lfd fa2,(6+1+12)*4(sp)
- lfd fa3,(6+1+14)*4(sp)
- lfd fa4,(6+1+16)*4(sp)
- lfd fa5,(6+1+18)*4(sp)
- lfd fa6,(6+1+20)*4(sp)
- lfd fa7,(6+1+22)*4(sp)
- lfd fa8,(6+1+24)*4(sp)
- lfd fa9,(6+1+26)*4(sp)
- lfd fa10,(6+1+28)*4(sp)
- lfd fa11,(6+1+30)*4(sp)
- lfd fa12,(6+1+32)*4(sp)
+#if defined(__DARWIN__)
+ lwz a0,LA_SIZE+(5+0)*4(r1)
+ lwz a1,LA_SIZE+(5+1)*4(r1)
+ lwz a2,LA_SIZE+(5+2)*4(r1)
+ lwz a3,LA_SIZE+(5+3)*4(r1)
+ lwz a4,LA_SIZE+(5+4)*4(r1)
+ lwz a5,LA_SIZE+(5+5)*4(r1)
+ lwz a6,LA_SIZE+(5+6)*4(r1)
+ lwz a7,LA_SIZE+(5+7)*4(r1)
+
+ lfd fa0,LA_SIZE+(5+8)*4(sp)
+ lfd fa1,LA_SIZE+(5+10)*4(sp)
+ lfd fa2,LA_SIZE+(5+12)*4(sp)
+ lfd fa3,LA_SIZE+(5+14)*4(sp)
+ lfd fa4,LA_SIZE+(5+16)*4(sp)
+ lfd fa5,LA_SIZE+(5+18)*4(sp)
+ lfd fa6,LA_SIZE+(5+20)*4(sp)
+ lfd fa7,LA_SIZE+(5+22)*4(sp)
+ lfd fa8,LA_SIZE+(5+24)*4(sp)
+ lfd fa9,LA_SIZE+(5+26)*4(sp)
+ lfd fa10,LA_SIZE+(5+28)*4(sp)
+ lfd fa11,LA_SIZE+(5+30)*4(sp)
+ lfd fa12,LA_SIZE+(5+32)*4(sp)
+
+ lwz t0,(LA_WORD_SIZE+5+33)*4(r1)
+ lwz t1,(LA_WORD_SIZE+5+34)*4(r1)
+ lwz t2,(LA_WORD_SIZE+5+35)*4(r1)
+ lwz t3,(LA_WORD_SIZE+5+36)*4(r1)
+ lwz t4,(LA_WORD_SIZE+5+37)*4(r1)
+ lwz t5,(LA_WORD_SIZE+5+38)*4(r1)
+ lwz t6,(LA_WORD_SIZE+5+39)*4(r1)
+ lwz t7,(LA_WORD_SIZE+5+40)*4(r1)
+
+ lfd ft0,(LA_WORD_SIZE+5+42)*4(r1)
+ lfd ft1,(LA_WORD_SIZE+5+44)*4(r1)
+ lfd ft2,(LA_WORD_SIZE+5+46)*4(r1)
+ lfd ft3,(LA_WORD_SIZE+5+48)*4(r1)
+ lfd ft4,(LA_WORD_SIZE+5+50)*4(r1)
+ lfd ft5,(LA_WORD_SIZE+5+52)*4(r1)
#else
- RESTORE_ARGUMENT_REGISTERS(6+1)/* restore 8 int/13 float argument reg. */
-#endif
-
-#if 0
- lwz r2,25*8(r1)
- lwz r16,26*8(r1)
- lwz r17,27*8(r1)
- lwz r18,28*8(r1)
- lwz r19,29*8(r1)
- lwz r20,30*8(r1)
- lwz r21,31*8(r1)
- lwz r22,32*8(r1)
- lwz r23,33*8(r1)
+ RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1) /* restore 8 int/8 float args */
+ RESTORE_TEMPORARY_REGISTERS(LA_WORD_SIZE+1+24)
#endif
- lwz itmp1,(4+6+1+37)*4(sp)/* get return address (into JIT code) */
- mtlr itmp1
-
- lwz itmp1,(6+1+34)*4(sp)
- lwz itmp2,(6+1+35)*4(sp)
- lwz pv,(6+1+36)*4(sp)
+ lwz itmp1,LA_SIZE+(5+54)*4(sp)
+ lwz itmp2,LA_SIZE+(5+55)*4(sp)
+ lwz pv,LA_SIZE+(5+56)*4(sp)
+ lwz itmp3,LA_SIZE+(5+57)*4(sp) /* restore return value into temp reg.*/
- addi r1,r1,(8+6+1+37)*4 /* remove stack frame + patcher stub stack */
+ lwz r0,(6+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp) /* restore RA */
+ mtlr r0
mr. itmp3,itmp3 /* check for an exception */
beq L_asm_wrapper_patcher_exception
- blr /* jump to new patched code */
+ /* get return address (into JIT code) */
+ lwz itmp3,(5+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp)
+
+ /* remove stack frame + patcher stub stack */
+ addi sp,sp,(8+LA_WORD_SIZE+5+58)*4+sizestackframeinfo
+
+ mtctr itmp3
+ bctr /* jump to new patched code */
L_asm_wrapper_patcher_exception:
+ lwz xpc,(5+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp)
+ addi sp,sp,(8+LA_WORD_SIZE+5+58)*4+sizestackframeinfo
+
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-LA_SIZE_ALIGNED(r1) /* preserve linkage area */
+ stw r0,LA_LR_OFFSET(sp)
+ stwu sp,-(LA_SIZE+1*4)(sp) /* preserve linkage area */
+ stw xpc,LA_SIZE+0*4(sp)
bl builtin_asm_get_exceptionptrptr
- lwz r0,LA_SIZE_ALIGNED+LA_LR_OFFSET(r1)
- mtlr r0
- addi r1,r1,LA_SIZE_ALIGNED
+ lwz xpc,LA_SIZE+0*4(sp)
+ lwz r0,LA_SIZE+1*4+LA_LR_OFFSET(sp)
+ mtlr r0
+ addi sp,sp,LA_SIZE+1*4
#else
# if defined(__DARWIN__)
- lwz v0,lo16(_exceptionptr-0b)(pv)
+ lwz v0,lo16(_no_threads_exceptionptr-0b)(pv)
# else
- lis v0,_exceptionptr@ha
- addi v0,v0,_exceptionptr@l
+ lis v0,_no_threads_exceptionptr@ha
+ addi v0,v0,_no_threads_exceptionptr@l
# endif
#endif
lwz xptr,0(v0) /* get the exception pointer */
li itmp3,0
stw itmp3,0(v0) /* clear the exception pointer */
+ b L_asm_handle_exception
- mflr xpc
- b asm_handle_exception
-
-
-/* asm_builtin_arraycheckcast **************************************************
- Does the cast check and eventually throws an exception.
+/* asm_replacement_out *********************************************************
-*******************************************************************************/
-
-asm_builtin_arraycheckcast:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-48(r1)
+ This code is jumped to from the replacement-out stubs that are executed
+ when a thread reaches an activated replacement point.
- stw r3,32(r1)
- bl builtin_arraycheckcast
+ The purpose of asm_replacement_out is to read out the parts of the
+ execution state that cannot be accessed from C code, store this state,
+ and then call the C function replace_me.
- lwz r0,48+LA_LR_OFFSET(r1)
- mtlr r0
- mr. r3,r3
- beq nb_carray_throw
- lwz r3,32(r1)
- addi r1,r1,48
- blr
-
-nb_carray_throw:
- addi r1,r1,48
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_classcastexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+ Stack layout:
+ 16 start of stack inside method to replace
+ 0 rplpoint * info on the replacement point that was reached
+ NOTE: itmp3 has been clobbered by the replacement-out stub!
-/******************* function asm_builtin_aastore ******************************
-* *
-* Does the cast check and eventually throws an exception *
-* *
*******************************************************************************/
+
+/* some room to accommodate changes of the stack frame size during replacement */
+ /* XXX we should find a cleaner solution here */
+#define REPLACEMENT_ROOM 512
+
+asm_replacement_out:
+ /* create stack frame */
+ addi sp,sp,-(sizeexecutionstate + REPLACEMENT_ROOM) /* XXX align */
+
+ /* save link register */
+ mflr r16
+
+ /* save registers in execution state */
+ stw r0 ,( 0*8+offes_intregs)(sp)
+ stw r1 ,( 1*8+offes_intregs)(sp)
+ stw r2 ,( 2*8+offes_intregs)(sp)
+ stw r3 ,( 3*8+offes_intregs)(sp)
+ stw r4 ,( 4*8+offes_intregs)(sp)
+ stw r5 ,( 5*8+offes_intregs)(sp)
+ stw r6 ,( 6*8+offes_intregs)(sp)
+ stw r7 ,( 7*8+offes_intregs)(sp)
+ stw r8 ,( 8*8+offes_intregs)(sp)
+ stw r9 ,( 9*8+offes_intregs)(sp)
+ stw r10,(10*8+offes_intregs)(sp)
+ stw r11,(11*8+offes_intregs)(sp)
+ stw r12,(12*8+offes_intregs)(sp)
+ stw r13,(13*8+offes_intregs)(sp)
+ stw r14,(14*8+offes_intregs)(sp)
+ stw r15,(15*8+offes_intregs)(sp)
+ stw r16,(16*8+offes_intregs)(sp) /* link register */
+ stw r17,(17*8+offes_intregs)(sp)
+ stw r18,(18*8+offes_intregs)(sp)
+ stw r19,(19*8+offes_intregs)(sp)
+ stw r20,(20*8+offes_intregs)(sp)
+ stw r21,(21*8+offes_intregs)(sp)
+ stw r22,(22*8+offes_intregs)(sp)
+ stw r23,(23*8+offes_intregs)(sp)
+ stw r24,(24*8+offes_intregs)(sp)
+ stw r25,(25*8+offes_intregs)(sp)
+ stw r26,(26*8+offes_intregs)(sp)
+ stw r27,(27*8+offes_intregs)(sp)
+ stw r28,(28*8+offes_intregs)(sp)
+ stw r29,(29*8+offes_intregs)(sp)
+ stw r30,(30*8+offes_intregs)(sp)
+ stw r31,(31*8+offes_intregs)(sp)
-asm_builtin_aastore:
- mr. r3,r3
- beq nb_aastore_null
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-48(r1)
-
- lwz itmp1,offarraysize(r3)
- slwi itmp3,r4,2
- add itmp2,r3,itmp3
- cmplw r4,itmp1
- bge nb_aastore_bound
- mr r4,r5
- stw itmp2,32(r1)
- stw r4,36(r1)
- bl builtin_canstore
-
- lwz r0,48+LA_LR_OFFSET(r1)
- mtlr r0
- lwz itmp1,32(r1)
- lwz itmp2,36(r1)
- addi r1,r1,48
- mr. r3,r3
- beq nb_aastore_store
- stw itmp2,offobjarrdata(itmp1)
- blr
+ stfd fr0 ,( 0*8+offes_fltregs)(sp)
+ stfd fr1 ,( 1*8+offes_fltregs)(sp)
+ stfd fr2 ,( 2*8+offes_fltregs)(sp)
+ stfd fr3 ,( 3*8+offes_fltregs)(sp)
+ stfd fr4 ,( 4*8+offes_fltregs)(sp)
+ stfd fr5 ,( 5*8+offes_fltregs)(sp)
+ stfd fr6 ,( 6*8+offes_fltregs)(sp)
+ stfd fr7 ,( 7*8+offes_fltregs)(sp)
+ stfd fr8 ,( 8*8+offes_fltregs)(sp)
+ stfd fr9 ,( 9*8+offes_fltregs)(sp)
+ stfd fr10,(10*8+offes_fltregs)(sp)
+ stfd fr11,(11*8+offes_fltregs)(sp)
+ stfd fr12,(12*8+offes_fltregs)(sp)
+ stfd fr13,(13*8+offes_fltregs)(sp)
+ stfd fr14,(14*8+offes_fltregs)(sp)
+ stfd fr15,(15*8+offes_fltregs)(sp)
+ stfd fr16,(16*8+offes_fltregs)(sp)
+ stfd fr17,(17*8+offes_fltregs)(sp)
+ stfd fr18,(18*8+offes_fltregs)(sp)
+ stfd fr19,(19*8+offes_fltregs)(sp)
+ stfd fr20,(20*8+offes_fltregs)(sp)
+ stfd fr21,(21*8+offes_fltregs)(sp)
+ stfd fr22,(22*8+offes_fltregs)(sp)
+ stfd fr23,(23*8+offes_fltregs)(sp)
+ stfd fr24,(24*8+offes_fltregs)(sp)
+ stfd fr25,(25*8+offes_fltregs)(sp)
+ stfd fr26,(26*8+offes_fltregs)(sp)
+ stfd fr27,(27*8+offes_fltregs)(sp)
+ stfd fr28,(28*8+offes_fltregs)(sp)
+ stfd fr29,(29*8+offes_fltregs)(sp)
+ stfd fr30,(30*8+offes_fltregs)(sp)
+ stfd fr31,(31*8+offes_fltregs)(sp)
+
+ /* calculate sp of method */
+ addi itmp1,sp,(sizeexecutionstate + REPLACEMENT_ROOM + 4*4)
+ stw itmp1,(offes_sp)(sp)
-nb_aastore_null:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_nullpointerexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+ /* store pv */
+ stw pv,(offes_pv)(sp)
-nb_aastore_bound:
- addi r1,r1,48
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-(24+1*4)(r1)
- mr r3,r4 /* move index into a0 */
- bl new_arrayindexoutofboundsexception
- mr xptr,r3
- addi r1,r1,(24+1*4)
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+ /* call replace_me */
+ lwz a0,-(4*4)(itmp1) /* arg0: rplpoint * */
+ mr a1,sp /* arg1: execution state */
+ addi sp,sp,-(LA_SIZE_ALIGNED)
+ b replace_me /* call C function replace_me */
-nb_aastore_store:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_arraystoreexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+/* asm_replacement_in **********************************************************
+   This code installs the given execution state into the machine registers
+   and jumps to the replacement code.
-asm_builtin_idiv:
- mr. r4,r4
- beq nb_idiv
- lis itmp3,0x8000
- cmpw r3,itmp3
- bne normal_idiv
- cmpwi r4,-1
- bne normal_idiv
- blr
+ This function never returns!
-normal_idiv:
- divw r3,r3,r4
- blr
+ NOTE: itmp3 is not restored!
-nb_idiv:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_arithmeticexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
-
-
-asm_builtin_irem:
- mr itmp2,r3
- mr. r4,r4
- beq nb_irem
- lis itmp3,0x8000
- cmpw itmp2,itmp3
- bne normal_irem
- cmpwi r4,-1
- bne normal_irem
- li r3,0
- beqlr
-
-normal_irem:
- divw itmp3,itmp2,r4
- mullw itmp3,itmp3,r4
- subf r3,itmp3,itmp2
- blr
+ C prototype:
+ void asm_replacement_in(executionstate *es);
-nb_irem:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_arithmeticexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+*******************************************************************************/
+asm_replacement_in:
+ /* a0 == executionstate *es */
-asm_builtin_ldiv:
- or. r0,r5,r6
- beq nb_ldiv
- b builtin_ldiv
+ /* set new sp and pv */
+ lwz sp,(offes_sp)(a0)
+ lwz pv,(offes_pv)(a0)
+
+ /* copy registers from execution state */
+ lwz r0 ,( 0*8+offes_intregs)(a0)
+ /* r1 is sp */
+ /* r2 is reserved */
+ /* a0 is loaded below */
+ lwz r4 ,( 4*8+offes_intregs)(a0)
+ lwz r5 ,( 5*8+offes_intregs)(a0)
+ lwz r6 ,( 6*8+offes_intregs)(a0)
+ lwz r7 ,( 7*8+offes_intregs)(a0)
+ lwz r8 ,( 8*8+offes_intregs)(a0)
+ lwz r9 ,( 9*8+offes_intregs)(a0)
+ lwz r10,(10*8+offes_intregs)(a0)
+ lwz r11,(11*8+offes_intregs)(a0)
+ lwz r12,(12*8+offes_intregs)(a0)
+ /* r13 is pv */
+ lwz r14,(14*8+offes_intregs)(a0)
+ lwz r15,(15*8+offes_intregs)(a0)
+ lwz r16,(16*8+offes_intregs)(a0) /* link register */
+ lwz r17,(17*8+offes_intregs)(a0)
+ lwz r18,(18*8+offes_intregs)(a0)
+ lwz r19,(19*8+offes_intregs)(a0)
+ lwz r20,(20*8+offes_intregs)(a0)
+ lwz r21,(21*8+offes_intregs)(a0)
+ lwz r22,(22*8+offes_intregs)(a0)
+ lwz r23,(23*8+offes_intregs)(a0)
+ lwz r24,(24*8+offes_intregs)(a0)
+ lwz r25,(25*8+offes_intregs)(a0)
+ lwz r26,(26*8+offes_intregs)(a0)
+ lwz r27,(27*8+offes_intregs)(a0)
+ lwz r28,(28*8+offes_intregs)(a0)
+ lwz r29,(29*8+offes_intregs)(a0)
+ lwz r30,(30*8+offes_intregs)(a0)
+ lwz r31,(31*8+offes_intregs)(a0)
+
+ lfd fr0 ,( 0*8+offes_fltregs)(a0)
+ lfd fr1 ,( 1*8+offes_fltregs)(a0)
+ lfd fr2 ,( 2*8+offes_fltregs)(a0)
+ lfd fr3 ,( 3*8+offes_fltregs)(a0)
+ lfd fr4 ,( 4*8+offes_fltregs)(a0)
+ lfd fr5 ,( 5*8+offes_fltregs)(a0)
+ lfd fr6 ,( 6*8+offes_fltregs)(a0)
+ lfd fr7 ,( 7*8+offes_fltregs)(a0)
+ lfd fr8 ,( 8*8+offes_fltregs)(a0)
+ lfd fr9 ,( 9*8+offes_fltregs)(a0)
+ lfd fr10,(10*8+offes_fltregs)(a0)
+ lfd fr11,(11*8+offes_fltregs)(a0)
+ lfd fr12,(12*8+offes_fltregs)(a0)
+ lfd fr13,(13*8+offes_fltregs)(a0)
+ lfd fr14,(14*8+offes_fltregs)(a0)
+ lfd fr15,(15*8+offes_fltregs)(a0)
+ lfd fr16,(16*8+offes_fltregs)(a0)
+ lfd fr17,(17*8+offes_fltregs)(a0)
+ lfd fr18,(18*8+offes_fltregs)(a0)
+ lfd fr19,(19*8+offes_fltregs)(a0)
+ lfd fr20,(20*8+offes_fltregs)(a0)
+ lfd fr21,(21*8+offes_fltregs)(a0)
+ lfd fr22,(22*8+offes_fltregs)(a0)
+ lfd fr23,(23*8+offes_fltregs)(a0)
+ lfd fr24,(24*8+offes_fltregs)(a0)
+ lfd fr25,(25*8+offes_fltregs)(a0)
+ lfd fr26,(26*8+offes_fltregs)(a0)
+ lfd fr27,(27*8+offes_fltregs)(a0)
+ lfd fr28,(28*8+offes_fltregs)(a0)
+ lfd fr29,(29*8+offes_fltregs)(a0)
+ lfd fr30,(30*8+offes_fltregs)(a0)
+ lfd fr31,(31*8+offes_fltregs)(a0)
+
+ /* restore link register */
+
+ mtlr r16
+
+ /* load new pc */
-nb_ldiv:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_arithmeticexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+ lwz itmp3,offes_pc(a0)
+ /* load a0 */
+
+ lwz a0,(3*8+offes_intregs)(a0)
-asm_builtin_lrem:
- or. r0,r5,r6
- beq nb_lrem
- b builtin_lrem
+ /* jump to new code */
-nb_lrem:
- mflr r0
- stw r0,LA_LR_OFFSET(r1)
- stwu r1,-24(r1)
- bl new_arithmeticexception
- mr xptr,r3
- addi r1,r1,24
- lwz r0,LA_LR_OFFSET(r1)
- mr xpc,r0
- mtlr r0
- b asm_handle_nat_exception
+ mtctr itmp3
+ bctr
+/*********************************************************************/
asm_cacheflush:
add r4,r3,r4
asm_getclassvalues_atomic:
-_crit_restart2:
-_crit_begin2:
+_crit_restart:
+_crit_begin:
lwz r6,offbaseval(r3)
lwz r7,offdiffval(r3)
lwz r8,offbaseval(r4)
-_crit_end2:
+_crit_end:
stw r6,offcast_super_baseval(r5)
stw r7,offcast_super_diffval(r5)
stw r8,offcast_sub_baseval(r5)
asm_criticalsections:
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- .long _crit_begin1
- .long _crit_end1
- .long _crit_restart1
- .long _crit_begin2
- .long _crit_end2
- .long _crit_restart2
+ .long _crit_begin
+ .long _crit_end
+ .long _crit_restart
#endif
.long 0
+/* Disable exec-stacks, required for Gentoo ***********************************/
+
+#if defined(__GCC__) && defined(__ELF__)
+ .section .note.GNU-stack,"",@progbits
+#endif
+
+
/*
* These are local overrides for various environment variables in Emacs.
* Please do not remove this and leave it at the end of the file, where
* c-basic-offset: 4
* tab-width: 4
* End:
+ * vim:noexpandtab:sw=4:ts=4:
*/