Changes: Christian Thalinger
Edwin Steiner
- $Id: asmpart.S 4735 2006-04-05 10:02:14Z twisti $
+ $Id: asmpart.S 5060 2006-06-28 21:52:26Z twisti $
*/
.globl asm_handle_nat_exception
.globl asm_handle_exception
+ .globl asm_abstractmethoderror
+
.globl asm_wrapper_patcher
.globl asm_replacement_out
.globl asm_replacement_in
.globl asm_cacheflush
- .globl asm_initialize_thread_stack
- .globl asm_perform_threadswitch
- .globl asm_switchstackandcall
.globl asm_criticalsections
.globl asm_getclassvalues_atomic
.long 0 /* isleaf */
.long 0 /* IsSync */
.long 0 /* frame size */
- .long 0 /* method pointer (pointer to name) */
+ .long 0 /* codeinfo pointer */
asm_vm_call_method:
asm_vm_call_method_int:
stw r0,LA_LR_OFFSET(r1)
stwu r1,-40*4(r1)
+ stw s0,8*4(sp) /* save used callee saved registers */
+ stw a0,9*4(sp) /* save method pointer for compiler */
+
#if defined(__DARWIN__)
stw itmp1,10*4(sp) /* register r11 is callee saved */
#endif
SAVE_TEMPORARY_REGISTERS(18) /* the offset has to be even */
#endif
- stw a0,9*4(r1) /* save method pointer for compiler */
+ mr itmp2,a1 /* arg count */
+ mr itmp1,a2 /* pointer to arg block */
- mr itmp1,r5 /* pointer to arg block */
- mr itmp2,r4 /* arg count */
+ mr t4,itmp2 /* save argument count */
+ mr t5,itmp1 /* save argument block pointer */
+
+ mr s0,sp /* save current sp to s0 */
addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */
addi itmp2,itmp2,1 /* initialize argument count */
li t0,0 /* initialize integer argument counter */
li t1,0 /* initialize float argument counter */
+ li t6,0 /* initialize integer register counter */
+#if defined(__DARWIN__)
+ li t7,0 /* initialize stack slot counter */
+#endif
mflr r0 /* save link register (PIC code) */
bl L_asm_vm_call_method_get_pc
mr. itmp2,itmp2
beq L_register_copy_done
-#if WORDS_BIGENDIAN == 1
lwz itmp3,offvmargtype+4(itmp1)
-#else
-#error XXX
-#endif
andi. r0,itmp3,0x0002 /* is this a float/double type? */
bne L_register_handle_float
-
- cmpwi t0,INT_ARG_CNT /* are we out of integer argument */
+
+L_register_handle_int:
+ cmpwi t6,INT_ARG_CNT /* are we out of integer argument */
beq L_register_copy /* registers? yes, next loop */
- andi. r0,itmp3,0x0001 /* is this a long type? */
+ andi. r0,itmp3,0x0001 /* is this a 2-word type? */
bne L_register_handle_long
-L_register_handle_int:
#if defined(__DARWIN__)
addis itmp3,t3,ha16(L_jumptable_int - L_asm_vm_call_method_get_pc)
la itmp3,lo16(L_jumptable_int - L_asm_vm_call_method_get_pc)(itmp3)
lis itmp3,L_jumptable_int@ha
addi itmp3,itmp3,L_jumptable_int@l
#endif
- slwi t2,t0,2 /* multiple of 4-bytes */
+
+ slwi t2,t6,2 /* multiple of 4-bytes */
add itmp3,itmp3,t2 /* calculate address of jumptable */
lwz itmp3,0(itmp3) /* load function address */
- addi t0,t0,1 /* integer argument counter + 1 */
mtctr itmp3
+ addi t0,t0,1 /* integer argument counter */
+ addi t6,t6,1 /* integer argument register counter */
+#if defined(__DARWIN__)
+ addi t7,t7,1 /* stack slot counter */
+#endif
bctr
L_register_handle_long:
lis itmp3,L_jumptable_long@ha
addi itmp3,itmp3,L_jumptable_long@l
#endif
- addi t2,t0,1 /* align to even numbers */
- srwi t2,t2,1
- slwi t2,t2,1
- slwi t2,t2,2 /* multiple of 4-bytes */
+#if !defined(__DARWIN__)
+ addi t6,t6,1 /* align to even numbers */
+ andi. t6,t6,0xfffe
+#endif
+
+ cmpwi t6,(INT_ARG_CNT - 1) /* are we out of integer argument */
+ blt L_register_handle_long_continue /* registers? */
+
+ li t6,INT_ARG_CNT /* yes, set integer argument register */
+ b L_register_copy /* count to max and next loop */
+
+L_register_handle_long_continue:
+ slwi t2,t6,2 /* multiple of 4-bytes */
add itmp3,itmp3,t2 /* calculate address of jumptable */
lwz itmp3,0(itmp3) /* load function address */
- addi t0,t0,1 /* integer argument counter + 1 */
mtctr itmp3
+ addi t0,t0,1 /* integer argument counter */
+ addi t6,t6,2 /* integer argument register counter */
+#if defined(__DARWIN__)
+ addi t7,t7,2 /* stack slot counter */
+#endif
bctr
L_register_handle_float:
+ cmpwi t1,FLT_ARG_CNT /* are we out of float argument */
+ beq L_register_copy /* registers? yes, next loop */
+
+ andi. r0,itmp3,0x0001 /* is this a 2-word type? */
+ bne L_register_handle_double
+
+#if defined(__DARWIN__)
+ addis itmp3,t3,ha16(L_jumptable_float - L_asm_vm_call_method_get_pc)
+ la itmp3,lo16(L_jumptable_float - L_asm_vm_call_method_get_pc)(itmp3)
+#else
+ lis itmp3,L_jumptable_float@ha
+ addi itmp3,itmp3,L_jumptable_float@l
+#endif
+
+ slwi t2,t1,2 /* multiple of 4-bytes */
+ add itmp3,itmp3,t2 /* calculate address of jumptable */
+ lwz itmp3,0(itmp3) /* load function address */
+ mtctr itmp3
+ addi t1,t1,1 /* float argument counter */
+#if defined(__DARWIN__)
+ addi t7,t7,1 /* stack slot counter */
+ addi t6,t6,1 /* skip 1 integer argument register */
+#endif
+ bctr
+
+L_register_handle_double:
+#if defined(__DARWIN__)
+ addis itmp3,t3,ha16(L_jumptable_double - L_asm_vm_call_method_get_pc)
+ la itmp3,lo16(L_jumptable_double - L_asm_vm_call_method_get_pc)(itmp3)
+#else
+ lis itmp3,L_jumptable_double@ha
+ addi itmp3,itmp3,L_jumptable_double@l
+#endif
+
+ slwi t2,t1,2 /* multiple of 4-bytes */
+ add itmp3,itmp3,t2 /* calculate address of jumptable */
+ lwz itmp3,0(itmp3) /* load function address */
+ mtctr itmp3
+ addi t1,t1,1 /* float argument counter */
+#if defined(__DARWIN__)
+ addi t7,t7,2 /* stack slot counter */
+ addi t6,t6,2 /* skip 2 integer argument registers */
+#endif
+ bctr
+
L_register_copy_done:
+ /* calculate remaining arguments */
+ sub itmp3,t4,t0 /* - integer arguments in registers */
+ sub itmp3,itmp3,t1 /* - float arguments in registers */
+ mr. itmp3,itmp3
+ beq L_stack_copy_done
+
+ mr itmp2,t4 /* restore argument count */
+ mr itmp1,t5 /* restore argument block pointer */
+
+	slwi t4,itmp3,3                /* XXX use 8-byte slots for now */
+ addi t4,t4,LA_SIZE /* add size of linkage area */
+
+#if defined(__DARWIN__)
+ slwi t5,t7,2 /* add stack space for arguments */
+ add t4,t4,t5
+#endif
+
+ sub sp,sp,t4
+
+ mr t6,sp /* use t6 as temporary sp */
+ addi t6,t6,LA_SIZE /* skip linkage area */
+#if defined(__DARWIN__)
+ add t6,t6,t5 /* skip stack space for arguments */
+#endif
+
+ addi itmp1,itmp1,-sizevmarg /* initialize pointer (smaller code) */
+ addi itmp2,itmp2,1 /* initialize argument count */
+
+L_stack_copy_loop:
+ addi itmp1,itmp1,sizevmarg /* goto next argument block */
+ addi itmp2,itmp2,-1 /* argument count - 1 */
+ mr. itmp2,itmp2
+ beq L_stack_copy_done
+
+ lwz itmp3,offvmargtype+4(itmp1)
+ andi. r0,itmp3,0x0002 /* is this a float/double type? */
+ bne L_stack_handle_float
+
+L_stack_handle_int:
+ addi t0,t0,-1 /* arguments assigned to registers */
+ mr. t0,t0
+ bge L_stack_copy_loop
+
+ andi. r0,itmp3,0x0001 /* is this a 2-word type? */
+ bne L_stack_handle_long
+
+ lwz itmp3,offvmargdata+4(itmp1) /* get integer argument */
+ stw itmp3,0(t6) /* and store it on the stack */
+ addi t6,t6,4 /* increase temporary sp by 1 slot */
+ b L_stack_copy_loop
+
+L_stack_handle_long:
+#if !defined(__DARWIN__)
+ addi t6,t6,4 /* align stack to 8-bytes */
+	rlwinm t6,t6,0,30,28           /* clear the 4s bit (rounds to 8-byte boundary) */
+#endif
+
+ lwz itmp3,offvmargdata+0(itmp1) /* get long argument */
+ stw itmp3,0(t6) /* and store it on the stack */
+ lwz itmp3,offvmargdata+4(itmp1)
+ stw itmp3,4(t6)
+ addi t6,t6,8 /* increase temporary sp by 2 slots */
+ b L_stack_copy_loop
+
+L_stack_handle_float:
+ addi t1,t1,-1 /* arguments assigned to registers */
+ mr. t1,t1
+ bge L_stack_copy_loop
+
+ andi. r0,itmp3,0x0001 /* is this a 2-word type? */
+ bne L_stack_handle_double
+
+ lfs ftmp3,offvmargdata(itmp1) /* get float argument */
+ stfs ftmp3,0(t6) /* and store it on the stack */
+ addi t6,t6,4 /* increase temporary sp by 1 slot */
+ b L_stack_copy_loop
+
+L_stack_handle_double:
+#if !defined(__DARWIN__)
+ addi t6,t6,4 /* align stack to 8-bytes */
+	rlwinm t6,t6,0,30,28           /* clear the 4s bit (rounds to 8-byte boundary) */
+#endif
+
+ lfd ftmp3,offvmargdata(itmp1) /* get double argument */
+ stfd ftmp3,0(t6) /* and store it on the stack */
+ addi t6,t6,8 /* increase temporary sp by 2 slots */
+ b L_stack_copy_loop
L_stack_copy_done:
- lwz itmp1,9*4(sp) /* pass method pointer via tmp1 */
+ lwz itmp1,9*4(s0) /* pass method pointer via tmp1 */
#if defined(__DARWIN__)
addis mptr,t3,ha16(L_asm_call_jit_compiler - L_asm_vm_call_method_get_pc)
lis mptr,L_asm_call_jit_compiler@ha
addi mptr,mptr,L_asm_call_jit_compiler@l
#endif
- stw mptr,8*4(r1)
- addi mptr,r1,7*4
+ stw mptr,7*4(s0)
+ addi mptr,s0,7*4
- lwz pv,1*4(mptr)
+ lwz pv,0*4(mptr)
mtctr pv
bctrl
1:
#endif
L_asm_vm_call_method_return:
+ mr sp,s0 /* restore the function's sp */
+
+ lwz s0,8*4(sp) /* restore used callee saved registers */
+
#if defined(__DARWIN__)
lwz itmp1,10*4(sp) /* register r11 is callee saved */
#endif
asm_vm_call_method_exception_handler:
mr r3,itmp1
bl builtin_throw_exception
- li v0,0 /* return NULL */
b L_asm_vm_call_method_return
L_jumptable_long:
#if defined(__DARWIN__)
+ .long L_handle_a0_a1
+ .long L_handle_a1_a2
+ .long L_handle_a2_a3
+ .long L_handle_a3_a4
+ .long L_handle_a4_a5
+ .long L_handle_a5_a6
+ .long L_handle_a6_a7
#else
/* we have two entries here, so we get the even argument register
alignment for linux */
.long L_handle_a4_a5
.long 0
.long L_handle_a6_a7
- .long 0
#endif
.text
lwz a0,offvmargdata+0(itmp1)
lwz a1,offvmargdata+4(itmp1)
b L_register_copy
+#if defined(__DARWIN__)
+L_handle_a1_a2:
+ lwz a1,offvmargdata+0(itmp1)
+ lwz a2,offvmargdata+4(itmp1)
+ b L_register_copy
+#endif
L_handle_a2_a3:
lwz a2,offvmargdata+0(itmp1)
lwz a3,offvmargdata+4(itmp1)
b L_register_copy
+#if defined(__DARWIN__)
+L_handle_a3_a4:
+ lwz a3,offvmargdata+0(itmp1)
+ lwz a4,offvmargdata+4(itmp1)
+ b L_register_copy
+#endif
L_handle_a4_a5:
lwz a4,offvmargdata+0(itmp1)
lwz a5,offvmargdata+4(itmp1)
b L_register_copy
+#if defined(__DARWIN__)
+L_handle_a5_a6:
+ lwz a5,offvmargdata+0(itmp1)
+ lwz a6,offvmargdata+4(itmp1)
+ b L_register_copy
+#endif
L_handle_a6_a7:
lwz a6,offvmargdata+0(itmp1)
lwz a7,offvmargdata+4(itmp1)
b L_register_copy
+ .data
+ .align 2
+
+L_jumptable_float:
+ .long L_handle_fa0
+ .long L_handle_fa1
+ .long L_handle_fa2
+ .long L_handle_fa3
+ .long L_handle_fa4
+ .long L_handle_fa5
+ .long L_handle_fa6
+ .long L_handle_fa7
+
+#if defined(__DARWIN__)
+ .long L_handle_fa8
+ .long L_handle_fa9
+ .long L_handle_fa10
+ .long L_handle_fa11
+ .long L_handle_fa12
+#endif
+
+ .text
+ .align 2
+
+L_handle_fa0:
+ lfs fa0,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa1:
+ lfs fa1,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa2:
+ lfs fa2,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa3:
+ lfs fa3,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa4:
+ lfs fa4,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa5:
+ lfs fa5,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa6:
+ lfs fa6,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa7:
+ lfs fa7,offvmargdata(itmp1)
+ b L_register_copy
+
+#if defined(__DARWIN__)
+L_handle_fa8:
+ lfs fa8,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa9:
+ lfs fa9,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa10:
+ lfs fa10,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa11:
+ lfs fa11,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fa12:
+ lfs fa12,offvmargdata(itmp1)
+ b L_register_copy
+#endif
+
+
+ .data
+ .align 2
+
+L_jumptable_double:
+ .long L_handle_fda0
+ .long L_handle_fda1
+ .long L_handle_fda2
+ .long L_handle_fda3
+ .long L_handle_fda4
+ .long L_handle_fda5
+ .long L_handle_fda6
+ .long L_handle_fda7
+
+#if defined(__DARWIN__)
+ .long L_handle_fda8
+ .long L_handle_fda9
+ .long L_handle_fda10
+ .long L_handle_fda11
+ .long L_handle_fda12
+#endif
+
+ .text
+ .align 2
+
+L_handle_fda0:
+ lfd fa0,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda1:
+ lfd fa1,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda2:
+ lfd fa2,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda3:
+ lfd fa3,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda4:
+ lfd fa4,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda5:
+ lfd fa5,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda6:
+ lfd fa6,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda7:
+ lfd fa7,offvmargdata(itmp1)
+ b L_register_copy
+
+#if defined(__DARWIN__)
+L_handle_fda8:
+ lfd fa8,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda9:
+ lfd fa9,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda10:
+ lfd fa10,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda11:
+ lfd fa11,offvmargdata(itmp1)
+ b L_register_copy
+L_handle_fda12:
+ lfd fa12,offvmargdata(itmp1)
+ b L_register_copy
+#endif
+
+
/* asm_call_jit_compiler *******************************************************
Invokes the compiler for untranslated JavaVM methods.
L_asm_call_jit_compiler: /* required for PIC code */
mflr r0
stw r0,LA_LR_OFFSET(r1) /* save return address */
- stwu r1,-((LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo)(r1)
- stw itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 1*4)(r1)
-
- mr itmp1,r0 /* save return address to other reg. */
- lwz itmp3,-12(itmp1)
- srwi itmp3,itmp3,16
- andi. itmp3,itmp3,31
- cmpwi itmp3,mptrn
- beq noregchange
- lwz itmp3,4(itmp1)
- extsh itmp3,itmp3
- add mptr,itmp3,itmp1
- lwz itmp3,8(itmp1)
- srwi itmp3,itmp3,16
- cmpwi itmp3,0x3dad
- bne noregchange
- lwz itmp3,8(itmp1)
- slwi itmp3,itmp3,16
- add mptr,mptr,itmp3
-
-noregchange:
- stw mptr,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 2*4)(r1)
+ stwu r1,-(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)(r1)
#if defined(__DARWIN__)
stw a0,(LA_WORD_SIZE+5+0)*4(r1)
SAVE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
#endif
- addi a0,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)
- li a1,0 /* we don't have pv handy */
- addi a2,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo
- lwz a3,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo+LA_LR_OFFSET(sp)
- mr a4,a3 /* xpc is equal to ra */
- bl stacktrace_create_extern_stackframeinfo
-
- lwz a0,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 1*4)(r1)
- bl jit_compile /* compile the Java method */
- mr pv,r3 /* move address to pv register */
-
- addi a0,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)
- bl stacktrace_remove_stackframeinfo
+ mr a0,itmp1
+ mr a1,mptr
+ addi a2,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
+ lwz a3,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(sp)
+ bl jit_asm_compile
+ mr pv,v0 /* move address to pv register */
#if defined(__DARWIN__)
lwz a0,(LA_WORD_SIZE+5+0)*4(r1)
RESTORE_ARGUMENT_REGISTERS(LA_WORD_SIZE+1)
#endif
- lwz mptr,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 2*4)(r1)
-
- lwz itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo+LA_LR_OFFSET(r1)
+ lwz itmp1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)+LA_LR_OFFSET(r1)
mtlr itmp1
- addi r1,r1,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8 + 3*4)+sizestackframeinfo
+
+ addi sp,sp,(LA_SIZE + 5*4 + INT_ARG_CNT*4 + FLT_ARG_CNT*8)
mr. pv,pv /* test for exception */
beq L_asm_call_jit_compiler_exception
- lwz itmp3,-12(itmp1)
- extsh itmp3,itmp3
- add mptr,mptr,itmp3
- stw pv,0(mptr) /* store method address */
-
mtctr pv /* move method address to control reg */
bctr /* and call the Java method */
L_asm_call_jit_compiler_exception:
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
mflr r0
stw r0,LA_LR_OFFSET(sp)
stwu sp,-LA_SIZE_ALIGNED(sp) /* preserve linkage area */
b L_asm_handle_exception_stack_loop
+/* asm_abstractmethoderror *****************************************************
+
+   Creates and throws an AbstractMethodError.  The exception PC handed on
+   to the exception handler is ra - 4, i.e. the invoking branch instruction.
+
+*******************************************************************************/
+
+asm_abstractmethoderror:
+	mflr r0                        /* r0 = ra of the faulting invocation */
+	stw r0,LA_LR_OFFSET(sp)        /* save return address */
+	stwu sp,-LA_SIZE_ALIGNED(sp)   /* preserve linkage area */
+	addi a0,sp,LA_SIZE_ALIGNED     /* pass java sp */
+	mr a1,r0                       /* pass exception address */
+	bl exceptions_asm_new_abstractmethoderror /* returns exception in v0 */
+	lwz r0,LA_SIZE_ALIGNED+LA_LR_OFFSET(sp) /* reload the saved ra */
+	mtlr r0                        /* restore return address */
+	addi sp,sp,LA_SIZE_ALIGNED     /* pop linkage area again */
+
+	mr xptr,v0                     /* get exception pointer */
+	mr xpc,r0                      /* we can't use r0 directly in addi */
+	addi xpc,xpc,-4                /* exception address is ra - 4 */
+	b L_asm_handle_nat_exception   /* enter native-exception handling */
+
+
/* asm_wrapper_patcher *********************************************************
XXX
lwz xpc,(5+LA_WORD_SIZE+5+58)*4+sizestackframeinfo(sp)
addi sp,sp,(8+LA_WORD_SIZE+5+58)*4+sizestackframeinfo
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
mflr r0
stw r0,LA_LR_OFFSET(sp)
stwu sp,-(LA_SIZE+1*4)(sp) /* preserve linkage area */
blr
- .align 3
-doublezero:
- .double 0.0
-
-asm_initialize_thread_stack:
- addi r4,r4,-256
- stw r3,120(r4)
- li r3,0
- stw r3,124(r4)
- stw r3,0(r4)
- stw r3,4(r4)
- stw r3,8(r4)
- stw r3,12(r4)
- stw r3,16(r4)
- stw r3,20(r4)
- stw r3,24(r4)
- stw r3,28(r4)
- stw r3,32(r4)
- stw r3,36(r4)
-
- stw r3,128(r4)
- stw r3,132(r4)
- stw r3,136(r4)
- stw r3,140(r4)
- stw r3,144(r4)
- stw r3,148(r4)
- stw r3,152(r4)
- stw r3,156(r4)
-
- mflr r0
- bl 0f
-0:
- mflr r3
- mtlr r0
-#if defined(__DARWIN__)
- lfd fr0,lo16(doublezero-0b)(r3)
-#else
- lfd fr0,(doublezero-0b)@l(r3)
-#endif
-
- stfd fr0,40(r4)
- stfd fr0,48(r4)
- stfd fr0,56(r4)
- stfd fr0,64(r4)
- stfd fr0,72(r4)
- stfd fr0,80(r4)
- stfd fr0,88(r4)
- stfd fr0,96(r4)
- stfd fr0,104(r4)
- stfd fr0,112(r4)
-
- stfd fr0,160(r4)
- stfd fr0,168(r4)
- stfd fr0,176(r4)
- stfd fr0,184(r4)
- stfd fr0,192(r4)
- stfd fr0,200(r4)
- stfd fr0,208(r4)
- stfd fr0,216(r4)
-
- mr r3,r4
- blr
-
-
-asm_perform_threadswitch:
- mflr r0
- addi r1,r1,-224
- stw r0,120(r1)
- stw pv,124(r1)
- stw r14,0(r1)
- stw r15,4(r1)
- stw r24,8(r1)
- stw r25,12(r1)
- stw r26,16(r1)
- stw r27,20(r1)
- stw r28,24(r1)
- stw r29,28(r1)
- stw r30,32(r1)
- stw r31,36(r1)
- stfd fr14,40(r1)
- stfd fr15,48(r1)
- stfd fr24,56(r1)
- stfd fr25,64(r1)
- stfd fr26,72(r1)
- stfd fr27,80(r1)
- stfd fr28,88(r1)
- stfd fr29,96(r1)
- stfd fr30,104(r1)
- stfd fr31,112(r1)
-
- stw r16,128(r1)
- stw r17,132(r1)
- stw r18,136(r1)
- stw r19,140(r1)
- stw r20,144(r1)
- stw r21,148(r1)
- stw r22,152(r1)
- stw r23,156(r1)
- stfd fr16,160(r1)
- stfd fr17,168(r1)
- stfd fr18,176(r1)
- stfd fr19,184(r1)
- stfd fr20,192(r1)
- stfd fr21,200(r1)
- stfd fr22,208(r1)
- stfd fr23,216(r1)
-
- stw r1,0(r3)
- stw r1,0(r5)
- lwz r1,0(r4)
-
- lwz r0,120(r1)
- lwz pv,124(r1)
- lwz r14,0(r1)
- lwz r15,4(r1)
- lwz r24,8(r1)
- lwz r25,12(r1)
- lwz r26,16(r1)
- lwz r27,20(r1)
- lwz r28,24(r1)
- lwz r29,28(r1)
- lwz r30,32(r1)
- lwz r31,36(r1)
- lfd fr14,40(r1)
- lfd fr15,48(r1)
- lfd fr24,56(r1)
- lfd fr25,64(r1)
- lfd fr26,72(r1)
- lfd fr27,80(r1)
- lfd fr28,88(r1)
- lfd fr29,96(r1)
- lfd fr30,104(r1)
- lfd fr31,112(r1)
-
- lwz r16,128(r1)
- lwz r17,132(r1)
- lwz r18,136(r1)
- lwz r19,140(r1)
- lwz r20,144(r1)
- lwz r21,148(r1)
- lwz r22,152(r1)
- lwz r23,156(r1)
- lfd fr16,160(r1)
- lfd fr17,168(r1)
- lfd fr18,176(r1)
- lfd fr19,184(r1)
- lfd fr20,192(r1)
- lfd fr21,200(r1)
- lfd fr22,208(r1)
- lfd fr23,216(r1)
-
- mtlr r0
- addi r1,r1,224
- blr
-
-
-asm_switchstackandcall:
- mflr r0
- stwu r3,-48(r3)
- stw r0,40(r3)
- stw r1,44(r3)
- stw r1,0(r5)
- mr r1,r3
-
- mtctr r4
- mr r3,r6
- bctrl
-
- lwz r0,40(r1)
- mtlr r0
- lwz r1,44(r1)
- blr
-
-
asm_getclassvalues_atomic:
_crit_restart:
_crit_begin:
.data
asm_criticalsections:
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
.long _crit_begin
.long _crit_end
.long _crit_restart
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align 2
-L_jit_compile$stub:
- .indirect_symbol _jit_compile
+L_jit_asm_compile$stub:
+ .indirect_symbol _jit_asm_compile
mflr r0
- bcl 20,31,L00$_jit_compile
-L00$_jit_compile:
+ bcl 20,31,L00$_jit_asm_compile
+L00$_jit_asm_compile:
mflr r11
- addis r11,r11,ha16(L_jit_compile$lazy_ptr - L00$_jit_compile)
+ addis r11,r11,ha16(L_jit_asm_compile$lazy_ptr - L00$_jit_asm_compile)
mtlr r0
- lwzu r12,lo16(L_jit_compile$lazy_ptr - L00$_jit_compile)(r11)
+ lwzu r12,lo16(L_jit_asm_compile$lazy_ptr - L00$_jit_asm_compile)(r11)
mtctr r12
bctr
.data
.lazy_symbol_pointer
-L_jit_compile$lazy_ptr:
- .indirect_symbol _jit_compile
+L_jit_asm_compile$lazy_ptr:
+ .indirect_symbol _jit_asm_compile
.long dyld_stub_binding_helper