Changes: Joseph Wenninger
Christian Thalinger
+ Edwin Steiner
- $Id: asmpart.S 4440 2006-02-05 12:03:43Z twisti $
+ $Id: asmpart.S 4749 2006-04-11 10:20:18Z twisti $
*/
#include "vm/jit/alpha/md-asm.h"
#include "vm/jit/alpha/offsets.h"
-#include "vm/jit/abi.h"
+#include "vm/jit/abi-asm.h"
#include "vm/jit/methodheader.h"
.set noreorder
-/********************* exported functions and variables ***********************/
+/* export functions ***********************************************************/
- .globl asm_md_init
-
- .globl asm_sync_instruction_cache
-
- .globl asm_calljavafunction
- .globl asm_calljavafunction_int
-
- .globl asm_calljavafunction2
- .globl asm_calljavafunction2int
- .globl asm_calljavafunction2long
- .globl asm_calljavafunction2float
- .globl asm_calljavafunction2double
+ .globl asm_vm_call_method
+ .globl asm_vm_call_method_int
+ .globl asm_vm_call_method_long
+ .globl asm_vm_call_method_float
+ .globl asm_vm_call_method_double
+ .globl asm_vm_call_method_exception_handler
.globl asm_call_jit_compiler
.globl asm_handle_exception
.globl asm_wrapper_patcher
+ .globl asm_replacement_out
+ .globl asm_replacement_in
+
.globl asm_perform_threadswitch
.globl asm_initialize_thread_stack
.globl asm_switchstackandcall
.globl asm_criticalsections
.globl asm_getclassvalues_atomic
-
-/* asm_sync_instruction_cache **************************************************
-
- XXX
-
-*******************************************************************************/
-
- .ent asm_sync_instruction_cache
-
-asm_sync_instruction_cache:
- call_pal PAL_imb /* synchronize instruction cache */
- jmp zero,(ra)
-
- .end asm_sync_instruction_cache
-
-
-/* asm_md_init *****************************************************************
-
- Initialize machine dependent stuff.
-
- Determines if the byte support instruction set (21164a and higher)
- is available.
-
-*******************************************************************************/
-
- .ent asm_md_init
-
-asm_md_init:
-
- .long 0x47e03c20 /* amask 1,v0 */
- jmp zero,(ra) /* return */
-
- .end asm_md_init
+ .globl asm_md_init
+ .globl asm_cacheflush
-/********************* function asm_calljavafunction ***************************
+/* asm_vm_call_method **********************************************************
* *
* This function calls a Java-method (which possibly needs compilation) *
* with up to 4 address parameters. *
* *
*******************************************************************************/
- .ent asm_calljavafunction
-
- .align 2 /*3*/
-
- .quad 0 /* catch type all */
- .quad calljava_xhandler /* handler pc */
- .quad calljava_xhandler /* end pc */
- .quad asm_calljavafunction /* start pc */
- .long 1 /* extable size */
- .long 0 /* ALIGNMENT PADDING */
- .quad 0 /* line number table start */
- .quad 0 /* line number table size */
- .long 0 /* ALIGNMENT PADDING */
- .long 0 /* fltsave */
- .long 0 /* intsave */
- .long 0 /* isleaf */
- .long 0 /* IsSync */
- .long 0 /* frame size */
- .quad 0 /* method pointer (pointer to name) */
-
-asm_calljavafunction:
-asm_calljavafunction_int:
- ldgp gp,0(pv)
- lda sp,-4*8(sp) /* allocate stack space */
- stq ra,0*8(sp) /* save return address */
- stq gp,1*8(sp) /* save global pointer */
-
- stq a0,2*8(sp) /* save method pointer for compiler */
- lda v0,2*8(sp) /* pass pointer to method pointer */
-
- mov a1,a0 /* pass the remaining parameters */
- mov a2,a1
- mov a3,a2
- mov a4,a3
-
- lda itmp2,asm_call_jit_compiler /* fake virtual function call */
- stq itmp2,3*8(sp) /* store function address */
- mov sp,itmp2 /* set method pointer */
-
- ldq pv,3*8(itmp2) /* method call as in Java */
- jmp ra,(pv) /* call JIT compiler */
-calljava_jit:
- lda pv,(asm_calljavafunction - calljava_jit)(ra)
-
-L_asm_calljavafunction_return:
- ldq ra,0*8(sp) /* restore return address */
- ldq gp,1*8(sp) /* restore global pointer */
- lda sp,4*8(sp) /* free stack space */
- jmp zero,(ra)
-
-calljava_xhandler:
- ldq gp,1*8(sp) /* restore global pointer */
- mov xptr,a0
- jsr ra,builtin_throw_exception
- mov zero,v0 /* return NULL */
- br L_asm_calljavafunction_return
-
- .end asm_calljavafunction
-
-
-
-
- .ent asm_calljavafunction2
+ .ent asm_vm_call_method
.align 3
.quad 0 /* catch type all */
- .quad calljava_xhandler2 /* handler pc */
- .quad calljava_xhandler2 /* end pc */
- .quad asm_calljavafunction2 /* start pc */
+ .quad 0 /* handler pc */
+ .quad 0 /* end pc */
+ .quad 0 /* start pc */
.long 1 /* extable size */
.long 0 /* ALIGNMENT PADDING */
.quad 0 /* line number table start */
.long 0 /* frame size */
.quad 0 /* method pointer (pointer to name) */
-asm_calljavafunction2:
-asm_calljavafunction2int:
-asm_calljavafunction2long:
-asm_calljavafunction2float:
-asm_calljavafunction2double:
+asm_vm_call_method:
+asm_vm_call_method_int:
+asm_vm_call_method_long:
+asm_vm_call_method_float:
+asm_vm_call_method_double:
ldgp gp,0(pv)
lda sp,-5*8(sp) /* allocate stack space */
stq ra,0*8(sp) /* save return address */
stq s6,3*8(sp)
stq a0,4*8(sp) /* save method pointer for compiler */
- mov a3,t0 /* pointer to arg block */
+
+ mov a2,t0 /* pointer to arg block */
mov a1,s6 /* arg count */
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a0,offjniitem(t0)
- ldt $f16,offjniitem(t0)
+ ldq a0,offvmargdata(t0)
+ ldt $f16,offvmargdata(t0)
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a1,offjniitem+sizejniblock*1(t0)
- ldt $f17,offjniitem+sizejniblock*1(t0)
+ ldq a1,offvmargdata+sizevmarg*1(t0)
+ ldt $f17,offvmargdata+sizevmarg*1(t0)
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a2,offjniitem+sizejniblock*2(t0)
- ldt $f18,offjniitem+sizejniblock*2(t0)
+ ldq a2,offvmargdata+sizevmarg*2(t0)
+ ldt $f18,offvmargdata+sizevmarg*2(t0)
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a3,offjniitem+sizejniblock*3(t0)
- ldt $f19,offjniitem+sizejniblock*3(t0)
+ ldq a3,offvmargdata+sizevmarg*3(t0)
+ ldt $f19,offvmargdata+sizevmarg*3(t0)
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a4,offjniitem+sizejniblock*4(t0)
- ldt $f20,offjniitem+sizejniblock*4(t0)
+ ldq a4,offvmargdata+sizevmarg*4(t0)
+ ldt $f20,offvmargdata+sizevmarg*4(t0)
ble s6,calljava_argsloaded
lda s6,-1(s6)
- ldq a5,offjniitem+sizejniblock*5(t0)
- ldt $f21,offjniitem+sizejniblock*5(t0)
+ ldq a5,offvmargdata+sizevmarg*5(t0)
+ ldt $f21,offvmargdata+sizevmarg*5(t0)
calljava_argsloaded:
mov sp,t4
ble s6,calljava_nocopy
s8addq t1,t4,t2
calljava_copyloop:
- ldq t3,offjniitem+sizejniblock*6(t0)
+ ldq t3,offvmargdata+sizevmarg*6(t0)
stq t3,0(t2)
lda t1,1(t1)
- lda t0,sizejniblock(t0)
+ lda t0,sizevmarg(t0)
lda t2,8(t2)
bne t1,calljava_copyloop
calljava_nocopy:
- lda v0,4*8(t4) /* pass pointer to method pointer via v0*/
+ ldq itmp1,4*8(t4) /* pass method pointer via itmp1 */
- lda itmp2,asm_call_jit_compiler/* fake virtual function call (2 instr)*/
- stq itmp2,16(t4) /* store function address */
- lda itmp2,8(t4) /* set method pointer */
+ lda mptr,asm_call_jit_compiler/* fake virtual function call (2 instr) */
+ stq mptr,2*8(t4) /* store function address */
+ lda mptr,1*8(t4) /* set method pointer */
- ldq pv,8(itmp2) /* method call as in Java */
+ ldq pv,1*8(mptr) /* method call as in Java */
jmp ra,(pv) /* call JIT compiler */
calljava_jit2:
- lda pv,(asm_calljavafunction2 - calljava_jit2)(ra)
+ lda pv,(asm_vm_call_method - calljava_jit2)(ra)
s8addq s6,sp,sp
calljava_return2:
calljava_ret2:
jmp zero,(ra)
-calljava_xhandler2:
+asm_vm_call_method_exception_handler:
s8addq s6,sp,sp
ldq gp,1*8(sp) /* restore global pointer */
mov itmp1,a0
ldq ra,0*8(sp) /* restore return address */
ldq s6,3*8(sp)
lda sp,5*8(sp) /* free stack space */
- mov zero,v0 /* return NULL */
jmp zero,(ra)
- .end asm_calljavafunction2
+ .end asm_vm_call_method
/****************** function asm_call_jit_compiler *****************************
asm_call_jit_compiler:
ldgp gp,0(pv)
- ldl t8,-8(ra) /* load instruction LDQ PV,xxx($yy) */
- srl t8,16,t8 /* shift right register number $yy */
- and t8,31,t8 /* isolate register number */
- subl t8,28,t8 /* test for REG_METHODPTR */
- beq t8,noregchange
- ldl t8,0(ra) /* load instruction LDA PV,xxx(RA) */
- sll t8,48,t8
- sra t8,48,t8 /* isolate offset */
- addq t8,ra,$28 /* compute update address */
- ldl t8,4(ra) /* load instruction LDAH PV,xxx(PV) */
- srl t8,16,t8 /* isolate instruction code */
- lda t8,-0x177b(t8) /* test for LDAH */
- bne t8,noregchange
- ldl t8,4(ra) /* load instruction LDAH PV,xxx(PV) */
- sll t8,16,t8 /* compute high offset */
- addl t8,0,t8 /* sign extend high offset */
- addq t8,$28,$28 /* compute update address */
-noregchange:
lda sp,-(15*8+sizestackframeinfo)(sp) /* reserve stack space */
SAVE_ARGUMENT_REGISTERS(0) /* save 6 int/6 float argument registers */
- stq $28,12*8(sp) /* save method pointer */
+ stq mptr,12*8(sp) /* save method pointer */
stq ra,13*8(sp) /* save return address */
- stq v0,14*8(sp) /* save methodinfo pointer */
+ stq itmp1,14*8(sp) /* save methodinfo pointer */
lda a0,15*8(sp) /* create stackframe info */
mov zero,a1 /* we don't have pv handy */
jsr ra,stacktrace_create_extern_stackframeinfo
ldgp gp,0(ra)
- ldq v0,14*8(sp) /* restore methodinfo pointer */
- ldq a0,0(v0) /* pass methodinfo pointer */
+ ldq a0,14*8(sp) /* pass methodinfo pointer */
jsr ra,jit_compile /* call jit compiler */
ldgp gp,0(ra)
stq v0,14*8(sp) /* save return value */
+ ldq a0,13*8(sp) /* pass return address */
+ lda a1,15*8(sp) /* pass stackframeinfo (for PV) */
+ ldq a2,12*8(sp) /* pass method pointer */
+ jsr ra,md_assembler_get_patch_address /* get address of patch position*/
+ ldgp gp,0(ra)
+ stq v0,12*8(sp) /* store patch address for later use */
+
lda a0,15*8(sp) /* remove stackframe info */
jsr ra,stacktrace_remove_stackframeinfo
ldgp gp,0(ra)
RESTORE_ARGUMENT_REGISTERS(0) /* restore 6 int/6 float argument registers */
- ldq $28,12*8(sp) /* load method pointer */
+ ldq t0,12*8(sp) /* load patch address */
ldq ra,13*8(sp) /* load return address */
- ldq v0,14*8(sp) /* restore return value */
+ ldq pv,14*8(sp) /* restore method entry point */
lda sp,15*8+sizestackframeinfo(sp) /* deallocate stack area */
- beq v0,L_asm_call_jit_compiler_exception
-
- ldl t8,-8(ra) /* load instruction LDQ PV,xxx($yy) */
- sll t8,48,t8
- sra t8,48,t8 /* isolate offset */
+ beq pv,L_asm_call_jit_compiler_exception
- addq t8,$28,t8 /* compute update address via method pointer*/
- stq v0,0(t8) /* save new method address there */
+ stq pv,0(t0) /* patch method entry point */
call_pal PAL_imb /* synchronise instruction cache */
- mov v0,pv /* load method address into pv */
- jmp zero,(pv) /* and call method. The method returns */
+ jmp zero,(pv) /* and call method, the method returns */
/* directly to the caller (ra). */
L_asm_call_jit_compiler_exception:
stq zero,0(v0) /* clear the exception pointer */
subq ra,4,xpc
- br asm_handle_nat_exception
+ br L_asm_handle_nat_exception
.end asm_call_jit_compiler
.ent asm_handle_nat_exception
asm_handle_nat_exception:
+L_asm_handle_nat_exception: /* required for PIC code */
ldl t0,0(ra) /* load instruction LDA PV,xxx(RA) */
sll t0,48,t0
sra t0,48,t0 /* isolate offset */
ldl t0,4(ra) /* load instruction LDAH PV,xxx(PV) */
srl t0,16,t0 /* isolate instruction code */
lda t0,-0x177b(t0) /* test for LDAH */
- bne t0,asm_handle_exception
+ bne t0,L_asm_handle_exception
ldl t0,4(ra) /* load instruction LDAH PV,xxx(PV) */
sll t0,16,t0 /* compute high offset */
addl t0,0,t0 /* sign extend high offset */
.aent asm_handle_exception
asm_handle_exception:
+L_asm_handle_exception: /* required for PIC code */
lda sp,-(ARG_CNT+TMP_CNT)*8(sp) /* create maybe-leaf stackframe */
SAVE_ARGUMENT_REGISTERS(0) /* we save arg and temp registers in */
#endif
ldq xptr,0(v0) /* get the exception pointer */
stq zero,0(v0) /* clear the exception pointer */
- br asm_handle_exception /* we have the pv of the calling java func. */
+ br L_asm_handle_exception/* we have the pv of the calling java func. */
.end asm_wrapper_patcher
+/* asm_replacement_out *********************************************************
+
+ This code is jumped to from the replacement-out stubs that are executed
+ when a thread reaches an activated replacement point.
+
+ The purpose of asm_replacement_out is to read out the parts of the
+ execution state that cannot be accessed from C code, store this state,
+ and then call the C function replace_me.
+
+ Stack layout:
+ 16 start of stack inside method to replace
+ 0 rplpoint * info on the replacement point that was reached
+
+ NOTE: itmp3 has been clobbered by the replacement-out stub!
+
+*******************************************************************************/
+
+/* some room to accommodate changes of the stack frame size during replacement */
+ /* XXX we should find a cleaner solution here */
+#define REPLACEMENT_ROOM 512
+
+#define REPLACEMENT_STACK_OFFSET ((sizeexecutionstate + REPLACEMENT_ROOM + 0xf) & ~0xf)
+ /* rounded up to a 16-byte multiple so sp keeps its alignment */
+
+ .ent asm_replacement_out
+
+asm_replacement_out:
+ /* create stack frame (an executionstate plus the replacement room) */
+ lda sp,-(REPLACEMENT_STACK_OFFSET)(sp)
+
+ /* save all 32 integer registers in the execution state */
+ stq $0 ,( 0*8+offes_intregs)(sp)
+ stq $1 ,( 1*8+offes_intregs)(sp)
+ stq $2 ,( 2*8+offes_intregs)(sp)
+ stq $3 ,( 3*8+offes_intregs)(sp)
+ stq $4 ,( 4*8+offes_intregs)(sp)
+ stq $5 ,( 5*8+offes_intregs)(sp)
+ stq $6 ,( 6*8+offes_intregs)(sp)
+ stq $7 ,( 7*8+offes_intregs)(sp)
+ stq $8 ,( 8*8+offes_intregs)(sp)
+ stq $9 ,( 9*8+offes_intregs)(sp)
+ stq $10,(10*8+offes_intregs)(sp)
+ stq $11,(11*8+offes_intregs)(sp)
+ stq $12,(12*8+offes_intregs)(sp)
+ stq $13,(13*8+offes_intregs)(sp)
+ stq $14,(14*8+offes_intregs)(sp)
+ stq $15,(15*8+offes_intregs)(sp)
+ stq $16,(16*8+offes_intregs)(sp)
+ stq $17,(17*8+offes_intregs)(sp)
+ stq $18,(18*8+offes_intregs)(sp)
+ stq $19,(19*8+offes_intregs)(sp)
+ stq $20,(20*8+offes_intregs)(sp)
+ stq $21,(21*8+offes_intregs)(sp)
+ stq $22,(22*8+offes_intregs)(sp)
+ stq $23,(23*8+offes_intregs)(sp)
+ stq $24,(24*8+offes_intregs)(sp)
+ stq $25,(25*8+offes_intregs)(sp)
+ stq $26,(26*8+offes_intregs)(sp)
+ stq $27,(27*8+offes_intregs)(sp)
+ stq $28,(28*8+offes_intregs)(sp)
+ stq $29,(29*8+offes_intregs)(sp)
+ stq $30,(30*8+offes_intregs)(sp)
+ stq $31,(31*8+offes_intregs)(sp)
+
+ /* save all 32 floating-point registers in the execution state */
+ stt $f0 ,( 0*8+offes_fltregs)(sp)
+ stt $f1 ,( 1*8+offes_fltregs)(sp)
+ stt $f2 ,( 2*8+offes_fltregs)(sp)
+ stt $f3 ,( 3*8+offes_fltregs)(sp)
+ stt $f4 ,( 4*8+offes_fltregs)(sp)
+ stt $f5 ,( 5*8+offes_fltregs)(sp)
+ stt $f6 ,( 6*8+offes_fltregs)(sp)
+ stt $f7 ,( 7*8+offes_fltregs)(sp)
+ stt $f8 ,( 8*8+offes_fltregs)(sp)
+ stt $f9 ,( 9*8+offes_fltregs)(sp)
+ stt $f10,(10*8+offes_fltregs)(sp)
+ stt $f11,(11*8+offes_fltregs)(sp)
+ stt $f12,(12*8+offes_fltregs)(sp)
+ stt $f13,(13*8+offes_fltregs)(sp)
+ stt $f14,(14*8+offes_fltregs)(sp)
+ stt $f15,(15*8+offes_fltregs)(sp)
+ stt $f16,(16*8+offes_fltregs)(sp)
+ stt $f17,(17*8+offes_fltregs)(sp)
+ stt $f18,(18*8+offes_fltregs)(sp)
+ stt $f19,(19*8+offes_fltregs)(sp)
+ stt $f20,(20*8+offes_fltregs)(sp)
+ stt $f21,(21*8+offes_fltregs)(sp)
+ stt $f22,(22*8+offes_fltregs)(sp)
+ stt $f23,(23*8+offes_fltregs)(sp)
+ stt $f24,(24*8+offes_fltregs)(sp)
+ stt $f25,(25*8+offes_fltregs)(sp)
+ stt $f26,(26*8+offes_fltregs)(sp)
+ stt $f27,(27*8+offes_fltregs)(sp)
+ stt $f28,(28*8+offes_fltregs)(sp)
+ stt $f29,(29*8+offes_fltregs)(sp)
+ stt $f30,(30*8+offes_fltregs)(sp)
+ stt $f31,(31*8+offes_fltregs)(sp)
+
+ /* calculate sp of method */
+ /* (+2*8 presumably skips the two quads described in the stack layout
+ above: the rplpoint * and the start-of-stack slot pushed by the
+ replacement-out stub — TODO confirm against the stub code) */
+ lda itmp1,(REPLACEMENT_STACK_OFFSET + 2*8)(sp)
+ stq itmp1,(offes_sp)(sp)
+
+ /* br writes the address of the next instruction into ra, giving
+ ldgp a known pc to compute gp from (PIC-safe gp load) */
+ br ra,L_asm_replacement_out_load_gp
+L_asm_replacement_out_load_gp:
+ ldgp gp,0(ra) /* load gp */
+
+ /* store pv */
+ stq pv,(offes_pv)(sp)
+
+ /* call replace_me */
+ ldq a0,-(2*8)(itmp1) /* arg0: rplpoint * */
+ mov sp,a1 /* arg1: execution state */
+ jmp zero,replace_me /* call C function replace_me */
+ jmp zero,abort /* NEVER REACHED */
+
+ .end asm_replacement_out
+
+/* asm_replacement_in **********************************************************
+
+ This code writes the given execution state and jumps to the replacement
+ code.
+
+ This function never returns!
+
+ NOTE: itmp3 is not restored!
+
+ C prototype:
+ void asm_replacement_in(executionstate *es);
+
+*******************************************************************************/
+
+ .ent asm_replacement_in
+
+asm_replacement_in:
+ /* a0 == executionstate *es */
+
+ /* set new sp and pv */
+ ldq sp,(offes_sp)(a0)
+ ldq pv,(offes_pv)(a0)
+
+ /* copy registers from execution state */
+ ldq $0 ,( 0*8+offes_intregs)(a0)
+ ldq $1 ,( 1*8+offes_intregs)(a0)
+ ldq $2 ,( 2*8+offes_intregs)(a0)
+ ldq $3 ,( 3*8+offes_intregs)(a0)
+ ldq $4 ,( 4*8+offes_intregs)(a0)
+ ldq $5 ,( 5*8+offes_intregs)(a0)
+ ldq $6 ,( 6*8+offes_intregs)(a0)
+ ldq $7 ,( 7*8+offes_intregs)(a0)
+ ldq $8 ,( 8*8+offes_intregs)(a0)
+ ldq $9 ,( 9*8+offes_intregs)(a0)
+ ldq $10,(10*8+offes_intregs)(a0)
+ ldq $11,(11*8+offes_intregs)(a0)
+ ldq $12,(12*8+offes_intregs)(a0)
+ ldq $13,(13*8+offes_intregs)(a0)
+ ldq $14,(14*8+offes_intregs)(a0)
+ ldq $15,(15*8+offes_intregs)(a0)
+ /* a0 is loaded below */
+ ldq $17,(17*8+offes_intregs)(a0)
+ ldq $18,(18*8+offes_intregs)(a0)
+ ldq $19,(19*8+offes_intregs)(a0)
+ ldq $20,(20*8+offes_intregs)(a0)
+ ldq $21,(21*8+offes_intregs)(a0)
+ ldq $22,(22*8+offes_intregs)(a0)
+ ldq $23,(23*8+offes_intregs)(a0)
+ ldq $24,(24*8+offes_intregs)(a0)
+ ldq $25,(25*8+offes_intregs)(a0)
+ ldq $26,(26*8+offes_intregs)(a0)
+ /* $27 is pv */
+ ldq $28,(28*8+offes_intregs)(a0)
+ ldq $29,(29*8+offes_intregs)(a0)
+ /* $30 is sp */
+ /* $31 is zero */
+
+ ldt $f0 ,( 0*8+offes_fltregs)(a0)
+ ldt $f1 ,( 1*8+offes_fltregs)(a0)
+ ldt $f2 ,( 2*8+offes_fltregs)(a0)
+ ldt $f3 ,( 3*8+offes_fltregs)(a0)
+ ldt $f4 ,( 4*8+offes_fltregs)(a0)
+ ldt $f5 ,( 5*8+offes_fltregs)(a0)
+ ldt $f6 ,( 6*8+offes_fltregs)(a0)
+ ldt $f7 ,( 7*8+offes_fltregs)(a0)
+ ldt $f8 ,( 8*8+offes_fltregs)(a0)
+ ldt $f9 ,( 9*8+offes_fltregs)(a0)
+ ldt $f10,(10*8+offes_fltregs)(a0)
+ ldt $f11,(11*8+offes_fltregs)(a0)
+ ldt $f12,(12*8+offes_fltregs)(a0)
+ ldt $f13,(13*8+offes_fltregs)(a0)
+ ldt $f14,(14*8+offes_fltregs)(a0)
+ ldt $f15,(15*8+offes_fltregs)(a0)
+ ldt $f16,(16*8+offes_fltregs)(a0)
+ ldt $f17,(17*8+offes_fltregs)(a0)
+ ldt $f18,(18*8+offes_fltregs)(a0)
+ ldt $f19,(19*8+offes_fltregs)(a0)
+ ldt $f20,(20*8+offes_fltregs)(a0)
+ ldt $f21,(21*8+offes_fltregs)(a0)
+ ldt $f22,(22*8+offes_fltregs)(a0)
+ ldt $f23,(23*8+offes_fltregs)(a0)
+ ldt $f24,(24*8+offes_fltregs)(a0)
+ ldt $f25,(25*8+offes_fltregs)(a0)
+ ldt $f26,(26*8+offes_fltregs)(a0)
+ ldt $f27,(27*8+offes_fltregs)(a0)
+ ldt $f28,(28*8+offes_fltregs)(a0)
+ ldt $f29,(29*8+offes_fltregs)(a0)
+ ldt $f30,(30*8+offes_fltregs)(a0)
+ ldt $f31,(31*8+offes_fltregs)(a0)
+
+ /* load new pc */
+ /* itmp3 carries the target pc; this is why itmp3 cannot be
+ restored from the execution state (see NOTE in the header) */
+
+ ldq itmp3,offes_pc(a0)
+
+ /* load a0 */
+ /* a0 must be loaded last: it is the base pointer for all loads above */
+
+ ldq a0,(16*8+offes_intregs)(a0)
+
+ /* jump to new code */
+
+ jmp zero,(itmp3)
+
+ .end asm_replacement_in
+
/******************* function asm_initialize_thread_stack **********************
* *
* initialized a thread stack *
.quad 0
+/* asm_md_init *****************************************************************
+
+ Initialize machine dependent stuff.
+
+ Determines if the byte support instruction set (21164a and higher)
+ is available.
+
+*******************************************************************************/
+
+ .ent asm_md_init
+
+asm_md_init:
+ /* the amask instruction is hand-encoded as raw data, presumably so
+ that assemblers unaware of the BWX extension can still build this
+ file; the architecture feature mask is returned in v0 — TODO
+ confirm expected v0 semantics against the caller */
+ .long 0x47e03c20 /* amask 1,v0 */
+ jmp zero,(ra) /* return */
+
+ .end asm_md_init
+
+
+/* asm_cacheflush **************************************************************
+
+ Synchronizes the instruction cache by issuing the PALcode imb
+ (instruction memory barrier) call, then returns to the caller.
+
+*******************************************************************************/
+
+ .ent asm_cacheflush
+
+asm_cacheflush:
+ call_pal PAL_imb /* synchronize instruction cache */
+ jmp zero,(ra)
+
+ .end asm_cacheflush
+
+
/* Disable exec-stacks, required for Gentoo ***********************************/
#if defined(__GCC__) && defined(__ELF__)
* c-basic-offset: 4
* tab-width: 4
* End:
+ * vim:noexpandtab:sw=4:ts=4:
*/