Changes: Joseph Wenninger
Christian Thalinger
+ Edwin Steiner
- $Id: asmpart.S 4561 2006-03-05 23:53:19Z twisti $
+ $Id: asmpart.S 4749 2006-04-11 10:20:18Z twisti $
*/
#include "vm/jit/alpha/md-asm.h"
#include "vm/jit/alpha/offsets.h"
-#include "vm/jit/abi.h"
+#include "vm/jit/abi-asm.h"
#include "vm/jit/methodheader.h"
.set noreorder
-/********************* exported functions and variables ***********************/
-
- .globl asm_md_init
-
- .globl asm_sync_instruction_cache
+/* export functions ***********************************************************/
.globl asm_vm_call_method
.globl asm_vm_call_method_int
.globl asm_vm_call_method_long
.globl asm_vm_call_method_float
.globl asm_vm_call_method_double
+ .globl asm_vm_call_method_exception_handler
.globl asm_call_jit_compiler
.globl asm_handle_exception
.globl asm_wrapper_patcher
+ .globl asm_replacement_out
+ .globl asm_replacement_in
+
.globl asm_perform_threadswitch
.globl asm_initialize_thread_stack
.globl asm_switchstackandcall
.globl asm_criticalsections
.globl asm_getclassvalues_atomic
-
-/* asm_sync_instruction_cache **************************************************
-
- XXX
-
-*******************************************************************************/
-
- .ent asm_sync_instruction_cache
-
-asm_sync_instruction_cache:
- call_pal PAL_imb /* synchronize instruction cache */
- jmp zero,(ra)
-
- .end asm_sync_instruction_cache
-
-
-/* asm_md_init *****************************************************************
-
- Initialize machine dependent stuff.
-
- Determines if the byte support instruction set (21164a and higher)
- is available.
-
-*******************************************************************************/
-
- .ent asm_md_init
-
-asm_md_init:
-
- .long 0x47e03c20 /* amask 1,v0 */
- jmp zero,(ra) /* return */
-
- .end asm_md_init
+ .globl asm_md_init
+ .globl asm_cacheflush
/* asm_vm_call_method **********************************************************
.align 3
.quad 0 /* catch type all */
- .quad calljava_xhandler2 /* handler pc */
- .quad calljava_xhandler2 /* end pc */
- .quad asm_vm_call_method /* start pc */
+ .quad 0 /* handler pc */
+ .quad 0 /* end pc */
+ .quad 0 /* start pc */
.long 1 /* extable size */
.long 0 /* ALIGNMENT PADDING */
.quad 0 /* line number table start */
calljava_ret2:
jmp zero,(ra)
-calljava_xhandler2:
+asm_vm_call_method_exception_handler:
s8addq s6,sp,sp
ldq gp,1*8(sp) /* restore global pointer */
mov itmp1,a0
ldq ra,0*8(sp) /* restore return address */
ldq s6,3*8(sp)
lda sp,5*8(sp) /* free stack space */
- mov zero,v0 /* return NULL */
jmp zero,(ra)
.end asm_vm_call_method
.end asm_wrapper_patcher
+/* asm_replacement_out *********************************************************
+
+ This code is jumped to from the replacement-out stubs that are executed
+ when a thread reaches an activated replacement point.
+
+ The purpose of asm_replacement_out is to read out the parts of the
+ execution state that cannot be accessed from C code, store this state,
+ and then call the C function replace_me.
+
+ Stack layout:
+ 16 start of stack inside method to replace
+ 0 rplpoint * info on the replacement point that was reached
+
+ NOTE: itmp3 has been clobbered by the replacement-out stub!
+
+*******************************************************************************/
+
+/* some room to accommodate changes of the stack frame size during replacement */
+ /* XXX we should find a cleaner solution here */
+#define REPLACEMENT_ROOM 512
+
+#define REPLACEMENT_STACK_OFFSET ((sizeexecutionstate + REPLACEMENT_ROOM + 0xf) & ~0xf)
+
+	.ent asm_replacement_out
+
+asm_replacement_out:
+	/* create stack frame */
+	lda sp,-(REPLACEMENT_STACK_OFFSET)(sp)
+
+	/* save registers in execution state */
+	stq $0 ,( 0*8+offes_intregs)(sp)
+	stq $1 ,( 1*8+offes_intregs)(sp)
+	stq $2 ,( 2*8+offes_intregs)(sp)
+	stq $3 ,( 3*8+offes_intregs)(sp)
+	stq $4 ,( 4*8+offes_intregs)(sp)
+	stq $5 ,( 5*8+offes_intregs)(sp)
+	stq $6 ,( 6*8+offes_intregs)(sp)
+	stq $7 ,( 7*8+offes_intregs)(sp)
+	stq $8 ,( 8*8+offes_intregs)(sp)
+	stq $9 ,( 9*8+offes_intregs)(sp)
+	stq $10,(10*8+offes_intregs)(sp)
+	stq $11,(11*8+offes_intregs)(sp)
+	stq $12,(12*8+offes_intregs)(sp)
+	stq $13,(13*8+offes_intregs)(sp)
+	stq $14,(14*8+offes_intregs)(sp)
+	stq $15,(15*8+offes_intregs)(sp)
+	stq $16,(16*8+offes_intregs)(sp)
+	stq $17,(17*8+offes_intregs)(sp)
+	stq $18,(18*8+offes_intregs)(sp)
+	stq $19,(19*8+offes_intregs)(sp)
+	stq $20,(20*8+offes_intregs)(sp)
+	stq $21,(21*8+offes_intregs)(sp)
+	stq $22,(22*8+offes_intregs)(sp)
+	stq $23,(23*8+offes_intregs)(sp)
+	stq $24,(24*8+offes_intregs)(sp)
+	stq $25,(25*8+offes_intregs)(sp)
+	stq $26,(26*8+offes_intregs)(sp)
+	stq $27,(27*8+offes_intregs)(sp)
+	stq $28,(28*8+offes_intregs)(sp)
+	stq $29,(29*8+offes_intregs)(sp)
+	stq $30,(30*8+offes_intregs)(sp)
+	stq $31,(31*8+offes_intregs)(sp)
+
+	stt $f0 ,( 0*8+offes_fltregs)(sp)
+	stt $f1 ,( 1*8+offes_fltregs)(sp)
+	stt $f2 ,( 2*8+offes_fltregs)(sp)
+	stt $f3 ,( 3*8+offes_fltregs)(sp)
+	stt $f4 ,( 4*8+offes_fltregs)(sp)
+	stt $f5 ,( 5*8+offes_fltregs)(sp)
+	stt $f6 ,( 6*8+offes_fltregs)(sp)
+	stt $f7 ,( 7*8+offes_fltregs)(sp)
+	stt $f8 ,( 8*8+offes_fltregs)(sp)
+	stt $f9 ,( 9*8+offes_fltregs)(sp)
+	stt $f10,(10*8+offes_fltregs)(sp)
+	stt $f11,(11*8+offes_fltregs)(sp)
+	stt $f12,(12*8+offes_fltregs)(sp)
+	stt $f13,(13*8+offes_fltregs)(sp)
+	stt $f14,(14*8+offes_fltregs)(sp)
+	stt $f15,(15*8+offes_fltregs)(sp)
+	stt $f16,(16*8+offes_fltregs)(sp)
+	stt $f17,(17*8+offes_fltregs)(sp)
+	stt $f18,(18*8+offes_fltregs)(sp)
+	stt $f19,(19*8+offes_fltregs)(sp)
+	stt $f20,(20*8+offes_fltregs)(sp)
+	stt $f21,(21*8+offes_fltregs)(sp)
+	stt $f22,(22*8+offes_fltregs)(sp)
+	stt $f23,(23*8+offes_fltregs)(sp)
+	stt $f24,(24*8+offes_fltregs)(sp)
+	stt $f25,(25*8+offes_fltregs)(sp)
+	stt $f26,(26*8+offes_fltregs)(sp)
+	stt $f27,(27*8+offes_fltregs)(sp)
+	stt $f28,(28*8+offes_fltregs)(sp)
+	stt $f29,(29*8+offes_fltregs)(sp)
+	stt $f30,(30*8+offes_fltregs)(sp)
+	stt $f31,(31*8+offes_fltregs)(sp)
+
+	/* calculate sp of method */
+	/* the method's sp lies above our frame plus the two quads pushed */
+	/* by the replacement-out stub (rplpoint * at 0, start of stack   */
+	/* at 16 -- see the stack layout in the header comment above)     */
+	lda itmp1,(REPLACEMENT_STACK_OFFSET + 2*8)(sp)
+	stq itmp1,(offes_sp)(sp)
+
+	br ra,L_asm_replacement_out_load_gp
+L_asm_replacement_out_load_gp:
+	ldgp gp,0(ra)                       /* load gp */
+
+	/* store pv */
+	stq pv,(offes_pv)(sp)
+
+	/* call replace_me */
+	ldq a0,-(2*8)(itmp1)                /* arg0: rplpoint *          */
+	mov sp,a1                           /* arg1: execution state     */
+	/* tail jump, no return address: replace_me must never return;   */
+	/* if it ever does, control falls into abort                     */
+	jmp zero,replace_me                 /* call C function replace_me */
+	jmp zero,abort                      /* NEVER REACHED             */
+
+	.end asm_replacement_out
+
+/* asm_replacement_in **********************************************************
+
+ This code writes the given execution state and jumps to the replacement
+ code.
+
+ This function never returns!
+
+ NOTE: itmp3 is not restored!
+
+ C prototype:
+ void asm_replacement_in(executionstate *es);
+
+*******************************************************************************/
+
+	.ent asm_replacement_in
+
+asm_replacement_in:
+	/* a0 == executionstate *es */
+	/* NOTE(review): a0 is the base register for every load below, so */
+	/* it must only be overwritten at the very end of this function   */
+
+	/* set new sp and pv */
+	ldq sp,(offes_sp)(a0)
+	ldq pv,(offes_pv)(a0)
+
+	/* copy registers from execution state */
+	ldq $0 ,( 0*8+offes_intregs)(a0)
+	ldq $1 ,( 1*8+offes_intregs)(a0)
+	ldq $2 ,( 2*8+offes_intregs)(a0)
+	ldq $3 ,( 3*8+offes_intregs)(a0)
+	ldq $4 ,( 4*8+offes_intregs)(a0)
+	ldq $5 ,( 5*8+offes_intregs)(a0)
+	ldq $6 ,( 6*8+offes_intregs)(a0)
+	ldq $7 ,( 7*8+offes_intregs)(a0)
+	ldq $8 ,( 8*8+offes_intregs)(a0)
+	ldq $9 ,( 9*8+offes_intregs)(a0)
+	ldq $10,(10*8+offes_intregs)(a0)
+	ldq $11,(11*8+offes_intregs)(a0)
+	ldq $12,(12*8+offes_intregs)(a0)
+	ldq $13,(13*8+offes_intregs)(a0)
+	ldq $14,(14*8+offes_intregs)(a0)
+	ldq $15,(15*8+offes_intregs)(a0)
+	/* a0 is loaded below */
+	ldq $17,(17*8+offes_intregs)(a0)
+	ldq $18,(18*8+offes_intregs)(a0)
+	ldq $19,(19*8+offes_intregs)(a0)
+	ldq $20,(20*8+offes_intregs)(a0)
+	ldq $21,(21*8+offes_intregs)(a0)
+	ldq $22,(22*8+offes_intregs)(a0)
+	ldq $23,(23*8+offes_intregs)(a0)
+	ldq $24,(24*8+offes_intregs)(a0)
+	ldq $25,(25*8+offes_intregs)(a0)
+	ldq $26,(26*8+offes_intregs)(a0)
+	/* $27 is pv */
+	ldq $28,(28*8+offes_intregs)(a0)
+	ldq $29,(29*8+offes_intregs)(a0)
+	/* $30 is sp */
+	/* $31 is zero */
+
+	ldt $f0 ,( 0*8+offes_fltregs)(a0)
+	ldt $f1 ,( 1*8+offes_fltregs)(a0)
+	ldt $f2 ,( 2*8+offes_fltregs)(a0)
+	ldt $f3 ,( 3*8+offes_fltregs)(a0)
+	ldt $f4 ,( 4*8+offes_fltregs)(a0)
+	ldt $f5 ,( 5*8+offes_fltregs)(a0)
+	ldt $f6 ,( 6*8+offes_fltregs)(a0)
+	ldt $f7 ,( 7*8+offes_fltregs)(a0)
+	ldt $f8 ,( 8*8+offes_fltregs)(a0)
+	ldt $f9 ,( 9*8+offes_fltregs)(a0)
+	ldt $f10,(10*8+offes_fltregs)(a0)
+	ldt $f11,(11*8+offes_fltregs)(a0)
+	ldt $f12,(12*8+offes_fltregs)(a0)
+	ldt $f13,(13*8+offes_fltregs)(a0)
+	ldt $f14,(14*8+offes_fltregs)(a0)
+	ldt $f15,(15*8+offes_fltregs)(a0)
+	ldt $f16,(16*8+offes_fltregs)(a0)
+	ldt $f17,(17*8+offes_fltregs)(a0)
+	ldt $f18,(18*8+offes_fltregs)(a0)
+	ldt $f19,(19*8+offes_fltregs)(a0)
+	ldt $f20,(20*8+offes_fltregs)(a0)
+	ldt $f21,(21*8+offes_fltregs)(a0)
+	ldt $f22,(22*8+offes_fltregs)(a0)
+	ldt $f23,(23*8+offes_fltregs)(a0)
+	ldt $f24,(24*8+offes_fltregs)(a0)
+	ldt $f25,(25*8+offes_fltregs)(a0)
+	ldt $f26,(26*8+offes_fltregs)(a0)
+	ldt $f27,(27*8+offes_fltregs)(a0)
+	ldt $f28,(28*8+offes_fltregs)(a0)
+	ldt $f29,(29*8+offes_fltregs)(a0)
+	ldt $f30,(30*8+offes_fltregs)(a0)
+	ldt $f31,(31*8+offes_fltregs)(a0)
+
+	/* load new pc */
+
+	/* itmp3 is scratch for the target pc; per the header note above  */
+	/* it is deliberately NOT restored for the replaced code          */
+	ldq itmp3,offes_pc(a0)
+
+	/* load a0 */
+
+	/* done last: a0 was needed as base register for all loads above  */
+	ldq a0,(16*8+offes_intregs)(a0)
+
+	/* jump to new code */
+
+	jmp zero,(itmp3)
+
+	.end asm_replacement_in
+
/******************* function asm_initialize_thread_stack **********************
* *
* initialized a thread stack *
.quad 0
+/* asm_md_init *****************************************************************
+
+ Initialize machine dependent stuff.
+
+ Determines if the byte support instruction set (21164a and higher)
+ is available.
+
+*******************************************************************************/
+
+	.ent asm_md_init
+
+asm_md_init:
+	/* emitted as a raw opcode word -- presumably so that assemblers  */
+	/* without amask support still build this file; TODO confirm      */
+	.long 0x47e03c20                    /* amask 1,v0 */
+	/* result in v0: amask clears the bits of implemented features,   */
+	/* so v0 == 0 iff the byte/word (BWX) extension is available      */
+	jmp zero,(ra)                       /* return */
+
+	.end asm_md_init
+
+
+/* asm_cacheflush **************************************************************
+
+   Synchronize the instruction cache via the IMB PALcode call.
+
+*******************************************************************************/
+
+	.ent asm_cacheflush
+
+asm_cacheflush:
+	/* IMB synchronizes the entire instruction cache; any address or  */
+	/* length arguments passed by the caller are ignored here --      */
+	/* NOTE(review): confirm against the C prototype                  */
+	call_pal PAL_imb                    /* synchronize instruction cache */
+	jmp zero,(ra)
+
+	.end asm_cacheflush
+
+
/* Disable exec-stacks, required for Gentoo ***********************************/
#if defined(__GCC__) && defined(__ELF__)
* c-basic-offset: 4
* tab-width: 4
* End:
+ * vim:noexpandtab:sw=4:ts=4:
*/