/* src/vm/jit/x86_64/asmpart.S - Java-C interface functions for x86_64
- Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
J. Wenninger, Institut f. Computersprachen - TU Wien
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Contact: cacao@cacaojvm.org
-
- Authors: Andreas Krall
- Reinhard Grafl
- Christian Thalinger
-
- Changes: Edwin Steiner
-
- $Id: asmpart.S 5110 2006-07-11 22:23:55Z twisti $
-
*/
#include "vm/jit/x86_64/arch.h"
#include "vm/jit/x86_64/md-abi.h"
#include "vm/jit/x86_64/md-asm.h"
-#include "vm/jit/x86_64/offsets.h"
#include "vm/jit/abi-asm.h"
#include "vm/jit/methodheader.h"
.globl asm_vm_call_method_float
.globl asm_vm_call_method_double
.globl asm_vm_call_method_exception_handler
+ .globl asm_vm_call_method_end
.globl asm_call_jit_compiler
.globl asm_abstractmethoderror
- .globl asm_wrapper_patcher
+ .globl asm_patcher_wrapper
+#if defined(ENABLE_REPLACEMENT)
.globl asm_replacement_out
.globl asm_replacement_in
+#endif
.globl asm_builtin_f2i
.globl asm_builtin_f2l
.globl asm_builtin_d2i
.globl asm_builtin_d2l
- .globl asm_criticalsections
- .globl asm_getclassvalues_atomic
+ .globl asm_compare_and_swap
+ .globl asm_memory_barrier
/********************* function asm_calljavafunction ***************************
mov s3,4*8(sp)
mov s4,5*8(sp)
- mov a0,itmp1 /* move method pointer for compiler */
- xor %rbp,%rbp /* set argument stack frame to zero */
+ mov a0,6*8(sp) /* store method PV */
- test a1,a1 /* maybe we have no args... */
- jle L_copy_done
+ mov sp,s0 /* save stack pointer */
- mov a1,itmp3 /* arg count */
- mov a2,itmp2 /* pointer to arg block */
+ mov a1,t0 /* address of data structure */
+ mov a2,itmp1 /* number of stack arguments */
- mov itmp2,%r14 /* save argument block pointer */
- mov itmp3,%r15 /* save argument count */
+ mov 0*8(t0),a0
+ mov 1*8(t0),a1
+ mov 2*8(t0),a2
+ mov 3*8(t0),a3
+ mov 4*8(t0),a4
+ mov 5*8(t0),a5
- sub $sizevmarg,itmp2 /* initialize pointer (smaller code) */
- add $1,itmp3 /* initialize argument count */
- xor %r12,%r12 /* initialize integer argument counter*/
- xor %r13,%r13 /* initialize float argument counter */
+ movq 6*8(t0),fa0
+ movq 7*8(t0),fa1
+ movq 8*8(t0),fa2
+ movq 9*8(t0),fa3
+ movq 10*8(t0),fa4
+ movq 11*8(t0),fa5
+ movq 12*8(t0),fa6
+ movq 13*8(t0),fa7
-L_register_copy:
- add $sizevmarg,itmp2 /* goto next argument block */
- dec itmp3 /* argument count - 1 */
- jz L_register_copy_done
- andb $0x02,offvmargtype(itmp2) /* is this a float/double type? */
- jnz L_register_handle_float /* yes, handle it */
+ cmp $0,itmp1l
+ je L_asm_vm_call_method_stack_copy_done
- cmp $INT_ARG_CNT,%r12 /* are we out of integer argument */
- je L_register_copy /* register? yes, next loop */
+ mov itmp1,itmp2
+ add $1,itmp2 /* keep stack 16-byte aligned */
+ and $0xfffffffffffffffe,itmp2
+ shl $3,itmp2 /* calculate stack size */
+ sub itmp2,sp /* create stack frame */
+ mov sp,itmp2 /* temporary stack pointer */
- lea jumptable_integer(%rip),%rbp
- mov 0(%rbp,%r12,8),%rbx
- inc %r12 /* integer argument counter + 1 */
- jmp *%rbx
+L_asm_vm_call_method_stack_copy_loop:
+ mov 14*8(t0),itmp3 /* load argument */
+ mov itmp3,0(itmp2) /* store argument on stack */
-L_register_handle_float:
- cmp $FLT_ARG_CNT,%r13 /* are we out of float argument */
- je L_register_copy /* register? yes, next loop */
+ sub $1,itmp1l /* subtract 1 argument */
+ add $8,t0 /* set address of next argument */
+ add $8,itmp2 /* increase SP */
- lea jumptable_float(%rip),%rbp
- mov 0(%rbp,%r13,8),%rbx
- inc %r13 /* float argument counter + 1 */
- jmp *%rbx
-
-L_register_copy_done:
- mov %r15,%rbp /* calculate remaining arguments */
- sub %r12,%rbp /* - integer arguments in registers */
- sub %r13,%rbp /* - float arguments in registers */
- jle L_copy_done /* are all assigned to registers? */
-
- and $0xfffffffffffffffe,%rbp /* keep stack 16-byte aligned */
- shl $3,%rbp /* calculate stack size */
- sub %rbp,sp /* stack frame for arguments */
- mov sp,%rbx /* use %rbx as temp sp */
-
- sub $sizevmarg,%r14 /* initialize pointer (smaller code) */
- add $1,%r15 /* initialize argument count */
-
-L_stack_copy_loop:
- add $sizevmarg,%r14 /* goto next argument block */
- dec %r15 /* are there any arguments left? */
- jz L_copy_done /* no test needed after dec */
-
- andb $0x02,offvmargtype(%r14) /* is this a float/double type? */
- jnz L_stack_handle_float
- dec %r12 /* arguments assigned to registers */
- jge L_stack_copy_loop
- jmp L_stack_copy
-
-L_stack_handle_float:
- dec %r13 /* arguments assigned to registers */
- jge L_stack_copy_loop
-
-L_stack_copy:
- mov offvmargdata(%r14),itmp3 /* copy s8 argument onto stack */
- mov itmp3,0(%rbx)
- add $8,%rbx /* increase sp to next argument */
- jmp L_stack_copy_loop
-
-L_copy_done:
- /* itmp1 still contains method pointer*/
- lea L_asm_call_jit_compiler(%rip),mptr
- mov sp,itmp3 /* calculate the old stack pointer */
- add bp,itmp3
- mov mptr,6*8(itmp3)
- lea (6*8-256)(itmp3),mptr /* We subtract 256 to force the next */
+ cmp $0,itmp1l
+ jg L_asm_vm_call_method_stack_copy_loop
+
+L_asm_vm_call_method_stack_copy_done:
+ lea (6*8-256)(s0),mptr /* We subtract 256 to force the next */
/* move instruction to have a 32-bit */
/* offset. */
- mov (0*8+256)(mptr),itmp3 /* method call as in Java */
- call *itmp3 /* call JIT compiler */
+ mov (0*8+256)(mptr),itmp3 /* load PV */
+ call *itmp3
- add bp,sp /* remove argument stack frame if any */
+ mov s0,sp /* restore SP */
L_asm_vm_call_method_return:
mov 0*8(sp),%rbx /* restore callee saved registers */
call builtin_throw_exception@PLT
jmp L_asm_vm_call_method_return
-
-jumptable_integer:
- .quad handle_a0
- .quad handle_a1
- .quad handle_a2
- .quad handle_a3
- .quad handle_a4
- .quad handle_a5
-
-handle_a0:
- mov offvmargdata(itmp2),a0
- jmp L_register_copy
-handle_a1:
- mov offvmargdata(itmp2),a1
- jmp L_register_copy
-handle_a2:
- mov offvmargdata(itmp2),a2
- jmp L_register_copy
-handle_a3:
- mov offvmargdata(itmp2),a3
- jmp L_register_copy
-handle_a4:
- mov offvmargdata(itmp2),a4
- jmp L_register_copy
-handle_a5:
- mov offvmargdata(itmp2),a5
- jmp L_register_copy
-
-
-jumptable_float:
- .quad handle_fa0
- .quad handle_fa1
- .quad handle_fa2
- .quad handle_fa3
- .quad handle_fa4
- .quad handle_fa5
- .quad handle_fa6
- .quad handle_fa7
-
-handle_fa0:
- movq offvmargdata(itmp2),fa0
- jmp L_register_copy
-handle_fa1:
- movq offvmargdata(itmp2),fa1
- jmp L_register_copy
-handle_fa2:
- movq offvmargdata(itmp2),fa2
- jmp L_register_copy
-handle_fa3:
- movq offvmargdata(itmp2),fa3
- jmp L_register_copy
-handle_fa4:
- movq offvmargdata(itmp2),fa4
- jmp L_register_copy
-handle_fa5:
- movq offvmargdata(itmp2),fa5
- jmp L_register_copy
-handle_fa6:
- movq offvmargdata(itmp2),fa6
- jmp L_register_copy
-handle_fa7:
- movq offvmargdata(itmp2),fa7
- jmp L_register_copy
+asm_vm_call_method_end:
+ nop
/****************** function asm_call_jit_compiler *****************************
mov t0,4*8(sp) /* save maybe-leaf flag */
mov xpc,a0 /* exception pc */
- call codegen_findmethod@PLT
+ call codegen_get_pv_from_pc@PLT
mov v0,2*8(sp) /* save data segment pointer */
mov 0*8(sp),a0 /* pass exception pointer */
jmp L_asm_handle_exception
-/* asm_wrapper_patcher *********************************************************
+/* asm_patcher_wrapper *********************************************************
XXX
*******************************************************************************/
-asm_wrapper_patcher:
+asm_patcher_wrapper:
push bp /* save base pointer */
mov sp,bp /* move actual sp to bp */
- sub $((3+ARG_CNT+TMP_CNT)*8+sizestackframeinfo),sp
+ sub $(3+ARG_CNT+TMP_CNT)*8,sp
and $0xfffffffffffffff0,sp /* align sp to 16-byte (this is for */
/* leaf functions) */
mov itmp1,0*8(sp) /* save itmp1 and itmp2 */
mov itmp2,1*8(sp) /* can be used by some instructions */
- mov sp,a0 /* create stackframe info */
- add $((3+ARG_CNT+TMP_CNT)*8),a0
- xor a1,a1 /* if pv is NULL, use findmethod */
- mov bp,a2 /* pass java sp */
- add $((6+1)*8),a2
- mov ((5+1)*8)(bp),a3 /* pass ra to java function */
- mov a3,a4 /* xpc is equal to ra */
- call stacktrace_create_extern_stackframeinfo@PLT
-
- mov bp,a0 /* pass stack pointer */
- add $((1+1)*8),a0 /* skip function pointer */
- mov 1*8(bp),itmp3 /* get function pointer */
- call *itmp3 /* call the patcher function */
+ mov bp,a0 /* pass SP of patcher stub */
+ add $(1*8),a0
+ mov $0,a1 /* pass PV (if NULL, use findmethod) */
+ mov $0,a2 /* pass RA (it's on the stack) */
+ call patcher_wrapper@PLT
mov v0,2*8(sp) /* save return value */
- mov sp,a0 /* remove stackframe info */
- add $((3+ARG_CNT+TMP_CNT)*8),a0
- call stacktrace_remove_stackframeinfo@PLT
-
RESTORE_ARGUMENT_REGISTERS(3)
RESTORE_TEMPORARY_REGISTERS(3+ARG_CNT)
mov bp,sp /* restore original sp */
pop bp /* restore bp */
- add $(5*8),sp /* remove patcher stackframe, keep ra */
+ add $(5*8),sp /* remove patcher stackframe, keep RA */
test itmp3,itmp3 /* exception thrown? */
- jz L_asm_wrapper_patcher_exception
+ jne L_asm_patcher_wrapper_exception
ret /* call new patched code */
-L_asm_wrapper_patcher_exception:
- call exceptions_get_and_clear_exception@PLT
+L_asm_patcher_wrapper_exception:
+ mov itmp3,xptr /* get exception */
pop xpc /* get and remove return address */
jmp L_asm_handle_exception
+#if defined(ENABLE_REPLACEMENT)
/* asm_replacement_out *********************************************************
This function never returns!
C prototype:
- void asm_replacement_in(executionstate *es);
+ void asm_replacement_in(executionstate *es, replace_safestack_t *st);
*******************************************************************************/
asm_replacement_in:
- mov a0,%rbp /* executionstate *es */
+ /* get arguments */
+ mov a1,s1 /* replace_safestack_t *st */
+ mov a0,%rbp /* executionstate *es == safe stack */
+
+ /* switch to the safe stack */
+ mov %rbp,sp
+
+ /* call replace_build_execution_state(st) */
+ mov s1,a0
+ call replace_build_execution_state@PLT
/* set new sp */
- mov (offes_sp)(%rbp),%rsp
-
- /* store address of new code */
- push (offes_pc)(%rbp)
-
+ mov (offes_sp)(%rbp),sp
+
+ /* push address of new code */
+ pushq (offes_pc)(%rbp)
+
+ /* allocate an executionstate_t on the stack */
+ sub $(sizeexecutionstate),sp
+
+ /* call replace_free_safestack(st, &executionstate_t just allocated on stack) */
+ mov sp,a1
+ mov s1,a0
+ call replace_free_safestack@PLT
+
/* copy registers from execution state */
- movq (XMM0 *8+offes_fltregs)(%rbp),%xmm0
- movq (XMM1 *8+offes_fltregs)(%rbp),%xmm1
- movq (XMM2 *8+offes_fltregs)(%rbp),%xmm2
- movq (XMM3 *8+offes_fltregs)(%rbp),%xmm3
- movq (XMM4 *8+offes_fltregs)(%rbp),%xmm4
- movq (XMM5 *8+offes_fltregs)(%rbp),%xmm5
- movq (XMM6 *8+offes_fltregs)(%rbp),%xmm6
- movq (XMM7 *8+offes_fltregs)(%rbp),%xmm7
- movq (XMM8 *8+offes_fltregs)(%rbp),%xmm8
- movq (XMM9 *8+offes_fltregs)(%rbp),%xmm9
- movq (XMM10*8+offes_fltregs)(%rbp),%xmm10
- movq (XMM11*8+offes_fltregs)(%rbp),%xmm11
- movq (XMM12*8+offes_fltregs)(%rbp),%xmm12
- movq (XMM13*8+offes_fltregs)(%rbp),%xmm13
- movq (XMM14*8+offes_fltregs)(%rbp),%xmm14
- movq (XMM15*8+offes_fltregs)(%rbp),%xmm15
-
- mov (RAX*8+offes_intregs)(%rbp),%rax
- mov (RBX*8+offes_intregs)(%rbp),%rbx
- mov (RCX*8+offes_intregs)(%rbp),%rcx
- mov (RDX*8+offes_intregs)(%rbp),%rdx
- mov (RSI*8+offes_intregs)(%rbp),%rsi
- mov (RDI*8+offes_intregs)(%rbp),%rdi
- mov (R8 *8+offes_intregs)(%rbp),%r8
- mov (R9 *8+offes_intregs)(%rbp),%r9
- mov (R10*8+offes_intregs)(%rbp),%r10
- mov (R11*8+offes_intregs)(%rbp),%r11
- mov (R12*8+offes_intregs)(%rbp),%r12
- mov (R13*8+offes_intregs)(%rbp),%r13
- mov (R14*8+offes_intregs)(%rbp),%r14
- mov (R15*8+offes_intregs)(%rbp),%r15
-
- mov (RBP*8+offes_intregs)(%rbp),%rbp
+ movq (XMM0 *8+offes_fltregs)(sp),%xmm0
+ movq (XMM1 *8+offes_fltregs)(sp),%xmm1
+ movq (XMM2 *8+offes_fltregs)(sp),%xmm2
+ movq (XMM3 *8+offes_fltregs)(sp),%xmm3
+ movq (XMM4 *8+offes_fltregs)(sp),%xmm4
+ movq (XMM5 *8+offes_fltregs)(sp),%xmm5
+ movq (XMM6 *8+offes_fltregs)(sp),%xmm6
+ movq (XMM7 *8+offes_fltregs)(sp),%xmm7
+ movq (XMM8 *8+offes_fltregs)(sp),%xmm8
+ movq (XMM9 *8+offes_fltregs)(sp),%xmm9
+ movq (XMM10*8+offes_fltregs)(sp),%xmm10
+ movq (XMM11*8+offes_fltregs)(sp),%xmm11
+ movq (XMM12*8+offes_fltregs)(sp),%xmm12
+ movq (XMM13*8+offes_fltregs)(sp),%xmm13
+ movq (XMM14*8+offes_fltregs)(sp),%xmm14
+ movq (XMM15*8+offes_fltregs)(sp),%xmm15
+
+ mov (RAX*8+offes_intregs)(sp),%rax
+ mov (RBX*8+offes_intregs)(sp),%rbx
+ mov (RCX*8+offes_intregs)(sp),%rcx
+ mov (RDX*8+offes_intregs)(sp),%rdx
+ mov (RSI*8+offes_intregs)(sp),%rsi
+ mov (RDI*8+offes_intregs)(sp),%rdi
+ mov (RBP*8+offes_intregs)(sp),%rbp
+ mov (R8 *8+offes_intregs)(sp),%r8
+ mov (R9 *8+offes_intregs)(sp),%r9
+ mov (R10*8+offes_intregs)(sp),%r10
+ mov (R11*8+offes_intregs)(sp),%r11
+ mov (R12*8+offes_intregs)(sp),%r12
+ mov (R13*8+offes_intregs)(sp),%r13
+ mov (R14*8+offes_intregs)(sp),%r14
+ mov (R15*8+offes_intregs)(sp),%r15
+
+ /* pop the execution state off the stack */
+ add $(sizeexecutionstate),sp
/* jump to new code */
ret
+#endif /* defined(ENABLE_REPLACEMENT) */
+
/* asm_builtin_x2x *************************************************************
* *
ret
-asm_getclassvalues_atomic:
-_crit_restart:
-_crit_begin:
- movl offbaseval(a0),itmp1l
- movl offdiffval(a0),itmp2l
- movl offbaseval(a1),itmp3l
-_crit_end:
- movl itmp1l,offcast_super_baseval(a2)
- movl itmp2l,offcast_super_diffval(a2)
- movl itmp3l,offcast_sub_baseval(a2)
+/* asm_compare_and_swap ********************************************************
+
+ Does an atomic compare and swap. Required for the lock
+ implementation.
+
+*******************************************************************************/
+
+asm_compare_and_swap:
+ mov a1,v0 /* v0 is %rax */
+ lock cmpxchg a2,(a0)
ret
- .data
-
-asm_criticalsections:
-#if defined(ENABLE_THREADS)
- .quad _crit_begin
- .quad _crit_end
- .quad _crit_restart
-#endif
- .quad 0
+
+/* asm_memory_barrier **********************************************************
+
+ A memory barrier for the Java Memory Model.
+
+*******************************************************************************/
+
+asm_memory_barrier:
+ mfence
+ ret
-/* Disable exec-stacks, required for Gentoo ***********************************/
+/* disable exec-stacks ********************************************************/
-#if defined(__GCC__) && defined(__ELF__)
- .section .note.GNU-stack,"",@progbits
+#if defined(__linux__) && defined(__ELF__)
+ .section .note.GNU-stack,"",%progbits
#endif