* Merged in twisti-branch.
diff --git a/src/vm/jit/x86_64/asmpart.S b/src/vm/jit/x86_64/asmpart.S
index ad4650b07bded6623bbc394f7954f9630dbb1893..83da57bef44c240079e13fa7f691a60524d644fe 100644
--- a/src/vm/jit/x86_64/asmpart.S
+++ b/src/vm/jit/x86_64/asmpart.S
@@ -1,6 +1,6 @@
 /* src/vm/jit/x86_64/asmpart.S - Java-C interface functions for x86_64
 
-   Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
    C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
    E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
    J. Wenninger, Institut f. Computersprachen - TU Wien
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.
 
-   Contact: cacao@cacaojvm.org
-
-   Authors: Andreas Krall
-            Reinhard Grafl
-            Christian Thalinger
-
-   Changes: Edwin Steiner
-
-   $Id: asmpart.S 5145 2006-07-17 11:48:38Z twisti $
+   $Id: asmpart.S 7486 2007-03-08 13:50:07Z twisti $
 
 */
 
@@ -57,6 +49,7 @@
        .globl asm_vm_call_method_float
        .globl asm_vm_call_method_double
        .globl asm_vm_call_method_exception_handler
+       .globl asm_vm_call_method_end
 
        .globl asm_call_jit_compiler
 
 
        .globl asm_patcher_wrapper
 
+#if defined(ENABLE_REPLACEMENT)
        .globl asm_replacement_out
        .globl asm_replacement_in
+#endif
 
        .globl asm_builtin_f2i
        .globl asm_builtin_f2l
        .globl asm_builtin_d2i
        .globl asm_builtin_d2l
 
+       .globl asm_compare_and_swap
+       .globl asm_memory_barrier
+
        .globl asm_criticalsections
        .globl asm_getclassvalues_atomic
 
@@ -294,6 +292,8 @@ handle_fa7:
        movq    offvmargdata(itmp2),fa7
        jmp     L_register_copy
 
+asm_vm_call_method_end:
+       nop
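+       /* the label above presumably marks the end of the asm_vm_call_method */
+       /* code region, so the VM can test whether a pc lies inside it        */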
 
 /****************** function asm_call_jit_compiler *****************************
 *                                                                              *
@@ -384,7 +384,7 @@ L_asm_handle_exception_stack_loop:
        mov     t0,4*8(sp)                  /* save maybe-leaf flag               */
 
        mov     xpc,a0                      /* exception pc                       */
-       call    codegen_findmethod@PLT
+       call    codegen_get_pv_from_pc@PLT
        mov     v0,2*8(sp)                  /* save data segment pointer          */
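        /* codegen_get_pv_from_pc resolves the procedure vector (the data  */
        /* segment) of the method that contains the given pc               */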
         
        mov     0*8(sp),a0                  /* pass exception pointer             */
@@ -529,7 +529,7 @@ asm_abstractmethoderror:
 asm_patcher_wrapper:
        push    bp                          /* save base pointer                  */
        mov     sp,bp                       /* move actual sp to bp               */
-       sub     $((3+ARG_CNT+TMP_CNT)*8+sizestackframeinfo),sp
+       sub     $(3+ARG_CNT+TMP_CNT)*8,sp
        and     $0xfffffffffffffff0,sp      /* align sp to 16-byte (this is for   */
                                            /* leaf functions)                    */
 
@@ -566,6 +566,7 @@ L_asm_patcher_wrapper_exception:
        pop     xpc                         /* get and remove return address      */
        jmp     L_asm_handle_exception
 
+#if defined(ENABLE_REPLACEMENT)
 
 /* asm_replacement_out *********************************************************
 
@@ -647,57 +648,78 @@ asm_replacement_out:
    This function never returns!
 
    C prototype:
-      void asm_replacement_in(executionstate *es);
+      void asm_replacement_in(executionstate *es, replace_safestack_t *st);
 
 *******************************************************************************/
 
 asm_replacement_in:
-       mov     a0,%rbp                     /* executionstate *es                 */
+       /* get arguments */
+       mov     a1,s1                       /* replace_safestack_t *st            */
+       mov     a0,%rbp                     /* executionstate *es == safe stack   */
+
+       /* switch to the safe stack */
+       mov     %rbp,sp
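+       /* everything below runs on this safe stack; presumably the original */
+       /* stack is about to be rewritten with the new stack frames          */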
+
+       /* call replace_build_execution_state(st) */
+       mov     s1,a0
+       call    replace_build_execution_state@PLT
 
        /* set new sp */
-       mov     (offes_sp)(%rbp),%rsp
-       
-       /* store address of new code */
-       push    (offes_pc)(%rbp)
-       
+       mov     (offes_sp)(%rbp),sp
+
+       /* push address of new code */
+       pushq   (offes_pc)(%rbp)
+
+       /* allocate an executionstate_t on the stack */
+       sub     $(sizeexecutionstate),sp
+
+       /* call replace_free_safestack(st, &es), passing the address of    */
+       /* the executionstate_t just allocated on the stack                */
+       mov     sp,a1
+       mov     s1,a0
+       call    replace_free_safestack@PLT
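+       /* replace_free_safestack presumably copies the final execution state */
+       /* into the slot allocated above and then releases the safe stack     */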
+
        /* copy registers from execution state */
-       movq    (XMM0 *8+offes_fltregs)(%rbp),%xmm0
-       movq    (XMM1 *8+offes_fltregs)(%rbp),%xmm1
-       movq    (XMM2 *8+offes_fltregs)(%rbp),%xmm2
-       movq    (XMM3 *8+offes_fltregs)(%rbp),%xmm3
-       movq    (XMM4 *8+offes_fltregs)(%rbp),%xmm4
-       movq    (XMM5 *8+offes_fltregs)(%rbp),%xmm5
-       movq    (XMM6 *8+offes_fltregs)(%rbp),%xmm6
-       movq    (XMM7 *8+offes_fltregs)(%rbp),%xmm7
-       movq    (XMM8 *8+offes_fltregs)(%rbp),%xmm8
-       movq    (XMM9 *8+offes_fltregs)(%rbp),%xmm9
-       movq    (XMM10*8+offes_fltregs)(%rbp),%xmm10
-       movq    (XMM11*8+offes_fltregs)(%rbp),%xmm11
-       movq    (XMM12*8+offes_fltregs)(%rbp),%xmm12
-       movq    (XMM13*8+offes_fltregs)(%rbp),%xmm13
-       movq    (XMM14*8+offes_fltregs)(%rbp),%xmm14
-       movq    (XMM15*8+offes_fltregs)(%rbp),%xmm15
-
-       mov     (RAX*8+offes_intregs)(%rbp),%rax
-       mov     (RBX*8+offes_intregs)(%rbp),%rbx
-       mov     (RCX*8+offes_intregs)(%rbp),%rcx
-       mov     (RDX*8+offes_intregs)(%rbp),%rdx
-       mov     (RSI*8+offes_intregs)(%rbp),%rsi
-       mov     (RDI*8+offes_intregs)(%rbp),%rdi
-       mov     (R8 *8+offes_intregs)(%rbp),%r8
-       mov     (R9 *8+offes_intregs)(%rbp),%r9
-       mov     (R10*8+offes_intregs)(%rbp),%r10
-       mov     (R11*8+offes_intregs)(%rbp),%r11
-       mov     (R12*8+offes_intregs)(%rbp),%r12
-       mov     (R13*8+offes_intregs)(%rbp),%r13
-       mov     (R14*8+offes_intregs)(%rbp),%r14
-       mov     (R15*8+offes_intregs)(%rbp),%r15
-
-       mov     (RBP*8+offes_intregs)(%rbp),%rbp
+       movq    (XMM0 *8+offes_fltregs)(sp),%xmm0
+       movq    (XMM1 *8+offes_fltregs)(sp),%xmm1
+       movq    (XMM2 *8+offes_fltregs)(sp),%xmm2
+       movq    (XMM3 *8+offes_fltregs)(sp),%xmm3
+       movq    (XMM4 *8+offes_fltregs)(sp),%xmm4
+       movq    (XMM5 *8+offes_fltregs)(sp),%xmm5
+       movq    (XMM6 *8+offes_fltregs)(sp),%xmm6
+       movq    (XMM7 *8+offes_fltregs)(sp),%xmm7
+       movq    (XMM8 *8+offes_fltregs)(sp),%xmm8
+       movq    (XMM9 *8+offes_fltregs)(sp),%xmm9
+       movq    (XMM10*8+offes_fltregs)(sp),%xmm10
+       movq    (XMM11*8+offes_fltregs)(sp),%xmm11
+       movq    (XMM12*8+offes_fltregs)(sp),%xmm12
+       movq    (XMM13*8+offes_fltregs)(sp),%xmm13
+       movq    (XMM14*8+offes_fltregs)(sp),%xmm14
+       movq    (XMM15*8+offes_fltregs)(sp),%xmm15
+
+       mov     (RAX*8+offes_intregs)(sp),%rax
+       mov     (RBX*8+offes_intregs)(sp),%rbx
+       mov     (RCX*8+offes_intregs)(sp),%rcx
+       mov     (RDX*8+offes_intregs)(sp),%rdx
+       mov     (RSI*8+offes_intregs)(sp),%rsi
+       mov     (RDI*8+offes_intregs)(sp),%rdi
+       mov     (RBP*8+offes_intregs)(sp),%rbp
+       mov     (R8 *8+offes_intregs)(sp),%r8
+       mov     (R9 *8+offes_intregs)(sp),%r9
+       mov     (R10*8+offes_intregs)(sp),%r10
+       mov     (R11*8+offes_intregs)(sp),%r11
+       mov     (R12*8+offes_intregs)(sp),%r12
+       mov     (R13*8+offes_intregs)(sp),%r13
+       mov     (R14*8+offes_intregs)(sp),%r14
+       mov     (R15*8+offes_intregs)(sp),%r15
+
+       /* pop the execution state off the stack */
+       add     $(sizeexecutionstate),sp
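+       /* the offes_pc value pushed above is now at the top of the stack; */
+       /* the final ret pops it and thereby branches into the new code    */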
 
        /* jump to new code */
        ret
 
+#endif /* defined(ENABLE_REPLACEMENT) */
+
 
 /* asm_builtin_x2x *************************************************************
 *                                                                              *
@@ -761,6 +783,30 @@ asm_builtin_d2l:
        ret
 
 
+/* asm_compare_and_swap ********************************************************
+
+   Does an atomic compare and swap.  Required for the lock
+   implementation.
+
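+   C prototype (inferred from the register usage below; the exact
+   declaration lives in the VM headers):
+
+      long asm_compare_and_swap(volatile long *p, long oldval, long newval);
+
+   The old value of *p is returned; the swap succeeded iff it equals
+   oldval.
+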
+*******************************************************************************/
+
+asm_compare_and_swap:
+       mov     a1,v0                       /* move oldval into v0 (%rax)         */
+       lock cmpxchg a2,(a0)                /* if (*a0 == %rax) *a0 = a2; %rax    */
+                                           /* always ends up as the old *a0      */
+       ret                                 /* return the old value in v0         */
+
+
+/* asm_memory_barrier **********************************************************
+
+   A memory barrier for the Java Memory Model.
+
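+   C prototype (assumed, as the code below takes no arguments):
+
+      void asm_memory_barrier(void);
+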
+*******************************************************************************/
+
+asm_memory_barrier:
+       mfence                              /* order all prior loads and stores   */
+       ret
+
+
 asm_getclassvalues_atomic:
 _crit_restart:
 _crit_begin: