/* src/vm/jit/x86_64/asmpart.S - Java-C interface functions for x86_64

   Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
   C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
   E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
   J. Wenninger, Institut f. Computersprachen - TU Wien

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

*/


#include "config.h"

#include "vm/jit/x86_64/arch.h"
#include "vm/jit/x86_64/md-abi.h"
#include "vm/jit/x86_64/md-asm.h"

#include "vm/jit/abi-asm.h"
#include "vm/jit/methodheader.h"


        .text


/* export functions ***********************************************************/

        .globl asm_vm_call_method
        .globl asm_vm_call_method_int
        .globl asm_vm_call_method_long
        .globl asm_vm_call_method_float
        .globl asm_vm_call_method_double
        .globl asm_vm_call_method_exception_handler
        .globl asm_vm_call_method_end

        .globl asm_call_jit_compiler

        .globl asm_handle_exception
        .globl asm_handle_nat_exception

        .globl asm_abstractmethoderror

#if defined(ENABLE_REPLACEMENT)
        .globl asm_replacement_out
        .globl asm_replacement_in
#endif

        .globl asm_builtin_f2i
        .globl asm_builtin_f2l
        .globl asm_builtin_d2i
        .globl asm_builtin_d2l

        .globl asm_compare_and_swap
        .globl asm_memory_barrier

/********************* function asm_vm_call_method *****************************
*                                                                              *
*   This function calls a Java method (which possibly still needs to be       *
*   compiled), with the arguments taken from an argument array.                *
*                                                                              *
*   Inputs, as used by the code below:                                         *
*     a0 ... the method's PV (the entry point that is called)                  *
*     a1 ... address of the argument array                                     *
*     a2 ... number of arguments that are passed on the stack                  *
*                                                                              *
*******************************************************************************/
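/* The data block below is a dummy method header for asm_vm_call_method; the
   field order mirrors vm/jit/methodheader.h.  Presumably it exists so that
   code inspecting a frame's method header (e.g. the exception handling path)
   finds sane values for this assembler frame; treat this note as an
   assumption, not as authoritative documentation.                           */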

        .align  8

        .quad   0                           /* catch type all                     */
        .quad   0                           /* handler pc                         */
        .quad   0                           /* end pc                             */
        .quad   0                           /* start pc                           */
        .long   1                           /* extable size                       */
        .long   0                           /* ALIGNMENT PADDING                  */
        .quad   0                           /* line number table  start           */
        .quad   0                           /* line number table  size            */
        .long   0                           /* ALIGNMENT PADDING                  */
        .long   0                           /* fltsave                            */
        .long   0                           /* intsave                            */
        .long   0                           /* isleaf                             */
        .long   0                           /* IsSync                             */
        .long   0                           /* frame size                         */
        .quad   0                           /* codeinfo pointer                   */
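/* Layout of the argument array passed in a1, as assumed from the loads
   below (a sketch of the caller's contract, not authoritative):

      slot  0 ..  5   integer/pointer arguments -> a0 .. a5
      slot  6 .. 13   floating point arguments  -> fa0 .. fa7
      slot 14 ..      arguments that are copied onto the stack

   Each slot is 8 bytes wide.                                                */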

asm_vm_call_method:
asm_vm_call_method_int:
asm_vm_call_method_long:
asm_vm_call_method_float:
asm_vm_call_method_double:
        sub     $(7*8),sp                   /* keep stack 16-byte aligned         */
        mov     %rbx,0*8(sp)                /* %rbx is not callee-saved in cacao  */
        mov     s0,1*8(sp)
        mov     s1,2*8(sp)
        mov     s2,3*8(sp)
        mov     s3,4*8(sp)
        mov     s4,5*8(sp)

        mov     a0,6*8(sp)                  /* store method PV                    */

        mov     sp,s0                       /* save stack pointer                 */

        mov     a1,t0                       /* address of data structure          */
        mov     a2,itmp1                    /* number of stack arguments          */

        mov     0*8(t0),a0
        mov     1*8(t0),a1
        mov     2*8(t0),a2
        mov     3*8(t0),a3
        mov     4*8(t0),a4
        mov     5*8(t0),a5

        movq    6*8(t0),fa0
        movq    7*8(t0),fa1
        movq    8*8(t0),fa2
        movq    9*8(t0),fa3
        movq    10*8(t0),fa4
        movq    11*8(t0),fa5
        movq    12*8(t0),fa6
        movq    13*8(t0),fa7

        cmp     $0,itmp1l
        je      L_asm_vm_call_method_stack_copy_done

        mov     itmp1,itmp2
        add     $1,itmp2                    /* keep stack 16-byte aligned         */
        and     $0xfffffffffffffffe,itmp2
        shl     $3,itmp2                    /* calculate stack size               */
        sub     itmp2,sp                    /* create stack frame                 */
        mov     sp,itmp2                    /* temporary stack pointer            */

L_asm_vm_call_method_stack_copy_loop:
        mov     14*8(t0),itmp3              /* load argument                      */
        mov     itmp3,0(itmp2)              /* store argument on stack            */

        sub     $1,itmp1l                   /* subtract 1 argument                */
        add     $8,t0                       /* set address of next argument       */
        add     $8,itmp2                    /* increase SP                        */

        cmp     $0,itmp1l
        jg      L_asm_vm_call_method_stack_copy_loop

L_asm_vm_call_method_stack_copy_done:
        lea     (6*8-256)(s0),mptr          /* We subtract 256 to force the next  */
                                            /* move instruction to have a 32-bit  */
                                            /* offset.                            */

        mov     (0*8+256)(mptr),itmp3       /* load PV                            */
        call    *itmp3

        mov     s0,sp                       /* restore SP                         */

L_asm_vm_call_method_return:
        mov     0*8(sp),%rbx                /* restore callee saved registers     */
        mov     1*8(sp),s0
        mov     2*8(sp),s1
        mov     3*8(sp),s2
        mov     4*8(sp),s3
        mov     5*8(sp),s4
        add     $(7*8),sp                   /* free stack space                   */
        ret

asm_vm_call_method_exception_handler:
        mov     xptr,a0                     /* pass exception pointer             */
        call    builtin_throw_exception@PLT
        jmp     L_asm_vm_call_method_return

asm_vm_call_method_end:
        nop

/****************** function asm_call_jit_compiler *****************************
*                                                                              *
*   Invokes the compiler for untranslated Java methods.                        *
*                                                                              *
*   itmp1 contains a pointer to the method info structure (prepared by         *
*   createcompilerstub) and mptr the method pointer through which the call     *
*   was made.  Both are passed to jit_asm_compile, together with the Java      *
*   SP and the return address, so that the location holding the method         *
*   address (an immediate operand in the calling code for invokestatic/        *
*   invokespecial, or a vftbl entry for invokevirtual/invokeinterface) can     *
*   be patched to point to the compiled code.                                  *
*                                                                              *
*******************************************************************************/
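/* Hedged sketch of the compiler entry point as it is used below (four
   arguments, return value is the entry point of the compiled method or
   NULL if an exception was thrown during compilation):

      u1 *jit_asm_compile(methodinfo *m, u1 *mptr, u1 *sp, u1 *ra);          */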

asm_call_jit_compiler:
L_asm_call_jit_compiler:                /* required for PIC code              */
        sub     $(ARG_CNT+1)*8,sp           /* +1: keep stack 16-byte aligned     */

        SAVE_ARGUMENT_REGISTERS(0)

        mov     itmp1,a0                    /* pass methodinfo pointer            */
        mov     mptr,a1                     /* pass method pointer                */
        mov     sp,a2                       /* pass java sp                       */
        add     $(1+ARG_CNT+1)*8,a2
        mov     (ARG_CNT+1)*8(sp),a3        /* pass ra to java function           */
        call    jit_asm_compile@PLT

        RESTORE_ARGUMENT_REGISTERS(0)

        add     $(ARG_CNT+1)*8,sp           /* remove stack frame                 */

        test    v0,v0                       /* check for exception                */
        je      L_asm_call_jit_compiler_exception

        jmp     *v0                         /* ...and now call the new method     */

L_asm_call_jit_compiler_exception:
        call    exceptions_get_and_clear_exception@PLT
        pop     xpc                         /* delete return address              */
        sub     $3,xpc                      /* faulting address is ra - 3         */
        jmp     L_asm_handle_exception

/* asm_handle_exception ********************************************************
*                                                                              *
*   This function handles an exception. It does not use the usual calling      *
*   conventions. The exception pointer is passed in REG_ITMP1 and the pc       *
*   from the exception raising position is passed in REG_ITMP2. It searches    *
*   the local exception table for a handler. If none is found, it unwinds      *
*   the stack and continues the search in the calling frames.                  *
*                                                                              *
*******************************************************************************/
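/* Assumed signatures of the C helpers used by the stack-walking loop below
   (derived from how they are called here, not from their declarations):

      u1 *codegen_get_pv_from_pc(u1 *pc);
      u1 *exceptions_handle_exception(java_objectheader *xptr, u1 *xpc,
                                      u1 *pv, u1 *sp);

   exceptions_handle_exception returns the handler address, or NULL if the
   current frame has no matching handler.                                    */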

asm_handle_nat_exception:
        add     $8,sp                       /* clear return address of native stub*/

asm_handle_exception:
L_asm_handle_exception:                 /* required for PIC code              */
        sub     $((ARG_CNT+TMP_CNT)*8),sp   /* create maybe-leaf stackframe       */

        SAVE_ARGUMENT_REGISTERS(0)          /* we save arg and temp registers in  */
        SAVE_TEMPORARY_REGISTERS(ARG_CNT)   /* case this is a leaf method         */

        mov     $((ARG_CNT+TMP_CNT)*8),a3   /* prepare a3 for handle_exception    */
        mov     $1,t0                       /* set maybe-leaf flag                */

L_asm_handle_exception_stack_loop:
        sub     $(6*8),sp
        mov     xptr,0*8(sp)                /* save exception pointer             */
        mov     xpc,1*8(sp)                 /* save exception pc                  */
        add     sp,a3                       /* calculate Java sp into a3...       */
        add     $(6*8),a3
        mov     a3,3*8(sp)                  /* ...and save it                     */
        mov     t0,4*8(sp)                  /* save maybe-leaf flag               */

        mov     xpc,a0                      /* exception pc                       */
        call    codegen_get_pv_from_pc@PLT
        mov     v0,2*8(sp)                  /* save data segment pointer          */

        mov     0*8(sp),a0                  /* pass exception pointer             */
        mov     1*8(sp),a1                  /* pass exception pc                  */
        mov     v0,a2                       /* pass data segment pointer          */
        mov     3*8(sp),a3                  /* pass Java stack pointer            */
        call    exceptions_handle_exception@PLT

        test    v0,v0
        jz      L_asm_handle_exception_not_catched

        mov     v0,xpc                      /* move handlerpc into xpc            */
        mov     0*8(sp),xptr                /* restore exception pointer          */
        mov     4*8(sp),t0                  /* get maybe-leaf flag                */
        add     $(6*8),sp                   /* free stack frame                   */

        test    t0,t0                       /* test for maybe-leaf flag           */
        jz      L_asm_handle_exception_no_leaf

        RESTORE_ARGUMENT_REGISTERS(0)       /* if this is a leaf method, we have  */
        RESTORE_TEMPORARY_REGISTERS(ARG_CNT)/* to restore arg and temp registers  */

        add     $((ARG_CNT+TMP_CNT)*8),sp   /* remove maybe-leaf stackframe       */

L_asm_handle_exception_no_leaf:
        jmp     *xpc                        /* jump to the handler                */

L_asm_handle_exception_not_catched:
        mov     0*8(sp),xptr                /* restore exception pointer          */
        mov     2*8(sp),itmp3               /* restore data segment pointer       */
        mov     4*8(sp),t0                  /* get maybe-leaf flag                */
        add     $(6*8),sp

        test    t0,t0
        jz      L_asm_handle_exception_no_leaf_stack

        add     $((ARG_CNT+TMP_CNT)*8),sp   /* remove maybe-leaf stackframe       */
        xor     t0,t0                       /* clear the maybe-leaf flag          */

L_asm_handle_exception_no_leaf_stack:
        mov     FrameSize(itmp3),itmp2l     /* get frame size                     */
        add     sp,itmp2                    /* pointer to save area               */

        mov     IntSave(itmp3),a0l          /* a0l = saved int register count     */
        test    a0l,a0l
        je      noint

        cmp     $1,a0l
        je      int1
        cmp     $2,a0l
        je      int2
        cmp     $3,a0l
        je      int3
        cmp     $4,a0l
        je      int4

        mov     -5*8(itmp2),s0
int4:
        mov     -4*8(itmp2),s1
int3:
        mov     -3*8(itmp2),s2
int2:
        mov     -2*8(itmp2),s3
int1:
        mov     -1*8(itmp2),s4

        shl     $3,a0l                      /* multiply by 8 bytes                */
        sub     a0,itmp2

noint:
#if 0
        mov     FltSave(itmp3),a0l          /* a0l = saved flt register count     */
        test    a0l,a0l
        je      noflt

        cmpl    $1,a0l
        je      flt1
        cmpl    $2,a0l
        je      flt2
        cmpl    $3,a0l
        je      flt3
        cmpl    $4,a0l
        je      flt4

        movq    -5*8(itmp2),%xmm11
flt4:
        movq    -4*8(itmp2),%xmm12
flt3:
        movq    -3*8(itmp2),%xmm13
flt2:
        movq    -2*8(itmp2),%xmm14
flt1:
        movq    -1*8(itmp2),%xmm15

noflt:
#endif
        mov     FrameSize(itmp3),itmp2l     /* get frame size                     */
        add     itmp2,sp                    /* unwind stack                       */

                                            /* exception pointer is still set     */
        pop     xpc                         /* the new xpc is return address      */
        sub     $3,xpc                      /* subtract 3 bytes for call          */

        xor     a3,a3                       /* prepare a3 for handle_exception    */

        jmp     L_asm_handle_exception_stack_loop


/* asm_abstractmethoderror *****************************************************

   Creates and throws an AbstractMethodError.

*******************************************************************************/
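/* Assumed prototype of the helper called below (the exception object is
   returned in v0 and then handled like any other exception):

      java_objectheader *exceptions_asm_new_abstractmethoderror(u1 *sp, u1 *ra); */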

asm_abstractmethoderror:
        mov     sp,a0                       /* pass java sp                       */
        add     $1*8,a0
        mov     0*8(sp),a1                  /* pass exception address             */
        sub     $3,a1
        call    exceptions_asm_new_abstractmethoderror@PLT
                                            /* exception pointer is return value  */
        pop     xpc                         /* get exception address              */
        sub     $3,xpc                      /* exception address is ra - 3        */
        jmp     L_asm_handle_exception


#if defined(ENABLE_REPLACEMENT)

/* asm_replacement_out *********************************************************

   This code is jumped to from the replacement-out stubs that are executed
   when a thread reaches an activated replacement point.

   The purpose of asm_replacement_out is to read out the parts of the
   execution state that cannot be accessed from C code, store this state,
   and then call the C function replace_me.

   Stack layout:
      8                 start of stack inside method to replace
      0   rplpoint *    info on the replacement point that was reached

*******************************************************************************/
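/* Hedged sketch of the C function that this stub hands control to (it is
   not expected to return; note the call to abort below):

      void replace_me(rplpoint *rp, executionstate *es);                     */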

/* some room to accommodate changes of the stack frame size during replacement */
/* XXX we should find a cleaner solution here */
#define REPLACEMENT_ROOM  512

asm_replacement_out:
        /* create stack frame */
        sub     $(sizeexecutionstate + REPLACEMENT_ROOM),sp

        /* save registers in execution state */
        mov     %rax,(RAX*8+offes_intregs)(sp)
        mov     %rbx,(RBX*8+offes_intregs)(sp)
        mov     %rcx,(RCX*8+offes_intregs)(sp)
        mov     %rdx,(RDX*8+offes_intregs)(sp)
        mov     %rsi,(RSI*8+offes_intregs)(sp)
        mov     %rdi,(RDI*8+offes_intregs)(sp)
        mov     %rbp,(RBP*8+offes_intregs)(sp)
        movq    $0  ,(RSP*8+offes_intregs)(sp) /* not used */
        mov     %r8 ,(R8 *8+offes_intregs)(sp)
        mov     %r9 ,(R9 *8+offes_intregs)(sp)
        mov     %r10,(R10*8+offes_intregs)(sp)
        mov     %r11,(R11*8+offes_intregs)(sp)
        mov     %r12,(R12*8+offes_intregs)(sp)
        mov     %r13,(R13*8+offes_intregs)(sp)
        mov     %r14,(R14*8+offes_intregs)(sp)
        mov     %r15,(R15*8+offes_intregs)(sp)

        movq    %xmm0 ,(XMM0 *8+offes_fltregs)(sp)
        movq    %xmm1 ,(XMM1 *8+offes_fltregs)(sp)
        movq    %xmm2 ,(XMM2 *8+offes_fltregs)(sp)
        movq    %xmm3 ,(XMM3 *8+offes_fltregs)(sp)
        movq    %xmm4 ,(XMM4 *8+offes_fltregs)(sp)
        movq    %xmm5 ,(XMM5 *8+offes_fltregs)(sp)
        movq    %xmm6 ,(XMM6 *8+offes_fltregs)(sp)
        movq    %xmm7 ,(XMM7 *8+offes_fltregs)(sp)
        movq    %xmm8 ,(XMM8 *8+offes_fltregs)(sp)
        movq    %xmm9 ,(XMM9 *8+offes_fltregs)(sp)
        movq    %xmm10,(XMM10*8+offes_fltregs)(sp)
        movq    %xmm11,(XMM11*8+offes_fltregs)(sp)
        movq    %xmm12,(XMM12*8+offes_fltregs)(sp)
        movq    %xmm13,(XMM13*8+offes_fltregs)(sp)
        movq    %xmm14,(XMM14*8+offes_fltregs)(sp)
        movq    %xmm15,(XMM15*8+offes_fltregs)(sp)

        /* calculate sp of method */
        mov     sp,itmp1
        add     $(sizeexecutionstate + REPLACEMENT_ROOM + 8),itmp1
        mov     itmp1,(offes_sp)(sp)

        /* pv must be looked up via AVL tree */
        movq    $0,(offes_pv)(sp)

        /* call replace_me */
        mov     -8(itmp1),a0                /* rplpoint *                         */
        mov     sp,a1                       /* arg1: execution state              */
        call    replace_me@PLT              /* call C function replace_me         */
        call    abort@PLT                   /* NEVER REACHED                      */

/* asm_replacement_in **********************************************************

   This code writes the given execution state and jumps to the replacement
   code.

   This function never returns!

   C prototype:
      void asm_replacement_in(executionstate *es, replace_safestack_t *st);

*******************************************************************************/
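/* Rough sequence, as implemented below: switch to the safe stack, call
   replace_build_execution_state(st), switch to the new sp and push the new
   pc, allocate an executionstate_t on the new stack and pass it together
   with st to replace_free_safestack, then load the registers from that
   execution state, pop it and 'ret' into the replacement code.              */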

asm_replacement_in:
        /* get arguments */
        mov     a1,s1                       /* replace_safestack_t *st            */
        mov     a0,%rbp                     /* executionstate *es == safe stack   */

        /* switch to the safe stack */
        mov     %rbp,sp

        /* call replace_build_execution_state(st) */
        mov     s1,a0
        call    replace_build_execution_state@PLT

        /* set new sp */
        mov     (offes_sp)(%rbp),sp

        /* push address of new code */
        pushq   (offes_pc)(%rbp)

        /* allocate an executionstate_t on the stack */
        sub     $(sizeexecutionstate),sp

        /* call replace_free_safestack(st, address of the allocated executionstate_t) */
        mov     sp,a1
        mov     s1,a0
        call    replace_free_safestack@PLT

        /* copy registers from execution state */
        movq    (XMM0 *8+offes_fltregs)(sp),%xmm0
        movq    (XMM1 *8+offes_fltregs)(sp),%xmm1
        movq    (XMM2 *8+offes_fltregs)(sp),%xmm2
        movq    (XMM3 *8+offes_fltregs)(sp),%xmm3
        movq    (XMM4 *8+offes_fltregs)(sp),%xmm4
        movq    (XMM5 *8+offes_fltregs)(sp),%xmm5
        movq    (XMM6 *8+offes_fltregs)(sp),%xmm6
        movq    (XMM7 *8+offes_fltregs)(sp),%xmm7
        movq    (XMM8 *8+offes_fltregs)(sp),%xmm8
        movq    (XMM9 *8+offes_fltregs)(sp),%xmm9
        movq    (XMM10*8+offes_fltregs)(sp),%xmm10
        movq    (XMM11*8+offes_fltregs)(sp),%xmm11
        movq    (XMM12*8+offes_fltregs)(sp),%xmm12
        movq    (XMM13*8+offes_fltregs)(sp),%xmm13
        movq    (XMM14*8+offes_fltregs)(sp),%xmm14
        movq    (XMM15*8+offes_fltregs)(sp),%xmm15

        mov     (RAX*8+offes_intregs)(sp),%rax
        mov     (RBX*8+offes_intregs)(sp),%rbx
        mov     (RCX*8+offes_intregs)(sp),%rcx
        mov     (RDX*8+offes_intregs)(sp),%rdx
        mov     (RSI*8+offes_intregs)(sp),%rsi
        mov     (RDI*8+offes_intregs)(sp),%rdi
        mov     (RBP*8+offes_intregs)(sp),%rbp
        mov     (R8 *8+offes_intregs)(sp),%r8
        mov     (R9 *8+offes_intregs)(sp),%r9
        mov     (R10*8+offes_intregs)(sp),%r10
        mov     (R11*8+offes_intregs)(sp),%r11
        mov     (R12*8+offes_intregs)(sp),%r12
        mov     (R13*8+offes_intregs)(sp),%r13
        mov     (R14*8+offes_intregs)(sp),%r14
        mov     (R15*8+offes_intregs)(sp),%r15

        /* pop the execution state off the stack */
        add     $(sizeexecutionstate),sp

        /* jump to new code */
        ret

#endif /* defined(ENABLE_REPLACEMENT) */

/* asm_builtin_x2x *************************************************************
*                                                                              *
*   Wrapper functions for the float/double to int/long conversion corner      *
*   cases that are handled by the C builtins.                                  *
*                                                                              *
*******************************************************************************/
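/* The JIT passes the operand in ftmp1 rather than in fa0, hence the movq
   before each call.  Hedged C-level view of the wrapped builtins, assumed
   from the calls below:

      s4 builtin_f2i(float a);           s8 builtin_f2l(float a);
      s4 builtin_d2i(double a);          s8 builtin_d2l(double a);           */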

asm_builtin_f2i:
        sub     $(ARG_CNT*8),sp

        SAVE_ARGUMENT_REGISTERS(0)

        movq    ftmp1,fa0
        call    builtin_f2i@PLT

        RESTORE_ARGUMENT_REGISTERS(0)

        add     $(ARG_CNT*8),sp
        ret


asm_builtin_f2l:
        sub     $(ARG_CNT*8),sp

        SAVE_ARGUMENT_REGISTERS(0)

        movq    ftmp1,fa0
        call    builtin_f2l@PLT

        RESTORE_ARGUMENT_REGISTERS(0)

        add     $(ARG_CNT*8),sp
        ret


asm_builtin_d2i:
        sub     $(ARG_CNT*8),sp

        SAVE_ARGUMENT_REGISTERS(0)

        movq    ftmp1,fa0
        call    builtin_d2i@PLT

        RESTORE_ARGUMENT_REGISTERS(0)

        add     $(ARG_CNT*8),sp
        ret


asm_builtin_d2l:
        sub     $(ARG_CNT*8),sp

        SAVE_ARGUMENT_REGISTERS(0)

        movq    ftmp1,fa0
        call    builtin_d2l@PLT

        RESTORE_ARGUMENT_REGISTERS(0)

        add     $(ARG_CNT*8),sp
        ret


/* asm_compare_and_swap ********************************************************

   Does an atomic compare and swap.  Required for the lock
   implementation.

*******************************************************************************/
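/* Hedged C-level view of the operation below (performed atomically by the
   lock-prefixed cmpxchg); the argument order is assumed from the register
   usage: a0 = address, a1 = expected old value, a2 = new value.  The old
   value is returned in v0 (%rax):

      long asm_compare_and_swap(volatile long *p, long oldval, long newval)
      {
          long old = *p;
          if (old == oldval)
              *p = newval;
          return old;
      }                                                                      */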

asm_compare_and_swap:
        mov     a1,v0                       /* v0 is %rax                         */
        lock cmpxchg a2,(a0)
        ret


/* asm_memory_barrier **********************************************************

   A memory barrier for the Java Memory Model.

*******************************************************************************/
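/* Presumed C prototype: void asm_memory_barrier(void);  the mfence below
   orders all preceding loads and stores before all following ones.          */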

asm_memory_barrier:
        mfence
        ret


/* disable exec-stacks ********************************************************/

#if defined(__linux__) && defined(__ELF__)
        .section .note.GNU-stack,"",%progbits
#endif


/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: asm
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 * vim:noexpandtab:sw=4:ts=4:
 */