Reinhard Grafl
Christian Thalinger
- $Id: asmpart.S 2252 2005-04-07 11:20:40Z twisti $
+ $Id: asmpart.S 2265 2005-04-11 09:58:52Z twisti $
*/
#include "vm/jit/x86_64/asmoffsets.h"
+/* XXX Don't remove this!!! ***************************************************/
+
+#if offobjvftbl != 0
+#error OFFSET(java_objectheader, vftbl) != 0: this will break code patching!
+#endif
+
+#if offvftblinterfacetable != 0
+#error OFFSET(vftbl_t, interfacetable[0]) != 0: this will break code patching!
+#endif
+
+/******************************************************************************/
+
+
/* define it like the risc way */
#define v0 %rax
+#define v0l %eax
#define a0 %rdi
#define a1 %rsi
#define xpc itmp2
+/* save and restore macros ****************************************************/
+
+#define SAVE_ARGUMENT_REGISTERS \
+ mov a0,0*8(%rsp) ; \
+ mov a1,1*8(%rsp) ; \
+ mov a2,2*8(%rsp) ; \
+ mov a3,3*8(%rsp) ; \
+ mov a4,4*8(%rsp) ; \
+ mov a5,5*8(%rsp) ; \
+ movq fa0,6*8(%rsp) ; \
+ movq fa1,7*8(%rsp) ; \
+ movq fa2,8*8(%rsp) ; \
+ movq fa3,9*8(%rsp) ; \
+ movq fa4,10*8(%rsp) ; \
+ movq fa5,11*8(%rsp) ; \
+ movq fa6,12*8(%rsp) ; \
+ movq fa7,13*8(%rsp) ;
+
+
+#define RESTORE_ARGUMENT_REGISTERS \
+ mov 0*8(%rsp),a0 ; \
+ mov 1*8(%rsp),a1 ; \
+ mov 2*8(%rsp),a2 ; \
+ mov 3*8(%rsp),a3 ; \
+ mov 4*8(%rsp),a4 ; \
+ mov 5*8(%rsp),a5 ; \
+ movq 6*8(%rsp),fa0 ; \
+ movq 7*8(%rsp),fa1 ; \
+ movq 8*8(%rsp),fa2 ; \
+ movq 9*8(%rsp),fa3 ; \
+ movq 10*8(%rsp),fa4 ; \
+ movq 11*8(%rsp),fa5 ; \
+ movq 12*8(%rsp),fa6 ; \
+ movq 13*8(%rsp),fa7 ;
+
+
+#define SAVE_TEMPORARY_REGISTERS \
+ mov %rbx,14*8(%rsp)
+
+
+#define RESTORE_TEMPORARY_REGISTERS \
+ mov 14*8(%rsp),%rbx
+
+
.text
.globl asm_handle_exception
.globl asm_handle_nat_exception
- .globl asm_check_clinit
-
+ .globl asm_get_putstatic
+ .globl asm_get_putfield
.globl asm_builtin_new
- .globl asm_invokespecial
+ .globl asm_builtin_newarray
+ .globl asm_builtin_multianewarray
+ .globl asm_invokestatic_special
+ .globl asm_invokevirtual
+ .globl asm_invokeinterface
+ .globl asm_checkcast_interface
+ .globl asm_checkcast_class
+ .globl asm_check_clinit
.globl asm_builtin_checkarraycast
.globl asm_builtin_aastore
jmp ex_stack_loop
-/* asm_check_clinit ************************************************************
+/* asm_get_putstatic ***********************************************************
- DOCUMENT ME!!!
+ XXX
- Arguments:
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved field reference
+
+*******************************************************************************/
+
+asm_get_putstatic:
+ sub $(15*8),%rsp /* stack frame (16-byte aligned) */
- itmp1 class ; pointer to class
- itmp2 mcode ; machine code to patch back in
+ SAVE_ARGUMENT_REGISTERS
+ SAVE_TEMPORARY_REGISTERS
- Stack layout:
+ mov (0+15)*8(%rsp),a0 /* pass unresolved_field pointer */
+ call helper_resolve_fieldinfo_value_address /* call the helper function */
- 0 ra ; return address of patched call in java machine code
+ RESTORE_ARGUMENT_REGISTERS
+ RESTORE_TEMPORARY_REGISTERS
+
+ mov (1+15)*8(%rsp),itmp3 /* get machine code */
+ add $((2+15)*8),%rsp /* remove stack frame, keep ra */
+
+ test v0,v0 /* exception thrown? */
+ jz L_asm_codepatcher_exception
+
+ pop itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ movswq 3(itmp2),itmp3 /* get %rip offset */
+ add itmp2,itmp3 /* add return address (%rip) */
+ add $7,itmp3 /* add mov instruction size */
+ mov v0,(itmp3) /* move field address to data segment */
+ jmp *itmp2 /* call new patched code */
+
+L_asm_codepatcher_exception:
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ call builtin_asm_get_exceptionptrptr
+ mov v0,itmp2
+#else
+ lea _exceptionptr,itmp2
+#endif
+ mov (itmp2),xptr /* get the exception pointer */
+ movl $0,(itmp2) /* clear the exception pointer */
+
+ pop xpc /* get and remove return address */
+ sub $5,xpc /* faulting address is ra - 5 */
+ jmp asm_handle_exception
+
+
+/* asm_get_putfield ************************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved field reference
*******************************************************************************/
-asm_check_clinit:
- mov offclassinit(itmp1),itmp3l /* get initialized flag (int) */
- test itmp3,itmp3
- jnz L_is_initialized
-
- sub $(17*8),%rsp /* keep stack 16-byte aligned */
-
- mov a0,0*8(%rsp) /* save argument registers */
- mov a1,1*8(%rsp)
- mov a2,2*8(%rsp)
- mov a3,3*8(%rsp)
- mov a4,4*8(%rsp)
- mov a5,5*8(%rsp)
-
- movq fa0,6*8(%rsp) /* maybe cacao does not use all 8 */
- movq fa1,7*8(%rsp) /* argument register, but who knows */
- movq fa2,8*8(%rsp)
- movq fa3,9*8(%rsp)
- movq fa4,10*8(%rsp)
- movq fa5,11*8(%rsp)
- movq fa6,12*8(%rsp)
- movq fa7,13*8(%rsp)
-
- mov itmp2,14*8(%rsp) /* save machine code */
-
- mov itmp1,a0 /* pass classinfo pointer */
- call initialize_class /* call class initialize function */
-
- mov 0*8(%rsp),a0 /* restore argument registers */
- mov 1*8(%rsp),a1
- mov 2*8(%rsp),a2
- mov 3*8(%rsp),a3
- mov 4*8(%rsp),a4
- mov 5*8(%rsp),a5
-
- movq 6*8(%rsp),fa0
- movq 7*8(%rsp),fa1
- movq 8*8(%rsp),fa2
- movq 9*8(%rsp),fa3
- movq 10*8(%rsp),fa4
- movq 11*8(%rsp),fa5
- movq 12*8(%rsp),fa6
- movq 13*8(%rsp),fa7
-
- mov 14*8(%rsp),itmp2 /* restore machine code */
-
- add $(17*8),%rsp /* remove stack frame */
-
- test v0,v0 /* we had an exception */
- je L_initializererror
-
-L_is_initialized:
- pop itmp1 /* get return address */
- sub $5,itmp1 /* remove size of `call rel32' */
- mov itmp2,(itmp1) /* patch back in 8 bytes */
- jmp *itmp1 /* jump to patched code an execute it */
-
-L_initializererror:
+asm_get_putfield:
+ sub $(17*8),%rsp /* stack frame (16-byte aligned) */
+
+ SAVE_ARGUMENT_REGISTERS
+ SAVE_TEMPORARY_REGISTERS
+
+ mov itmp1,15*8(%rsp) /* save itmp1 and itmp2 */
+ mov itmp2,16*8(%rsp) /* can be used by field instructions */
+
+ mov (0+17)*8(%rsp),a0 /* pass unresolved_field pointer */
+ call helper_resolve_fieldinfo_offset /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+ RESTORE_TEMPORARY_REGISTERS
+
+ cmp $-1,v0l /* exception thrown? test for -1, */
+ /* because field offset can be 0 */
+ jz L_asm_codepatcher_exception_with_stack_frame
+
+ mov (1+17)*8(%rsp),itmp3 /* get machine code */
+ mov (2+17)*8(%rsp),itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ cmpb $0xf2,(itmp2) /* test for movsd */
+ je L_asm_get_putfield_float
+ cmpb $0xf3,(itmp2) /* test for movss */
+ je L_asm_get_putfield_float
+ cmpb $0x24,3(itmp2) /* check for (%rsp) or (%r12) */
+ je L_asm_get_putfield_r12_membase
+
+ mov v0l,3(itmp2) /* patch field offset */
+ jmp L_asm_get_putfield_normal
+
+L_asm_get_putfield_float:
+ mov v0l,5(itmp2) /* patch field offset (position + 2) */
+ jmp L_asm_get_putfield_normal
+
+L_asm_get_putfield_r12_membase:
+ mov v0l,4(itmp2) /* patch field offset (position + 1) */
+
+L_asm_get_putfield_normal:
+ mov itmp2,itmp3
+
+ mov 15*8(%rsp),itmp1 /* restore itmp1 and itmp2 */
+ mov 16*8(%rsp),itmp2 /* can be used by field instructions */
+
+ add $((3+17)*8),%rsp /* remove stack frame */
+ jmp *itmp3 /* call new patched code */
+
+L_asm_codepatcher_exception_with_stack_frame:
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
call builtin_asm_get_exceptionptrptr
mov %rax,itmp2
mov (itmp2),xptr /* get the exception pointer */
movl $0,(itmp2) /* clear the exception pointer */
- pop xpc /* delete return address */
+ mov (2+17)*8(%rsp),xpc /* get return address */
sub $5,xpc /* faulting address is ra - 5 */
+ add $((3+17)*8),%rsp /* remove stack frame */
jmp asm_handle_exception
-
+
/* asm_builtin_new *************************************************************
asm_builtin_new:
sub $(8*1),%rsp /* stack frame (16-byte aligned) */
- call asm_builtin_new_helper /* call the helper function */
+ call helper_resolve_classinfo /* call the helper function */
add $(8*1),%rsp /* remove stack frame */
test v0,v0 /* exception thrown? */
- jz L_asm_builtin_new_exception
+ jz L_asm_codepatcher_exception
pop itmp2 /* get return address */
sub $(3+10+10),itmp2 /* 3 (callq) + 10 (movi) + 10 (movi) */
mov itmp1,12(itmp2) /* patch back function address */
jmp *itmp2 /* call new patched code */
-L_asm_builtin_new_exception:
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- call builtin_asm_get_exceptionptrptr
- mov %rax,itmp2
-#else
- lea _exceptionptr,itmp2
-#endif
- mov (itmp2),xptr /* get the exception pointer */
- movl $0,(itmp2) /* clear the exception pointer */
- pop xpc /* get and remove return address */
- sub $5,xpc /* faulting address is ra - 5 */
- jmp asm_handle_exception
+/* asm_builtin_newarray ********************************************************
+
+ XXX
+ Arguments:
+ a1 contains the class reference
-/* asm_builtin_new *************************************************************
+*******************************************************************************/
+
+asm_builtin_newarray:
+ sub $(8*1),%rsp /* stack frame (16-byte aligned) */
+ mov a0,0*8(%rsp) /* save argument */
+ mov a1,a0 /* pass class reference */
+ call helper_resolve_classinfo_vftbl /* call the helper function */
+ mov 0*8(%rsp),a0 /* restore argument */
+ add $(8*1),%rsp /* remove stack frame */
+ test v0,v0 /* exception thrown? */
+ jz L_asm_codepatcher_exception
+
+ pop itmp2 /* get return address */
+ sub $(3+10+10),itmp2 /* 3 (callq) + 10 (movi) + 10 (movi) */
+ mov v0,2(itmp2) /* patch in new vftbl*: 2 (mov) */
+
+ lea builtin_newarray,itmp1 /* get address from builtin_newarray */
+ mov itmp1,12(itmp2) /* patch back function address */
+ jmp *itmp2 /* call new patched code */
+
+
+/* asm_builtin_multianewarray **************************************************
XXX
Arguments:
- itmp1 contains the method reference
- itmp2 machine code (which is patched back later)
+ a1 contains the class reference
+
+*******************************************************************************/
+
+asm_builtin_multianewarray:
+ sub $(1*8),%rsp /* stack frame (16-byte aligned) */
+ mov a1,a0 /* pass class reference */
+ call helper_resolve_classinfo_vftbl /* call the helper function */
+ add $(1*8),%rsp /* remove stack frame */
+ test v0,v0 /* exception thrown? */
+ jz L_asm_codepatcher_exception
+
+ pop itmp2 /* get return address */
+ sub $(3+10+3+10+10),itmp2 /* go back to a0 mov */
+ mov v0,10+2(itmp2) /* patch in new vftbl*: 10 (movi) + 2 */
+
+ lea builtin_nmultianewarray,itmp1 /* get function address */
+ mov itmp1,10+10+3+2(itmp2) /* patch back function address */
+ jmp *itmp2 /* call new patched code */
+
+
+/* asm_invokestatic_special ****************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved method reference
+
+*******************************************************************************/
+
+asm_invokestatic_special:
+ sub $(15*8),%rsp /* stack frame (16-byte aligned) */
+
+ SAVE_ARGUMENT_REGISTERS
+
+ mov (0+15)*8(%rsp),a0 /* pass unresolved_method pointer */
+ call helper_resolve_methodinfo_stubroutine /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+
+ mov (1+15)*8(%rsp),itmp3 /* get machine code */
+ add $((2+15)*8),%rsp /* remove stack frame, keep ra */
+
+ test v0,v0 /* exception thrown? */
+ jz L_asm_codepatcher_exception
+
+ pop itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ mov v0,2(itmp2) /* patch stubroutine: 2 (mov) */
+ jmp *itmp2 /* call new patched code */
+
+
+/* asm_invokevirtual ***********************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved method reference
+
+*******************************************************************************/
+
+asm_invokevirtual:
+ sub $(15*8),%rsp /* stack frame (16-byte aligned) */
+
+ SAVE_ARGUMENT_REGISTERS
+
+ mov (0+15)*8(%rsp),a0 /* pass unresolved_method pointer */
+ call helper_resolve_methodinfo_vftblindex /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+
+ mov (1+15)*8(%rsp),itmp3 /* get machine code */
+ add $((2+15)*8),%rsp /* remove stack frame, keep ra */
+
+ cmp $-1,v0l /* exception thrown? test for -1, */
+ /* because vftblindex can be 0 */
+ je L_asm_codepatcher_exception
+
+ pop itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ shl $3,v0l /* sizeof(methodptr) * lm->vftblindex */
+ add $offvftbltable,v0l /* + OFFSET(vftbl_t, table[0]) */
+ movl v0l,3+3(itmp2) /* patch 32 bit offset: 3 (mov) + 3 */
+ jmp *itmp2 /* call new patched code */
+
+
+/* asm_invokeinterface *********************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved method reference
*******************************************************************************/
-asm_invokespecial:
+asm_invokeinterface:
sub $(15*8),%rsp /* stack frame (16-byte aligned) */
- mov a0,0*8(%rsp) /* save integer argument registers */
- mov a1,1*8(%rsp)
- mov a2,2*8(%rsp)
- mov a3,3*8(%rsp)
- mov a4,4*8(%rsp)
- mov a5,5*8(%rsp)
-
- movq fa0,6*8(%rsp) /* save float argument register */
- movq fa1,7*8(%rsp)
- movq fa2,8*8(%rsp)
- movq fa3,9*8(%rsp)
- movq fa4,10*8(%rsp)
- movq fa5,11*8(%rsp)
- movq fa6,12*8(%rsp)
- movq fa7,13*8(%rsp)
-
- mov itmp2,14*8(%rsp) /* save machine code */
-
- mov itmp1,a0 /* pass unresolved_method pointer */
- call asm_invokespecial_helper /* call the helper function */
-
- mov 0*8(%rsp),a0 /* restore integer argument registers */
- mov 1*8(%rsp),a1
- mov 2*8(%rsp),a2
- mov 3*8(%rsp),a3
- mov 4*8(%rsp),a4
- mov 5*8(%rsp),a5
-
- movq 6*8(%rsp),fa0 /* restore float argument register */
- movq 7*8(%rsp),fa1
- movq 8*8(%rsp),fa2
- movq 9*8(%rsp),fa3
- movq 10*8(%rsp),fa4
- movq 11*8(%rsp),fa5
- movq 12*8(%rsp),fa6
- movq 13*8(%rsp),fa7
-
- mov 14*8(%rsp),itmp3 /* restore machine code */
-
- add $(15*8),%rsp /* remove stack frame */
+ SAVE_ARGUMENT_REGISTERS
+
+ mov (0+15)*8(%rsp),a0 /* pass unresolved_method pointer */
+ call helper_resolve_methodinfo /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+
+ mov (1+15)*8(%rsp),itmp3 /* get machine code */
+ add $((2+15)*8),%rsp /* remove stack frame, keep ra */
test v0,v0 /* exception thrown? */
- jz L_asm_builtin_new_exception
+ jz L_asm_codepatcher_exception
pop itmp2 /* get return address */
sub $5,itmp2 /* remove size of `call rel32' */
mov itmp3,(itmp2) /* patch back original code (8 bytes) */
- mov v0,5(itmp2) /* patch stubroutine: 3 + 2 (mov) */
+ mov offmethodclass(v0),itmp3 /* ci = lm->class */
+ mov offclassindex(itmp3),itmp3l /* ci->index (s4) */
+ shl $3,itmp3l /* * sizeof(methodptr) */
+ neg itmp3l /* OFFSET(vftbl_t, interfacetable[0]) */
+ /* XXX the offset is always 0!!! */
+ mov itmp3l,3+3(itmp2) /* patch 32 bit offset: 3 (mov) + 3 */
+
+ mov offmethodclass(v0),itmp3 /* ci = lm->class */
+ sub offclassmethods(itmp3),v0 /* lm - ci->methods */
+
+ push %rdx
+ xor %rdx,%rdx
+ mov $sizemethodinfo,itmp3l
+ idiv itmp3l
+ pop %rdx
+
+ shl $3,v0 /* * sizeof(methodptr) */
+ mov v0l,3+7+3(itmp2) /* patch 32bit offset: 3 + 7 (mov) + 3*/
jmp *itmp2 /* call new patched code */
+/* asm_checkcast_interface *****************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved field reference
+
+*******************************************************************************/
+
+asm_checkcast_interface:
+ sub $(17*8),%rsp /* stack frame (16-byte aligned) */
+
+ SAVE_ARGUMENT_REGISTERS
+ SAVE_TEMPORARY_REGISTERS
+
+ mov itmp1,15*8(%rsp) /* save itmp1 and itmp2 */
+ mov itmp2,16*8(%rsp) /* can be used by CHECKCAST */
+
+ mov (0+17)*8(%rsp),a0 /* pass unresolved class reference */
+ call helper_resolve_classinfo_index /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+ RESTORE_TEMPORARY_REGISTERS
+
+ cmp $-1,v0l /* exception thrown? test for -1, */
+ /* because class index can be 0 */
+ jz L_asm_codepatcher_exception_with_stack_frame
+
+ mov (1+17)*8(%rsp),itmp3 /* get machine code */
+ mov (2+17)*8(%rsp),itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ mov v0l,7+3(itmp2) /* patch super->index */
+
+ shl $3,v0l /* super->index * sizeof(methodptr) */
+ neg v0l /* OFFSET(vftbl_t, interfacetable[0]) */
+ /* XXX the offset is always 0!!! */
+ mov v0l,7+7+3+6+3(itmp2) /* patch calculated value */
+
+ mov itmp2,itmp3 /* move return address */
+ mov 15*8(%rsp),itmp1 /* restore itmp1 and itmp2 */
+ mov 16*8(%rsp),itmp2 /* can be used by field instructions */
+ add $((3+17)*8),%rsp /* remove stack frame */
+ jmp *itmp3 /* call new patched code */
+
+
+/* asm_checkcast_class *********************************************************
+
+ XXX
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 unresolved field reference
+
+*******************************************************************************/
+
+asm_checkcast_class:
+ sub $(17*8),%rsp /* stack frame (16-byte aligned) */
+
+ SAVE_ARGUMENT_REGISTERS
+ SAVE_TEMPORARY_REGISTERS
+
+ mov itmp1,15*8(%rsp) /* save itmp1 and itmp2 */
+ mov itmp2,16*8(%rsp) /* can be used by CHECKCAST */
+
+ mov (0+17)*8(%rsp),a0 /* pass unresolved class reference */
+ call helper_resolve_classinfo_vftbl /* call the helper function */
+
+ RESTORE_ARGUMENT_REGISTERS
+ RESTORE_TEMPORARY_REGISTERS
+
+ test v0,v0 /* exception thrown? */
+ jz L_asm_codepatcher_exception_with_stack_frame
+
+ mov (1+17)*8(%rsp),itmp3 /* get machine code */
+ mov (2+17)*8(%rsp),itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back original code (8 bytes) */
+
+ mov v0,2(itmp2) /* patch super->vftbl */
+ mov v0,10+7+7+3+2(itmp2) /* patch super->vftbl */
+
+ mov itmp2,itmp3 /* move return address */
+ mov 15*8(%rsp),itmp1 /* restore itmp1 and itmp2 */
+ mov 16*8(%rsp),itmp2 /* can be used by field instructions */
+ add $((3+17)*8),%rsp /* remove stack frame */
+ jmp *itmp3 /* call new patched code */
+
+
+/* asm_check_clinit ************************************************************
+
+ DOCUMENT ME!!!
+
+ Stack layout:
+ 16 return address
+ 8 machine code (which is patched back later)
+ 0 classinfo pointer
+
+*******************************************************************************/
+
+asm_check_clinit:
+ mov 0*8(%rsp),itmp1 /* get classinfo pointer */
+ mov offclassinit(itmp1),itmp2l /* get initialized flag (int) */
+ test itmp2l,itmp2l
+ jnz L_asm_check_clinit_is_initialized
+
+ sub $(15*8),%rsp /* keep stack 16-byte aligned */
+
+ SAVE_ARGUMENT_REGISTERS
+ SAVE_TEMPORARY_REGISTERS
+
+ mov (0+15)*8(%rsp),a0 /* pass classinfo pointer */
+ call initialize_class /* call class initialize function */
+
+ RESTORE_ARGUMENT_REGISTERS
+ RESTORE_TEMPORARY_REGISTERS
+
+ add $(15*8),%rsp /* remove stack frame, keep ra */
+
+ test v0,v0 /* exception thrown? */
+ je L_asm_check_clinit_exception
+
+L_asm_check_clinit_is_initialized:
+ add $(1*8),%rsp /* remove classinfo pointer */
+ pop itmp3 /* get machine code */
+ pop itmp2 /* get return address */
+ sub $5,itmp2 /* remove size of `call rel32' */
+ mov itmp3,(itmp2) /* patch back in 8 bytes */
+ jmp *itmp2 /* jump to patched code and execute it */
+
+L_asm_check_clinit_exception:
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ call builtin_asm_get_exceptionptrptr
+ mov v0,itmp2
+#else
+ lea _exceptionptr,itmp2
+#endif
+ mov (itmp2),xptr /* get the exception pointer */
+ movl $0,(itmp2) /* clear the exception pointer */
+
+ add $(2*8),%rsp /* remove stack frame, keep ra */
+ pop xpc /* delete return address */
+ sub $5,xpc /* faulting address is ra - 5 */
+ jmp asm_handle_exception
+
+
/********************* function asm_builtin_monitorenter ***********************
* *
* Does null check and calls monitorenter or throws an exception *
asm_builtin_f2i:
sub $(14*8),%rsp
- mov %rdi,0*8(%rsp)
- mov %rsi,1*8(%rsp)
- mov %rdx,2*8(%rsp)
- mov %rcx,3*8(%rsp)
- mov %r8,4*8(%rsp)
- mov %r9,5*8(%rsp)
-
- movq %xmm0,6*8(%rsp)
- movq %xmm1,7*8(%rsp)
- movq %xmm2,8*8(%rsp)
- movq %xmm3,9*8(%rsp)
- movq %xmm4,10*8(%rsp)
- movq %xmm5,11*8(%rsp)
- movq %xmm6,12*8(%rsp)
- movq %xmm7,13*8(%rsp)
+ SAVE_ARGUMENT_REGISTERS
movq %xmm8,%xmm0
call builtin_f2i
- mov 0*8(%rsp),%rdi
- mov 1*8(%rsp),%rsi
- mov 2*8(%rsp),%rdx
- mov 3*8(%rsp),%rcx
- mov 4*8(%rsp),%r8
- mov 5*8(%rsp),%r9
-
- movq 6*8(%rsp),%xmm0
- movq 7*8(%rsp),%xmm1
- movq 8*8(%rsp),%xmm2
- movq 9*8(%rsp),%xmm3
- movq 10*8(%rsp),%xmm4
- movq 11*8(%rsp),%xmm5
- movq 12*8(%rsp),%xmm6
- movq 13*8(%rsp),%xmm7
+ RESTORE_ARGUMENT_REGISTERS
add $(14*8),%rsp
ret
asm_builtin_f2l:
sub $(14*8),%rsp
- mov %rdi,0*8(%rsp)
- mov %rsi,1*8(%rsp)
- mov %rdx,2*8(%rsp)
- mov %rcx,3*8(%rsp)
- mov %r8,4*8(%rsp)
- mov %r9,5*8(%rsp)
-
- movq %xmm0,6*8(%rsp)
- movq %xmm1,7*8(%rsp)
- movq %xmm2,8*8(%rsp)
- movq %xmm3,9*8(%rsp)
- movq %xmm4,10*8(%rsp)
- movq %xmm5,11*8(%rsp)
- movq %xmm6,12*8(%rsp)
- movq %xmm7,13*8(%rsp)
+ SAVE_ARGUMENT_REGISTERS
movq %xmm8,%xmm0
call builtin_f2l
- mov 0*8(%rsp),%rdi
- mov 1*8(%rsp),%rsi
- mov 2*8(%rsp),%rdx
- mov 3*8(%rsp),%rcx
- mov 4*8(%rsp),%r8
- mov 5*8(%rsp),%r9
-
- movq 6*8(%rsp),%xmm0
- movq 7*8(%rsp),%xmm1
- movq 8*8(%rsp),%xmm2
- movq 9*8(%rsp),%xmm3
- movq 10*8(%rsp),%xmm4
- movq 11*8(%rsp),%xmm5
- movq 12*8(%rsp),%xmm6
- movq 13*8(%rsp),%xmm7
+ RESTORE_ARGUMENT_REGISTERS
add $(14*8),%rsp
ret
asm_builtin_d2i:
sub $(14*8),%rsp
- mov %rdi,0*8(%rsp)
- mov %rsi,1*8(%rsp)
- mov %rdx,2*8(%rsp)
- mov %rcx,3*8(%rsp)
- mov %r8,4*8(%rsp)
- mov %r9,5*8(%rsp)
-
- movq %xmm0,6*8(%rsp)
- movq %xmm1,7*8(%rsp)
- movq %xmm2,8*8(%rsp)
- movq %xmm3,9*8(%rsp)
- movq %xmm4,10*8(%rsp)
- movq %xmm5,11*8(%rsp)
- movq %xmm6,12*8(%rsp)
- movq %xmm7,13*8(%rsp)
+ SAVE_ARGUMENT_REGISTERS
movq %xmm8,%xmm0
call builtin_d2i
- mov 0*8(%rsp),%rdi
- mov 1*8(%rsp),%rsi
- mov 2*8(%rsp),%rdx
- mov 3*8(%rsp),%rcx
- mov 4*8(%rsp),%r8
- mov 5*8(%rsp),%r9
-
- movq 6*8(%rsp),%xmm0
- movq 7*8(%rsp),%xmm1
- movq 8*8(%rsp),%xmm2
- movq 9*8(%rsp),%xmm3
- movq 10*8(%rsp),%xmm4
- movq 11*8(%rsp),%xmm5
- movq 12*8(%rsp),%xmm6
- movq 13*8(%rsp),%xmm7
+ RESTORE_ARGUMENT_REGISTERS
add $(14*8),%rsp
ret
asm_builtin_d2l:
sub $(14*8),%rsp
- mov %rdi,0*8(%rsp)
- mov %rsi,1*8(%rsp)
- mov %rdx,2*8(%rsp)
- mov %rcx,3*8(%rsp)
- mov %r8,4*8(%rsp)
- mov %r9,5*8(%rsp)
-
- movq %xmm0,6*8(%rsp)
- movq %xmm1,7*8(%rsp)
- movq %xmm2,8*8(%rsp)
- movq %xmm3,9*8(%rsp)
- movq %xmm4,10*8(%rsp)
- movq %xmm5,11*8(%rsp)
- movq %xmm6,12*8(%rsp)
- movq %xmm7,13*8(%rsp)
+ SAVE_ARGUMENT_REGISTERS
movq %xmm8,%xmm0
call builtin_d2l
- mov 0*8(%rsp),%rdi
- mov 1*8(%rsp),%rsi
- mov 2*8(%rsp),%rdx
- mov 3*8(%rsp),%rcx
- mov 4*8(%rsp),%r8
- mov 5*8(%rsp),%r9
-
- movq 6*8(%rsp),%xmm0
- movq 7*8(%rsp),%xmm1
- movq 8*8(%rsp),%xmm2
- movq 9*8(%rsp),%xmm3
- movq 10*8(%rsp),%xmm4
- movq 11*8(%rsp),%xmm5
- movq 12*8(%rsp),%xmm6
- movq 13*8(%rsp),%xmm7
+ RESTORE_ARGUMENT_REGISTERS
add $(14*8),%rsp
ret
Authors: Andreas Krall
Christian Thalinger
- $Id: codegen.c 2248 2005-04-06 16:06:26Z twisti $
+ $Id: codegen.c 2265 2005-04-11 09:58:52Z twisti $
*/
void codegen(methodinfo *m, codegendata *cd, registerdata *rd)
{
- s4 len, s1, s2, s3, d;
- s8 a;
- s4 parentargs_base;
+ s4 len, s1, s2, s3, d;
+ ptrint a;
+ s4 parentargs_base;
stackptr src;
varinfo *var;
basicblock *bptr;
/* Keep stack of non-leaf functions 16-byte aligned for calls into native */
/* code e.g. libc or jni (alignment problems with movaps). */
- if (!m->isleafmethod || runverbose) {
+ if (!m->isleafmethod || runverbose)
parentargs_base |= 0x1;
- }
/* create method header */
break;
+ case ICMD_GETSTATIC: /* ... ==> ..., value */
+ /* op1 = type, val.a = field address */
+
+ if (!iptr->val.a) {
+ unresolved_field *uf = iptr->target;
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putstatic, uf);
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr->val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, fi->class);
+ }
+
+ a = (ptrint) &(fi->value);
+ }
+
+ /* This approach is much faster than moving the field address */
+ /* inline into a register. */
+ a = dseg_addaddress(cd, a);
+ x86_64_mov_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP2);
+ switch (iptr->op1) {
+ case TYPE_INT:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ x86_64_movl_membase_reg(cd, REG_ITMP2, 0, d);
+ store_reg_to_var_int(iptr->dst, d);
+ break;
+ case TYPE_LNG:
+ case TYPE_ADR:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ x86_64_mov_membase_reg(cd, REG_ITMP2, 0, d);
+ store_reg_to_var_int(iptr->dst, d);
+ break;
+ case TYPE_FLT:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ x86_64_movss_membase_reg(cd, REG_ITMP2, 0, d);
+ store_reg_to_var_flt(iptr->dst, d);
+ break;
+ case TYPE_DBL:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ x86_64_movsd_membase_reg(cd, REG_ITMP2, 0, d);
+ store_reg_to_var_flt(iptr->dst, d);
+ break;
+ }
+ break;
+
case ICMD_PUTSTATIC: /* ..., value ==> ... */
/* op1 = type, val.a = field address */
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, ((fieldinfo *) iptr->val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
+ if (!iptr->val.a) {
+ unresolved_field *uf = iptr->target;
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putstatic, uf);
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr->val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, fi->class);
+
+ if (showdisassemble) {
+ x86_64_nop(cd);
+ x86_64_nop(cd);
+ x86_64_nop(cd);
+ x86_64_nop(cd);
+ x86_64_nop(cd);
+ }
}
+
+ a = (ptrint) &(fi->value);
}
/* This approach is much faster than moving the field address */
/* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
- x86_64_mov_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 7) - (s8) cd->mcodebase) + a, REG_ITMP2);
+ a = dseg_addaddress(cd, a);
+ x86_64_mov_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP2);
switch (iptr->op1) {
case TYPE_INT:
var_to_reg_int(s2, src, REG_ITMP1);
/* op1 = type, val.a = field address (in */
/* following NOP) */
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr[1].val.a)->class->initialized) {
- codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, ((fieldinfo *) iptr[1].val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
+ if (!iptr[1].val.a) {
+ unresolved_field *uf = iptr[1].target;
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putstatic, uf);
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr[1].val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, fi->class);
}
+
+ a = (ptrint) &(fi->value);
}
/* This approach is much faster than moving the field address */
/* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr[1].val.a)->value));
+ a = dseg_addaddress(cd, a);
x86_64_mov_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP1);
switch (iptr->op1) {
case TYPE_INT:
}
break;
- case ICMD_GETSTATIC: /* ... ==> ..., value */
- /* op1 = type, val.a = field address */
+ case ICMD_GETFIELD: /* ... ==> ..., value */
+ /* op1 = type, val.i = field offset */
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addpatchref(cd, cd->mcodeptr, asm_check_clinit, ((fieldinfo *) iptr->val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
- }
- }
+ var_to_reg_int(s1, src, REG_ITMP1);
+ gen_nullptr_check(s1);
+
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putfield,
+ (unresolved_field *) iptr->target);
+ a = 0;
+ } else
+ a = ((fieldinfo *) (iptr->val.a))->offset;
- /* This approach is much faster than moving the field address */
- /* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
- x86_64_mov_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 7) - (s8) cd->mcodebase) + a, REG_ITMP2);
switch (iptr->op1) {
case TYPE_INT:
d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP2, 0, d);
+ x86_64_movl_membase32_reg(cd, s1, a, d);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_LNG:
case TYPE_ADR:
d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_mov_membase_reg(cd, REG_ITMP2, 0, d);
+ x86_64_mov_membase32_reg(cd, s1, a, d);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movss_membase_reg(cd, REG_ITMP2, 0, d);
+ d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ x86_64_movss_membase32_reg(cd, s1, a, d);
store_reg_to_var_flt(iptr->dst, d);
break;
case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movsd_membase_reg(cd, REG_ITMP2, 0, d);
+ d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ x86_64_movsd_membase32_reg(cd, s1, a, d);
store_reg_to_var_flt(iptr->dst, d);
break;
}
case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
/* op1 = type, val.i = field offset */
- a = ((fieldinfo *)(iptr->val.a))->offset;
var_to_reg_int(s1, src->prev, REG_ITMP1);
gen_nullptr_check(s1);
+ if (IS_INT_LNG_TYPE(iptr->op1)) {
+ var_to_reg_int(s2, src, REG_ITMP2);
+ } else {
+ var_to_reg_flt(s2, src, REG_FTMP2);
+ }
+
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putfield,
+ (unresolved_field *) iptr->target);
+ a = 0;
+ } else
+ a = ((fieldinfo *) (iptr->val.a))->offset;
+
switch (iptr->op1) {
case TYPE_INT:
- var_to_reg_int(s2, src, REG_ITMP2);
- x86_64_movl_reg_membase(cd, s2, s1, a);
+ x86_64_movl_reg_membase32(cd, s2, s1, a);
break;
case TYPE_LNG:
case TYPE_ADR:
- var_to_reg_int(s2, src, REG_ITMP2);
- x86_64_mov_reg_membase(cd, s2, s1, a);
+ x86_64_mov_reg_membase32(cd, s2, s1, a);
break;
case TYPE_FLT:
- var_to_reg_flt(s2, src, REG_FTMP2);
- x86_64_movss_reg_membase(cd, s2, s1, a);
+ x86_64_movss_reg_membase32(cd, s2, s1, a);
break;
case TYPE_DBL:
- var_to_reg_flt(s2, src, REG_FTMP2);
- x86_64_movsd_reg_membase(cd, s2, s1, a);
+ x86_64_movsd_reg_membase32(cd, s2, s1, a);
break;
}
break;
/* op1 = type, val.a = field address (in */
/* following NOP) */
- a = ((fieldinfo *) iptr[1].val.a)->offset;
var_to_reg_int(s1, src, REG_ITMP1);
gen_nullptr_check(s1);
+
+ if (!iptr[1].val.a) {
+ unresolved_field *uf = iptr[1].target;
+ codegen_addpatchref(cd, cd->mcodeptr, asm_get_putfield, uf);
+ a = 0;
+ } else
+ a = ((fieldinfo *) (iptr[1].val.a))->offset;
+
switch (iptr->op1) {
case TYPE_INT:
case TYPE_FLT:
- x86_64_movl_imm_membase(cd, iptr->val.i, s1, a);
+ x86_64_movl_imm_membase32(cd, iptr->val.i, s1, a);
break;
case TYPE_LNG:
case TYPE_ADR:
case TYPE_DBL:
if (IS_IMM32(iptr->val.l)) {
- x86_64_mov_imm_membase(cd, iptr->val.l, s1, a);
+ x86_64_mov_imm_membase32(cd, iptr->val.l, s1, a);
} else {
- x86_64_movl_imm_membase(cd, iptr->val.l, s1, a);
- x86_64_movl_imm_membase(cd, iptr->val.l >> 32, s1, a + 4);
+ x86_64_movl_imm_membase32(cd, iptr->val.l, s1, a);
+ x86_64_movl_imm_membase32(cd, iptr->val.l >> 32, s1, a + 4);
}
break;
}
break;
- case ICMD_GETFIELD: /* ... ==> ..., value */
- /* op1 = type, val.i = field offset */
-
- a = ((fieldinfo *)(iptr->val.a))->offset;
- var_to_reg_int(s1, src, REG_ITMP1);
- gen_nullptr_check(s1);
- switch (iptr->op1) {
- case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movl_membase_reg(cd, s1, a, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_LNG:
- case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_mov_membase_reg(cd, s1, a, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movss_membase_reg(cd, s1, a, d);
- store_reg_to_var_flt(iptr->dst, d);
- break;
- case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movsd_membase_reg(cd, s1, a, d);
- store_reg_to_var_flt(iptr->dst, d);
- break;
- }
- break;
-
/* branch operations **************************************************/
x86_64_call_imm(cd, 0); /* passing exception pointer */
x86_64_pop_reg(cd, REG_ITMP2_XPC);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
break;
gen_method: {
methodinfo *lm;
- classinfo *ci;
stackptr tmpsrc;
s4 iarg;
s4 farg;
x86_64_call_reg(cd, REG_ITMP1);
break;
- case ICMD_INVOKESTATIC:
- a = (ptrint) lm->stubroutine;
- d = lm->returntype;
+ case ICMD_INVOKESPECIAL:
+ /* first argument contains pointer */
+ gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, a, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
- break;
+ /* access memory for hardware nullptr */
+ x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2);
- case ICMD_INVOKESPECIAL:
- /* methodinfo* is not resolved, call the assembler function */
+ /* fall through */
+ case ICMD_INVOKESTATIC:
if (!lm) {
unresolved_method *um = iptr->target;
- codegen_addpatchref(cd, cd->mcodeptr, asm_invokespecial, um);
-
-/* if (showdisassemble) { */
-/* x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd); */
-/* x86_64_nop(cd); x86_64_nop(cd); */
-/* } */
+ codegen_addpatchref(cd, cd->mcodeptr,
+ asm_invokestatic_special, um);
a = 0;
d = um->methodref->parseddesc.md->returntype.type;
d = lm->parseddesc->returntype.type;
}
- gen_nullptr_check(rd->argintregs[0]); /* first argument contains pointer */
- x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); /* access memory for hardware nullptr */
x86_64_mov_imm_reg(cd, a, REG_ITMP2);
x86_64_call_reg(cd, REG_ITMP2);
break;
case ICMD_INVOKEVIRTUAL:
- d = lm->returntype;
-
gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex, REG_ITMP1);
+
+ if (!lm) {
+ unresolved_method *um = iptr->target;
+
+ codegen_addpatchref(cd, cd->mcodeptr,
+ asm_invokevirtual, um);
+
+ s1 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ s1 = OFFSET(vftbl_t, table[0]) +
+ sizeof(methodptr) * lm->vftblindex;
+ d = lm->parseddesc->returntype.type;
+ }
+
+ x86_64_mov_membase_reg(cd, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl),
+ REG_ITMP2);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2, s1, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
break;
case ICMD_INVOKEINTERFACE:
- ci = lm->class;
- d = lm->returntype;
-
gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * ci->index, REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, sizeof(methodptr) * (lm - ci->methods), REG_ITMP1);
+
+ if (!lm) {
+ unresolved_method *um = iptr->target;
+
+ codegen_addpatchref(cd, cd->mcodeptr,
+ asm_invokeinterface, um);
+
+ s1 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ classinfo * ci = lm->class;
+
+ s1 = OFFSET(vftbl_t, interfacetable[0]) -
+ sizeof(methodptr) * ci->index;
+
+ s2 = sizeof(methodptr) * (lm - ci->methods);
+
+ d = lm->parseddesc->returntype.type;
+ }
+
+ x86_64_mov_membase_reg(cd, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl),
+ REG_ITMP2);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2, s1, REG_ITMP2);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2, s2, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
break;
}
/* op1: 0 == array, 1 == class */
/* val.a: (classinfo*) superclass */
-/* superclass is an interface:
- *
- * return (sub != NULL) &&
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL);
- *
- * superclass is a class:
- *
- * return ((sub != NULL) && (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ /* superclass is an interface:
+ *
+ * return (sub != NULL) &&
+ * (sub->vftbl->interfacetablelength > super->index) &&
+ * (sub->vftbl->interfacetable[-super->index] != NULL);
+ *
+ * superclass is a class:
+ *
+ * return ((sub != NULL) && (0
+ * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
+ * super->vftbl->diffval));
+ */
{
classinfo *super = (classinfo *) iptr->val.a;
*/
{
- classinfo *super = (classinfo *) iptr->val.a;
-
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
+
+ if (!iptr->val.a) {
+ superindex = 0;
+ supervftbl = NULL;
+
+ } else {
+ super = (classinfo *) iptr->val.a;
+ superindex = super->index;
+ supervftbl = super->vftbl;
+ }
+
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
#endif
var_to_reg_int(s1, src, REG_ITMP1);
if (iptr->op1) { /* class/interface */
- if (super->flags & ACC_INTERFACE) { /* interface */
- x86_64_test_reg_reg(cd, s1, s1);
+ x86_64_test_reg_reg(cd, s1, s1);
+ if (super->flags & ACC_INTERFACE) { /* interface */
/* TODO: clean up this calculation */
a = 3; /* mov_membase_reg */
CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
- a += 3; /* movl_membase_reg - if REG_ITMP3 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
-
- a += 3; /* sub */
- CALCIMMEDIATEBYTES(a, super->index);
-
- a += 3; /* test */
- a += 6; /* jcc */
+ a += 3 + 4; /* movl_membase32_reg */
+ a += 3 + 4; /* sub imm32 */
+ a += 3; /* test */
+ a += 6; /* jcc */
+ a += 3 + 4; /* mov_membase32_reg */
+ a += 3; /* test */
+ a += 6; /* jcc */
- a += 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
+ x86_64_jcc(cd, X86_64_CC_E, a);
- a += 3; /* test */
- a += 6; /* jcc */
+ x86_64_mov_membase_reg(cd, s1,
+ OFFSET(java_objectheader, vftbl),
+ REG_ITMP2);
- x86_64_jcc(cd, X86_64_CC_E, a);
+ if (!iptr->val.a)
+ codegen_addpatchref(cd, cd->mcodeptr,
+ asm_checkcast_interface,
+ (constant_classref *) iptr->target);
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength), REG_ITMP3);
- x86_64_alu_imm_reg(cd, X86_64_SUB, super->index, REG_ITMP3);
+ x86_64_movl_membase32_reg(cd, REG_ITMP2,
+ OFFSET(vftbl_t, interfacetablelength),
+ REG_ITMP3);
+
+ x86_64_alu_imm32_reg(cd, X86_64_SUB, superindex, REG_ITMP3);
x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
x86_64_jcc(cd, X86_64_CC_LE, 0);
codegen_addxcastrefs(cd, cd->mcodeptr);
- x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*), REG_ITMP3);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2,
+ OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*),
+ REG_ITMP3);
x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
x86_64_jcc(cd, X86_64_CC_E, 0);
codegen_addxcastrefs(cd, cd->mcodeptr);
} else { /* class */
- x86_64_test_reg_reg(cd, s1, s1);
-
/* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
+ a = 3; /* mov_membase_reg */
CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
- a += 10; /* mov_imm_reg */
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
+ a += 10; /* mov_imm_reg */
+ a += 3 + 4; /* movl_membase32_reg */
+#if 0
if (s1 != REG_ITMP1) {
a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, baseval));
a += 3; /* sub */
} else {
- a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
- CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, baseval));
- a += 3; /* sub */
- a += 10; /* mov_imm_reg */
- a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
+#endif
+ a += 3 + 4; /* movl_membase32_reg */
+ a += 3; /* sub */
+ a += 10; /* mov_imm_reg */
+ a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, diffval));
- }
+/* } */
a += 3; /* cmp */
a += 6; /* jcc */
x86_64_jcc(cd, X86_64_CC_E, a);
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_imm_reg(cd, (ptrint) super->vftbl, REG_ITMP3);
+ x86_64_mov_membase_reg(cd, s1,
+ OFFSET(java_objectheader, vftbl),
+ REG_ITMP2);
+
+ if (!iptr->val.a)
+ codegen_addpatchref(cd, cd->mcodeptr,
+ asm_checkcast_class,
+ (constant_classref *) iptr->target);
+
+ x86_64_mov_imm_reg(cd, (ptrint) supervftbl, REG_ITMP3);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP2);
- if (s1 != REG_ITMP1) {
- x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, baseval), REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, diffval), REG_ITMP3);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP1, REG_ITMP2);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, baseval), REG_ITMP3);
+ x86_64_movl_membase32_reg(cd, REG_ITMP2,
+ OFFSET(vftbl_t, baseval),
+ REG_ITMP2);
+/* if (s1 != REG_ITMP1) { */
+/* x86_64_movl_membase_reg(cd, REG_ITMP3, */
+/* OFFSET(vftbl_t, baseval), */
+/* REG_ITMP1); */
+/* x86_64_movl_membase_reg(cd, REG_ITMP3, */
+/* OFFSET(vftbl_t, diffval), */
+/* REG_ITMP3); */
+/* #if defined(USE_THREADS) && defined(NATIVE_THREADS) */
+/* codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase); */
+/* #endif */
+/* x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP1, REG_ITMP2); */
+
+/* } else { */
+ x86_64_movl_membase32_reg(cd, REG_ITMP3,
+ OFFSET(vftbl_t, baseval),
+ REG_ITMP3);
x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP3, REG_ITMP2);
- x86_64_mov_imm_reg(cd, (ptrint) super->vftbl, REG_ITMP3);
- x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, diffval), REG_ITMP3);
- }
+ x86_64_mov_imm_reg(cd, (ptrint) supervftbl, REG_ITMP3);
+ x86_64_movl_membase_reg(cd, REG_ITMP3,
+ OFFSET(vftbl_t, diffval),
+ REG_ITMP3);
+/* } */
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
#endif
x86_64_jcc(cd, X86_64_CC_A, 0); /* (u) REG_ITMP1 > (u) REG_ITMP2 -> jump */
codegen_addxcastrefs(cd, cd->mcodeptr);
}
-
- } else
- panic("internal error: no inlined array checkcast");
+ }
}
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, d);
/* a0 = dimension count */
x86_64_mov_imm_reg(cd, iptr->op1, rd->argintregs[0]);
- /* a1 = arraydescriptor */
- x86_64_mov_imm_reg(cd, (u8) iptr->val.a, rd->argintregs[1]);
+ /* a1 = arrayvftbl */
+ x86_64_mov_imm_reg(cd, (ptrint) iptr->val.a, rd->argintregs[1]);
/* a2 = pointer to dimensions = stack pointer */
x86_64_mov_reg_reg(cd, REG_SP, rd->argintregs[2]);
- x86_64_mov_imm_reg(cd, (u8) builtin_nmultianewarray, REG_ITMP1);
+ /* contains the correct function to call (from parse.c) */
+ x86_64_mov_imm_reg(cd, (ptrint) iptr->target, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
x86_64_mov_reg_reg(cd, REG_ITMP1, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, (u8) new_arrayindexoutofboundsexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) new_arrayindexoutofboundsexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (u8) new_nullpointerexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) new_nullpointerexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
- /* generate put/getstatic stub call code */
+ /* generate code patching stub call code */
{
patchref *pref;
codegendata *tmpcd;
- u8 mcode;
+ ptrint mcode;
tmpcd = DNEW(codegendata);
for (pref = cd->patchrefs; pref != NULL; pref = pref->next) {
+ MCODECHECK(50);
+
/* Get machine code which is patched back in later. A */
/* `call rel32' is 5 bytes long (but read 8 bytes). */
xcodeptr = cd->mcodebase + pref->branchpos;
mcode = *((ptrint *) xcodeptr);
- MCODECHECK(50);
-
/* patch in `call rel32' to call the following code */
tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
- /* move classinfo pointer and machine code bytes into registers */
- x86_64_mov_imm_reg(cd, (ptrint) pref->ref, REG_ITMP1);
- x86_64_mov_imm_reg(cd, (ptrint) mcode, REG_ITMP2);
+ /* move machine code bytes and classinfo pointer into registers */
+ x86_64_mov_imm_reg(cd, (ptrint) mcode, REG_ITMP3);
+ x86_64_push_reg(cd, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) pref->ref, REG_ITMP3);
+ x86_64_push_reg(cd, REG_ITMP3);
x86_64_mov_imm_reg(cd, (ptrint) pref->asmwrapper, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
u1 *xcodeptr;
patchref *pref;
codegendata *tmpcd;
- u8 mcode;
+ ptrint mcode;
tmpcd = DNEW(codegendata);
tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
- /* move classinfo pointer and machine code bytes into registers */
- x86_64_mov_imm_reg(cd, (ptrint) pref->ref, REG_ITMP1);
- x86_64_mov_imm_reg(cd, (ptrint) mcode, REG_ITMP2);
+ /* move machine code bytes and classinfo pointer into registers */
+ x86_64_mov_imm_reg(cd, (ptrint) mcode, REG_ITMP3);
+ x86_64_push_reg(cd, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (ptrint) pref->ref, REG_ITMP3);
+ x86_64_push_reg(cd, REG_ITMP3);
x86_64_mov_imm_reg(cd, (ptrint) pref->asmwrapper, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
Authors: Andreas Krall
Christian Thalinger
- $Id: codegen.h 2223 2005-04-05 18:01:56Z christian $
+ $Id: codegen.h 2265 2005-04-11 09:58:52Z twisti $
*/
} while (0)
+/* Emit a "base + disp" effective address that ALWAYS uses a 32-bit     */
+/* displacement (ModRM mod = 2), so the disp field has a constant size  */
+/* and position and can be rewritten later by the code patcher.         */
+/* REG_SP and R12 select SIB addressing in the ModRM byte, so they      */
+/* need an extra SIB byte.                                              */
+#define x86_64_emit_membase32(basereg,disp,dreg) \
+ do { \
+ if ((basereg) == REG_SP || (basereg) == R12) { \
+ x86_64_address_byte(2,(dreg),REG_SP); \
+ x86_64_address_byte(0,REG_SP,REG_SP); \
+ x86_64_emit_imm32((disp)); \
+ } else {\
+ x86_64_address_byte(2,(dreg),(basereg)); \
+ x86_64_emit_imm32((disp)); \
+ } \
+ } while (0)
+
+
#define x86_64_emit_memindex(reg,disp,basereg,indexreg,scale) \
do { \
if ((basereg) == -1) { \
-/* vm/jit/x86_64/emitfuncs.c - x86_64 code emitter functions
+/* src/vm/jit/x86_64/emitfuncs.c - x86_64 code emitter functions
Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
Authors: Christian Thalinger
- $Id: emitfuncs.c 2167 2005-03-31 09:52:47Z christian $
+ Changes:
+
+ $Id: emitfuncs.c 2265 2005-04-11 09:58:52Z twisti $
*/
}
+/*
+ * this one is for INVOKEVIRTUAL/INVOKEINTERFACE to have a
+ * constant membase immediate length of 32bit
+ *
+ * 64-bit load: reg <- [basereg + disp32].  The displacement is always
+ * emitted as 32 bits so the patcher can rewrite it in place.
+ */
+void x86_64_mov_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
+ x86_64_emit_rex(1,(reg),0,(basereg));
+ *(cd->mcodeptr++) = 0x8b;
+ x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movl_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
x86_64_emit_rex(0,(reg),0,(basereg));
*(cd->mcodeptr++) = 0x8b;
}
-/*
- * this one is for INVOKEVIRTUAL/INVOKEINTERFACE to have a
- * constant membase immediate length of 32bit
- */
-void x86_64_mov_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
- x86_64_emit_rex(1,(reg),0,(basereg));
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8. */
+void x86_64_movl_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
+ x86_64_emit_byte_rex((reg),0,(basereg));
*(cd->mcodeptr++) = 0x8b;
- x86_64_address_byte(2, (reg), (basereg));
- x86_64_emit_imm32((disp));
+ x86_64_emit_membase32((basereg),(disp),(reg));
}
}
+/* 64-bit store: [basereg + disp32] <- reg.  Uses a forced 32-bit */
+/* displacement so the disp field can be patched in place.        */
+void x86_64_mov_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+ x86_64_emit_rex(1,(reg),0,(basereg));
+ *(cd->mcodeptr++) = 0x89;
+ x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movl_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
x86_64_emit_rex(0,(reg),0,(basereg));
*(cd->mcodeptr++) = 0x89;
}
+/* 32-bit store: [basereg + disp32] <- reg (32-bit operand size).           */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8 -- the forced REX keeps the      */
+/* instruction length constant for the code patcher.                        */
+void x86_64_movl_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+ x86_64_emit_byte_rex((reg),0,(basereg));
+ *(cd->mcodeptr++) = 0x89;
+ x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_mov_memindex_reg(codegendata *cd, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg) {
x86_64_emit_rex(1,(reg),(indexreg),(basereg));
*(cd->mcodeptr++) = 0x8b;
}
+/* 64-bit store of a (sign-extended) 32-bit immediate:     */
+/* [basereg + disp32] <- imm32, with a forced 32-bit disp. */
+void x86_64_mov_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
+ x86_64_emit_rex(1,0,0,(basereg));
+ *(cd->mcodeptr++) = 0xc7;
+ x86_64_emit_membase32((basereg),(disp),0);
+ x86_64_emit_imm32((imm));
+}
+
+
void x86_64_movl_imm_membase(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
x86_64_emit_rex(0,0,0,(basereg));
*(cd->mcodeptr++) = 0xc7;
}
+/* 32-bit store of a 32-bit immediate: [basereg + disp32] <- imm32.         */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8 (constant length for patching).  */
+void x86_64_movl_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
+ x86_64_emit_byte_rex(0,0,(basereg));
+ *(cd->mcodeptr++) = 0xc7;
+ x86_64_emit_membase32((basereg),(disp),0);
+ x86_64_emit_imm32((imm));
+}
+
+
void x86_64_movsbq_reg_reg(codegendata *cd, s8 reg, s8 dreg) {
x86_64_emit_rex(1,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
}
+/* ALU operation (opc selects add/sub/... via the ModRM reg field) with a */
+/* forced 32-bit immediate (opcode 0x81); never falls back to the short   */
+/* imm8 encoding, so the instruction length stays constant for patching.  */
+void x86_64_alu_imm32_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
+ x86_64_emit_rex(1,0,0,(dreg));
+ *(cd->mcodeptr++) = 0x81;
+ x86_64_emit_reg((opc),(dreg));
+ x86_64_emit_imm32((imm));
+}
+
+
void x86_64_alul_imm_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
if (IS_IMM8(imm)) {
x86_64_emit_rex(0,0,0,(dreg));
}
+/* movss store: [basereg + disp32] <- xmm reg (single-precision float).     */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8.                                 */
+void x86_64_movss_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+ *(cd->mcodeptr++) = 0xf3;
+ x86_64_emit_byte_rex((reg),0,(basereg));
+ *(cd->mcodeptr++) = 0x0f;
+ *(cd->mcodeptr++) = 0x11;
+ x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movsd_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
*(cd->mcodeptr++) = 0xf2;
x86_64_emit_rex(0,(reg),0,(basereg));
}
+/* movsd store: [basereg + disp32] <- xmm reg (double-precision float).     */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8.                                 */
+void x86_64_movsd_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+ *(cd->mcodeptr++) = 0xf2;
+ x86_64_emit_byte_rex((reg),0,(basereg));
+ *(cd->mcodeptr++) = 0x0f;
+ *(cd->mcodeptr++) = 0x11;
+ x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movss_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
*(cd->mcodeptr++) = 0xf3;
x86_64_emit_rex(0,(dreg),0,(basereg));
}
+/* movss load: xmm dreg <- [basereg + disp32] (single-precision float).     */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8.                                 */
+void x86_64_movss_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
+ *(cd->mcodeptr++) = 0xf3;
+ x86_64_emit_byte_rex((dreg),0,(basereg));
+ *(cd->mcodeptr++) = 0x0f;
+ *(cd->mcodeptr++) = 0x10;
+ x86_64_emit_membase32((basereg),(disp),(dreg));
+}
+
+
void x86_64_movlps_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
x86_64_emit_rex(0,(dreg),0,(basereg));
*(cd->mcodeptr++) = 0x0f;
}
+/* movsd load: xmm dreg <- [basereg + disp32] (double-precision float).     */
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 8.                                 */
+void x86_64_movsd_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
+ *(cd->mcodeptr++) = 0xf2;
+ x86_64_emit_byte_rex((dreg),0,(basereg));
+ *(cd->mcodeptr++) = 0x0f;
+ *(cd->mcodeptr++) = 0x10;
+ x86_64_emit_membase32((basereg),(disp),(dreg));
+}
+
+
void x86_64_movlpd_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
*(cd->mcodeptr++) = 0x66;
x86_64_emit_rex(0,(dreg),0,(basereg));
-/* jit/x86_64/emitfuncs.h - emit function prototypes
+/* src/vm/jit/x86_64/emitfuncs.h - emit function prototypes
Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
Contact: cacao@complang.tuwien.ac.at
- Authors: Andreas Krall
- Christian Thalinger
+ Authors: Christian Thalinger
- $Id: emitfuncs.h 1735 2004-12-07 14:33:27Z twisti $
+ Changes:
+
+ $Id: emitfuncs.h 2265 2005-04-11 09:58:52Z twisti $
*/
void x86_64_mov_imm_reg(codegendata *cd, s8 imm, s8 reg);
void x86_64_movl_imm_reg(codegendata *cd, s8 imm, s8 reg);
void x86_64_mov_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg);
-void x86_64_movl_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg);
void x86_64_mov_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg);
+void x86_64_movl_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg);
+void x86_64_movl_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg);
void x86_64_mov_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp);
+void x86_64_mov_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp);
void x86_64_movl_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp);
+void x86_64_movl_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp);
void x86_64_mov_memindex_reg(codegendata *cd, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
void x86_64_movl_memindex_reg(codegendata *cd, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
void x86_64_mov_reg_memindex(codegendata *cd, s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
void x86_64_movw_reg_memindex(codegendata *cd, s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
void x86_64_movb_reg_memindex(codegendata *cd, s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
void x86_64_mov_imm_membase(codegendata *cd, s8 imm, s8 basereg, s8 disp);
+void x86_64_mov_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp);
void x86_64_movl_imm_membase(codegendata *cd, s8 imm, s8 basereg, s8 disp);
+void x86_64_movl_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp);
void x86_64_movsbq_reg_reg(codegendata *cd, s8 reg, s8 dreg);
void x86_64_movsbq_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
void x86_64_movswq_reg_reg(codegendata *cd, s8 reg, s8 dreg);
void x86_64_alu_membase_reg(codegendata *cd, s8 opc, s8 basereg, s8 disp, s8 reg);
void x86_64_alul_membase_reg(codegendata *cd, s8 opc, s8 basereg, s8 disp, s8 reg);
void x86_64_alu_imm_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg);
+void x86_64_alu_imm32_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg);
void x86_64_alul_imm_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg);
void x86_64_alu_imm_membase(codegendata *cd, s8 opc, s8 imm, s8 basereg, s8 disp);
void x86_64_alul_imm_membase(codegendata *cd, s8 opc, s8 imm, s8 basereg, s8 disp);
void x86_64_movss_reg_reg(codegendata *cd, s8 reg, s8 dreg);
void x86_64_movsd_reg_reg(codegendata *cd, s8 reg, s8 dreg);
void x86_64_movss_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp);
+void x86_64_movss_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp);
void x86_64_movsd_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp);
+void x86_64_movsd_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp);
void x86_64_movss_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
+void x86_64_movss_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
void x86_64_movlps_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
void x86_64_movsd_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
+void x86_64_movsd_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
void x86_64_movlpd_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg);
void x86_64_movss_reg_memindex(codegendata *cd, s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
void x86_64_movsd_reg_memindex(codegendata *cd, s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);