Changes: Christian Thalinger
- $Id: asmpart.S 2205 2005-04-03 22:51:43Z twisti $
+ $Id: asmpart.S 2444 2005-05-11 12:51:53Z twisti $
*/
#define ashift 3
+/* save and restore macros ****************************************************/
+
+#define SAVE_ARGUMENT_REGISTERS(off) \
+ sd a0,(0+(off))*8(sp) ; \
+ sd a1,(1+(off))*8(sp) ; \
+ sd a2,(2+(off))*8(sp) ; \
+ sd a3,(3+(off))*8(sp) ; \
+ sd a4,(4+(off))*8(sp) ; \
+ sd a5,(5+(off))*8(sp) ; \
+ sd a6,(6+(off))*8(sp) ; \
+ sd a7,(7+(off))*8(sp) ; \
+ \
+ sdc1 fa0,(8+(off))*8(sp) ; \
+ sdc1 fa1,(9+(off))*8(sp) ; \
+ sdc1 fa2,(10+(off))*8(sp); \
+ sdc1 fa3,(11+(off))*8(sp); \
+ sdc1 fa4,(12+(off))*8(sp); \
+ sdc1 fa5,(13+(off))*8(sp); \
+ sdc1 fa6,(14+(off))*8(sp); \
+ sdc1 fa7,(15+(off))*8(sp);
+
+
+#define RESTORE_ARGUMENT_REGISTERS(off) \
+ ld a0,(0+(off))*8(sp) ; \
+ ld a1,(1+(off))*8(sp) ; \
+ ld a2,(2+(off))*8(sp) ; \
+ ld a3,(3+(off))*8(sp) ; \
+ ld a4,(4+(off))*8(sp) ; \
+ ld a5,(5+(off))*8(sp) ; \
+ ld a6,(6+(off))*8(sp) ; \
+ ld a7,(7+(off))*8(sp) ; \
+ \
+ ldc1 fa0,(8+(off))*8(sp); \
+ ldc1 fa1,(9+(off))*8(sp); \
+ ldc1 fa2,(10+(off))*8(sp); \
+ ldc1 fa3,(11+(off))*8(sp); \
+ ldc1 fa4,(12+(off))*8(sp); \
+ ldc1 fa5,(13+(off))*8(sp); \
+ ldc1 fa6,(14+(off))*8(sp); \
+ ldc1 fa7,(15+(off))*8(sp);
+
+
+#define SAVE_TEMPORARY_REGISTERS(off) \
+ sd t0,(0+(off))*8(sp) ; \
+ sd t1,(1+(off))*8(sp) ; \
+ sd t2,(2+(off))*8(sp) ; \
+ sd t3,(3+(off))*8(sp) ; \
+ sd t8,(4+(off))*8(sp) ; \
+ \
+ sdc1 ft3,(5+(off))*8(sp) ; \
+ sdc1 ft4,(6+(off))*8(sp) ; \
+ sdc1 ft5,(7+(off))*8(sp) ; \
+ sdc1 ft6,(8+(off))*8(sp) ; \
+ sdc1 ft7,(9+(off))*8(sp) ; \
+ sdc1 ft8,(10+(off))*8(sp) ; \
+ sdc1 ft9,(11+(off))*8(sp) ; \
+ sdc1 ft10,(12+(off))*8(sp) ; \
+ sdc1 ft11,(13+(off))*8(sp) ; \
+ sdc1 ft12,(14+(off))*8(sp) ; \
+ sdc1 ft13,(15+(off))*8(sp) ; \
+ sdc1 ft14,(16+(off))*8(sp) ; \
+ sdc1 ft15,(17+(off))*8(sp) ; \
+ sdc1 ft16,(18+(off))*8(sp) ; \
+ sdc1 ft17,(19+(off))*8(sp) ; \
+ sdc1 ft18,(20+(off))*8(sp) ; \
+
+
+#define RESTORE_TEMPORARY_REGISTERS(off) \
+ ld t0,(0+(off))*8(sp) ; \
+ ld t1,(1+(off))*8(sp) ; \
+ ld t2,(2+(off))*8(sp) ; \
+ ld t3,(3+(off))*8(sp) ; \
+ ld t8,(4+(off))*8(sp) ; \
+ \
+ ldc1 ft3,(5+(off))*8(sp) ; \
+ ldc1 ft4,(6+(off))*8(sp) ; \
+ ldc1 ft5,(7+(off))*8(sp) ; \
+ ldc1 ft6,(8+(off))*8(sp) ; \
+ ldc1 ft7,(9+(off))*8(sp) ; \
+ ldc1 ft8,(10+(off))*8(sp) ; \
+ ldc1 ft9,(11+(off))*8(sp) ; \
+ ldc1 ft10,(12+(off))*8(sp) ; \
+ ldc1 ft11,(13+(off))*8(sp) ; \
+ ldc1 ft12,(14+(off))*8(sp) ; \
+ ldc1 ft13,(15+(off))*8(sp) ; \
+ ldc1 ft14,(16+(off))*8(sp) ; \
+ ldc1 ft15,(17+(off))*8(sp) ; \
+ ldc1 ft16,(18+(off))*8(sp) ; \
+ ldc1 ft17,(19+(off))*8(sp) ; \
+ ldc1 ft18,(20+(off))*8(sp) ; \
+
+
.text
.set noat
.globl asm_dumpregistersandcall
.globl asm_handle_exception
.globl asm_handle_nat_exception
- .globl asm_check_clinit
- .globl asm_builtin_checkarraycast
- .globl asm_builtin_checkcast
+
+ .globl asm_wrapper_patcher
+
+ .globl asm_builtin_arraycheckcast
.globl asm_builtin_aastore
#if defined(USE_THREADS)
.end asm_handle_nat_exception
-/********************* asm_check_clinit ****************************************
-* *
-* Checks if class is initialized. If not, do it right now. *
-* *
+/* asm_wrapper_patcher *********************************************************
+
+   Wrapper stub: calls the patcher function whose pointer lies on the stack,
+   restores the saved registers, and then either resumes at the (now patched)
+   JIT code or, if the patcher signalled failure, forwards the pending
+   exception to asm_handle_exception.
+
+ Stack layout:
+ 32 return address into JIT code (patch position)
+ 24 pointer to virtual java_objectheader
+ 16 machine code (which is patched back later)
+ 8 unresolved class/method/field reference
+ 0 patcher function pointer to call
+
*******************************************************************************/
- .ent asm_check_clinit
-
-asm_check_clinit:
- daddiu sp,sp,-24*8
-
- sd ra,0*8(sp) /* save return address */
-
- sd a0,1*8(sp) /* save argument registers for leaf funcs */
- sd a1,2*8(sp)
- sd a2,3*8(sp)
- sd a3,4*8(sp)
- sd a4,5*8(sp)
- sd a5,6*8(sp)
- sd a6,7*8(sp)
- sd a7,8*8(sp)
-
- sd t0,9*8(sp)
- sd t1,10*8(sp)
- sd t2,11*8(sp)
- sd t3,12*8(sp)
- sd t8,13*8(sp)
-
- sdc1 fa0,14*8(sp)
- sdc1 fa1,15*8(sp)
- sdc1 fa2,16*8(sp)
- sdc1 fa3,17*8(sp)
- sdc1 fa4,18*8(sp)
- sdc1 fa5,19*8(sp)
- sdc1 fa6,20*8(sp)
- sdc1 fa7,21*8(sp)
-
- sd itmp2,22*8(sp) /* save machine code */
+ .ent asm_wrapper_patcher
+
+asm_wrapper_patcher:
+ daddiu sp,sp,-(16+21+4+1)*8 /* create stack frame */
+
+ SAVE_ARGUMENT_REGISTERS(0) /* save 8 int/8 float argument registers */
+ SAVE_TEMPORARY_REGISTERS(16) /* save 5 int/16 float temporary registers */
+
+ sd itmp1,(16+21+0)*8(sp) /* save itmp1 */
+ sd itmp2,(16+21+1)*8(sp) /* save itmp2 */
+ sd ra,(16+21+2+1)*8(sp) /* save method return address (for leafs) */
+ sd pv,(16+21+3+1)*8(sp) /* save pv of calling java function */
+
+ daddiu a0,sp,(1+16+21+4+1)*8 /* pass sp, skip patcher function pointer */
+ ld itmp3,(0+16+21+4+1)*8(sp) /* get function pointer */
+ jalr itmp3
+
+ RESTORE_ARGUMENT_REGISTERS(0) /* restore 8 int/8 float argument registers */
+ RESTORE_TEMPORARY_REGISTERS(16) /* restore 5 int/16 float temporary reg. */
+
+ ld itmp1,(16+21+0)*8(sp) /* restore itmp1 */
+ ld itmp2,(16+21+1)*8(sp) /* restore itmp2 */
+ ld ra,(16+21+2+1)*8(sp) /* restore method return address (for leafs)*/
+ ld pv,(16+21+3+1)*8(sp) /* restore pv of calling java function */
+
+ ld itmp3,(4+16+21+4+1)*8(sp) /* get return address (into JIT code) */
+ daddiu sp,sp,(5+16+21+4+1)*8 /* remove stack frame */
+ beqz v0,L_asm_wrapper_patcher_exception
+
+ jr itmp3 /* jump to new patched code */
+
+L_asm_wrapper_patcher_exception:
+ move xpc,itmp3 /* return address into JIT code is xpc */
+
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ daddiu sp,sp,-4*8
+ sd xpc,0*8(sp)
+ sd ra,1*8(sp)
+ sd pv,2*8(sp)
+ jal builtin_asm_get_exceptionptrptr
+ ld xpc,0*8(sp)
+ ld ra,1*8(sp)
+ ld pv,2*8(sp)
+ daddiu sp,sp,4*8
+#else
+ la v0,_exceptionptr
+#endif
+ ld xptr,0(v0) /* get the exception pointer */
+ sd zero,0(v0) /* clear the exception pointer */
+ b asm_handle_exception
+
+#if 0
/* check if class is initialized */
lw itmp3,offclassinit(itmp1)
bnez itmp3,L_is_initialized
beqz v0,L_initializererror
L_is_initialized:
- ld ra,0*8(sp) /* get return address */
- ld itmp1,22*8(sp) /* get machine code */
+ ld itmp1,(16+21+1)*8(sp) /* get machine code */
+ ld ra,(16+21+2)*8(sp) /* load method return address (for leafs) */
daddiu ra,ra,-2*4 /* go back 2 instructions (jal + nop delay) */
sw itmp1,0(ra) /* patch first instruction */
addiu a1,zero,2*4 /* 2 instruction words long */
jal docacheflush /* flush! */
- ld ra,0*8(sp) /* restore return address */
-
- ld a0,1*8(sp) /* restore argument registers */
- ld a1,2*8(sp)
- ld a2,3*8(sp)
- ld a3,4*8(sp)
- ld a4,5*8(sp)
- ld a5,6*8(sp)
- ld a6,7*8(sp)
- ld a7,8*8(sp)
-
- ld t0,9*8(sp)
- ld t1,10*8(sp)
- ld t2,11*8(sp)
- ld t3,12*8(sp)
- ld t8,13*8(sp)
-
- ldc1 fa0,14*8(sp)
- ldc1 fa1,15*8(sp)
- ldc1 fa2,16*8(sp)
- ldc1 fa3,17*8(sp)
- ldc1 fa4,18*8(sp)
- ldc1 fa5,19*8(sp)
- ldc1 fa6,20*8(sp)
- ldc1 fa7,21*8(sp)
-
- daddiu sp,sp,24*8
+ RESTORE_ARGUMENT_REGISTERS(0) /* restore 8 int/8 float argument registers */
+ RESTORE_TEMPORARY_REGISTERS(16)/* restore 5 int/16 float temporary reg. */
+
+ ld itmp1,(16+21+0)*8(sp) /* restore itmp1 */
+ ld itmp2,(16+21+1)*8(sp) /* restore itmp2 */
+ ld ra,(16+21+2)*8(sp) /* restore method return address (for leafs)*/
+ ld pv,(16+21+3)*8(sp) /* restore pv of calling java function */
+
+ daddiu sp,sp,(16+21+4+1)*8 /* remove stack frame */
daddiu ra,ra,-2*4 /* go back 2 instructions (jal + nop delay) */
jr ra
ld xptr,0(v0) /* get the exception pointer */
sd zero,0(v0) /* clear the exception pointer */
- ld ra,0*8(sp) /* restore return address */
-
- ld a0,1*8(sp) /* restore argument registers */
- ld a1,2*8(sp)
- ld a2,3*8(sp)
- ld a3,4*8(sp)
- ld a4,5*8(sp)
- ld a5,6*8(sp)
- ld a6,7*8(sp)
- ld a7,8*8(sp)
-
- ld t0,9*8(sp)
- ld t1,10*8(sp)
- ld t2,11*8(sp)
- ld t3,12*8(sp)
- ld t8,13*8(sp)
-
- ldc1 fa0,14*8(sp)
- ldc1 fa1,15*8(sp)
- ldc1 fa2,16*8(sp)
- ldc1 fa3,17*8(sp)
- ldc1 fa4,18*8(sp)
- ldc1 fa5,19*8(sp)
- ldc1 fa6,20*8(sp)
- ldc1 fa7,21*8(sp)
-
- daddiu sp,sp,24*8
+ RESTORE_ARGUMENT_REGISTERS(0) /* restore 8 int/8 float argument registers */
+ RESTORE_TEMPORARY_REGISTERS(16)/* restore 5 int/16 float temporary reg. */
+
+ ld itmp1,(16+21+0)*8(sp) /* restore itmp1 */
+ ld itmp2,(16+21+1)*8(sp) /* restore itmp2 */
+ ld ra,(16+21+2)*8(sp) /* restore method return address (for leafs)*/
+ ld pv,(16+21+3)*8(sp) /* restore pv of calling java function */
+
+ daddiu sp,sp,(16+21+4+1)*8 /* remove stack frame */
	aaddiu  xpc,ra,-4             /* faulting address is return address - 4  */
b asm_handle_exception
+#endif
- .end asm_check_clinit
+ .end asm_wrapper_patcher
/********************* function asm_builtin_monitorenter ***********************
.end asm_builtin_lrem
-/******************* function asm_builtin_checkarraycast ***********************
-* *
-* Does the cast check and eventually throws an exception *
-* *
-*******************************************************************************/
-
- .ent asm_builtin_checkarraycast
-
-asm_builtin_checkarraycast:
- aaddiu sp,sp,-16 /* allocate stack space */
- sd ra,0(sp) /* save return address */
- sd a0,8(sp) /* save object pointer */
- jal builtin_checkarraycast /* builtin_checkarraycast */
- beqz v0,nb_carray_throw /* if (false) throw exception */
- ld ra,0(sp) /* restore return address */
- ld v0,8(sp) /* return object pointer */
- aaddiu sp,sp,16 /* deallocate stack */
- j ra /* return */
-
-nb_carray_throw:
- jal new_classcastexception
- move xptr,v0
-
- ld ra,0(sp) /* restore return address */
- aaddiu sp,sp,16 /* free stack space */
- aaddiu xpc,ra,-4 /* faulting address is return adress - 4*/
- b asm_handle_nat_exception
-
- .end asm_builtin_checkarraycast
+/* asm_builtin_arraycheckcast **************************************************
+   Performs the array cast check and throws an exception if the check fails.
-/********************* function asm_builtin_checkcast **************************
-* *
-* Does the cast check and eventually throws an exception *
-* *
*******************************************************************************/
- .ent asm_builtin_checkcast
+ .ent asm_builtin_arraycheckcast
-asm_builtin_checkcast:
+asm_builtin_arraycheckcast:
aaddiu sp,sp,-16 /* allocate stack space */
sd ra,0(sp) /* save return address */
sd a0,8(sp) /* save object pointer */
- jal builtin_checkcast /* builtin_checkcast */
- beqz v0,nb_ccast_throw /* if (false) throw exception */
+ jal builtin_arraycheckcast /* builtin_arraycheckcast */
+ beqz v0,nb_carray_throw /* if (false) throw exception */
ld ra,0(sp) /* restore return address */
ld v0,8(sp) /* return object pointer */
aaddiu sp,sp,16 /* deallocate stack */
j ra /* return */
-nb_ccast_throw:
+nb_carray_throw:
jal new_classcastexception
move xptr,v0
	aaddiu  xpc,ra,-4             /* faulting address is return address - 4  */
b asm_handle_nat_exception
- .end asm_builtin_checkcast
+ .end asm_builtin_arraycheckcast
/******************* function asm_builtin_aastore ******************************
This module generates MIPS machine code for a sequence of
intermediate code commands (ICMDs).
- $Id: codegen.c 2297 2005-04-13 12:50:07Z christian $
+ $Id: codegen.c 2444 2005-05-11 12:51:53Z twisti $
*/
#ifdef LSRA
#include "vm/jit/lsra.h"
#endif
+
+#include "vm/jit/patcher.h"
#include "vm/jit/reg.h"
#include "vm/jit/mips/codegen.h"
+#include "vm/jit/mips/arch.h"
#include "vm/jit/mips/types.h"
void codegen(methodinfo *m, codegendata *cd, registerdata *rd)
{
- s4 len, s1, s2, s3, d;
- s4 a;
+ s4 len, s1, s2, s3, d;
+ ptrint a;
s4 parentargs_base;
s4 *mcodeptr;
stackptr src;
p = parentargs_base;
if (!m->isleafmethod) {
- p--; M_LST(REG_RA, REG_SP, 8 * p);
+ p--; M_LST(REG_RA, REG_SP, p * 8);
}
for (i = rd->savintregcnt - 1; i >= rd->maxsavintreguse; i--) {
- p--; M_LST(rd->savintregs[i], REG_SP, 8 * p);
+ p--; M_LST(rd->savintregs[i], REG_SP, p * 8);
}
for (i = rd->savfltregcnt - 1; i >= rd->maxsavfltreguse; i--) {
- p--; M_DST(rd->savfltregs[i], REG_SP, 8 * p);
- }
-
- /* save monitorenter argument */
-
-#if defined(USE_THREADS)
- if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- if (m->flags & ACC_STATIC) {
- p = dseg_addaddress(cd, m->class);
- M_ALD(REG_ITMP1, REG_PV, p);
- M_AST(REG_ITMP1, REG_SP, rd->maxmemuse * 8);
-
- } else {
- M_AST(rd->argintregs[0], REG_SP, rd->maxmemuse * 8);
- }
- }
-#endif
-
- /* copy argument registers to stack and call trace function with pointer
- to arguments on stack. ToDo: save floating point registers !!!!!!!!!
- */
-
- if (runverbose) {
- M_LDA(REG_SP, REG_SP, -(18 * 8));
- M_LST(REG_RA, REG_SP, 1 * 8);
-
- /* save integer argument registers */
- for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
- M_LST(rd->argintregs[p], REG_SP, (2 + p) * 8);
- }
-
- /* save and copy float arguments into integer registers */
- for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
- t = m->paramtypes[p];
-
- if (IS_FLT_DBL_TYPE(t)) {
- if (IS_2_WORD_TYPE(t)) {
- M_DST(rd->argfltregs[p], REG_SP, (10 + p) * 8);
- M_LLD(rd->argintregs[p], REG_SP, (10 + p) * 8);
-
- } else {
- M_FST(rd->argfltregs[p], REG_SP, (10 + p) * 8);
- M_ILD(rd->argintregs[p], REG_SP, (10 + p) * 8);
- }
-
- } else {
- M_DST(rd->argfltregs[p], REG_SP, (10 + p) * 8);
- }
- }
-
- p = dseg_addaddress(cd, m);
- M_ALD(REG_ITMP1, REG_PV, p);
- M_LST(REG_ITMP1, REG_SP, 0);
- p = dseg_addaddress(cd, (void *) builtin_trace_args);
- M_ALD(REG_ITMP3, REG_PV, p);
- M_JSR(REG_RA, REG_ITMP3);
- M_NOP;
-
- M_LLD(REG_RA, REG_SP, 1 * 8);
-
- for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
- M_LLD(rd->argintregs[p], REG_SP, (2 + p) * 8);
- }
-
- for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
- t = m->paramtypes[p];
-
- if (IS_FLT_DBL_TYPE(t)) {
- if (IS_2_WORD_TYPE(t)) {
- M_DLD(rd->argfltregs[p], REG_SP, (10 + p) * 8);
-
- } else {
- M_FLD(rd->argfltregs[p], REG_SP, (10 + p) * 8);
- }
-
- } else {
- M_DLD(rd->argfltregs[p], REG_SP, (10 + p) * 8);
- }
- }
-
- M_LDA(REG_SP, REG_SP, 18 * 8);
+ p--; M_DST(rd->savfltregs[i], REG_SP, p * 8);
}
/* take arguments out of register or stack frame */
#if defined(USE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
s4 disp;
- s8 func_enter = (m->flags & ACC_STATIC) ?
- (s8) builtin_staticmonitorenter : (s8) builtin_monitorenter;
- p = dseg_addaddress(cd, (void *) func_enter);
+
+ /* stack offset for monitor argument */
+
+ s1 = rd->maxmemuse;
+
+ if (runverbose) {
+ M_LDA(REG_SP, REG_SP, -(INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT) * 8);
+
+ for (p = 0; p < INT_ARG_CNT; p++)
+ M_LST(rd->argintregs[p], REG_SP, p * 8);
+
+ for (p = 0; p < FLT_ARG_CNT; p++)
+ M_DST(rd->argfltregs[p], REG_SP, (INT_ARG_CNT + p) * 8);
+
+ if (m->isleafmethod) {
+ for (p = 0; p < INT_TMP_CNT; p++)
+ M_LST(rd->tmpintregs[p], REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + p) * 8);
+
+ for (p = 0; p < FLT_TMP_CNT; p++)
+ M_DST(rd->tmpfltregs[p], REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + p) * 8);
+ }
+
+ s1 += INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT;
+ }
+
+ /* decide which monitor enter function to call */
+
+ if (m->flags & ACC_STATIC) {
+ p = dseg_addaddress(cd, m->class);
+ M_ALD(REG_ITMP1, REG_PV, p);
+ M_AST(REG_ITMP1, REG_SP, s1 * 8);
+ p = dseg_addaddress(cd, BUILTIN_staticmonitorenter);
+ M_ALD(REG_ITMP3, REG_PV, p);
+ M_JSR(REG_RA, REG_ITMP3);
+ M_INTMOVE(REG_ITMP1, rd->argintregs[0]); /* branch delay */
+ disp = -(s4) ((u1 *) mcodeptr - cd->mcodebase);
+ M_LDA(REG_PV, REG_RA, disp);
+
+ } else {
+ M_BEQZ(rd->argintregs[0], 0);
+ codegen_addxnullrefs(cd, mcodeptr);
+ p = dseg_addaddress(cd, BUILTIN_monitorenter);
+ M_ALD(REG_ITMP3, REG_PV, p);
+ M_JSR(REG_RA, REG_ITMP3);
+ M_AST(rd->argintregs[0], REG_SP, s1 * 8); /* br delay */
+ disp = -(s4) ((u1 *) mcodeptr - cd->mcodebase);
+ M_LDA(REG_PV, REG_RA, disp);
+ }
+
+ if (runverbose) {
+ for (p = 0; p < INT_ARG_CNT; p++)
+ M_LLD(rd->argintregs[p], REG_SP, p * 8);
+
+ for (p = 0; p < FLT_ARG_CNT; p++)
+ M_DLD(rd->argfltregs[p], REG_SP, (INT_ARG_CNT + p) * 8);
+
+
+ if (m->isleafmethod) {
+ for (p = 0; p < INT_TMP_CNT; p++)
+ M_LLD(rd->tmpintregs[p], REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + p) * 8);
+
+ for (p = 0; p < FLT_TMP_CNT; p++)
+ M_DLD(rd->tmpfltregs[p], REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + p) * 8);
+ }
+
+ M_LDA(REG_SP, REG_SP, (INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT) * 8);
+ }
+ }
+#endif
+
+ /* copy argument registers to stack and call trace function */
+
+ if (runverbose) {
+ M_LDA(REG_SP, REG_SP, -(2 + INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT) * 8);
+ M_LST(REG_RA, REG_SP, 1 * 8);
+
+ /* save integer argument registers */
+
+ for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++)
+ M_LST(rd->argintregs[p], REG_SP, (2 + p) * 8);
+
+ /* save and copy float arguments into integer registers */
+
+ for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
+ t = m->paramtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DST(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ M_LLD(rd->argintregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FST(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ M_ILD(rd->argintregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ } else {
+ M_DST(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
+ /* save temporary registers for leaf methods */
+
+ if (m->isleafmethod) {
+ for (p = 0; p < INT_TMP_CNT; p++)
+ M_LST(rd->tmpintregs[p], REG_SP, (2 + INT_ARG_CNT + FLT_ARG_CNT + p) * 8);
+
+ for (p = 0; p < FLT_TMP_CNT; p++)
+ M_DST(rd->tmpfltregs[p], REG_SP, (2 + INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + p) * 8);
+ }
+
+ p = dseg_addaddress(cd, m);
+ M_ALD(REG_ITMP1, REG_PV, p);
+ M_LST(REG_ITMP1, REG_SP, 0);
+ p = dseg_addaddress(cd, (void *) builtin_trace_args);
M_ALD(REG_ITMP3, REG_PV, p);
M_JSR(REG_RA, REG_ITMP3);
- M_ALD(rd->argintregs[0], REG_SP, rd->maxmemuse * 8);
- disp = -(s4) ((u1 *) mcodeptr - cd->mcodebase);
- M_LDA(REG_PV, REG_RA, disp);
+ M_NOP;
+
+ M_LLD(REG_RA, REG_SP, 1 * 8);
+
+ /* restore integer argument registers */
+
+ for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++)
+ M_LLD(rd->argintregs[p], REG_SP, (2 + p) * 8);
+
+ /* restore float argument registers */
+
+ for (p = 0; p < m->paramcount && p < FLT_ARG_CNT; p++) {
+ t = m->paramtypes[p];
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ if (IS_2_WORD_TYPE(t)) {
+ M_DLD(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+
+ } else {
+ M_FLD(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+
+ } else {
+ M_DLD(rd->argfltregs[p], REG_SP, (2 + INT_ARG_CNT + p) * 8);
+ }
+ }
+
+ /* restore temporary registers for leaf methods */
+
+ if (m->isleafmethod) {
+ for (p = 0; p < INT_TMP_CNT; p++)
+ M_LLD(rd->tmpintregs[p], REG_SP, (2 + INT_ARG_CNT + FLT_ARG_CNT + p) * 8);
+
+ for (p = 0; p < FLT_TMP_CNT; p++)
+ M_DLD(rd->tmpfltregs[p], REG_SP, (2 + INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + p) * 8);
+ }
+
+ M_LDA(REG_SP, REG_SP, (2 + INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT) * 8);
}
-#endif
+
}
/* end of header generation */
/* walk through all basic blocks */
+
for (bptr = m->basicblocks; bptr != NULL; bptr = bptr->next) {
bptr->mpc = (s4) ((u1 *) mcodeptr - cd->mcodebase);
src = bptr->instack;
len = bptr->icount;
+
for (iptr = bptr->iinstr; len > 0; src = iptr->dst, len--, iptr++) {
- MCODECHECK(64); /* an instruction usually needs < 64 words */
- switch (iptr->opc) {
+ MCODECHECK(64); /* an instruction usually needs < 64 words */
+
+ switch (iptr->opc) {
case ICMD_NOP: /* ... ==> ... */
break;
- case ICMD_NULLCHECKPOP: /* ..., objectref ==> ... */
+ case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
var_to_reg_int(s1, src, REG_ITMP1);
M_BEQZ(s1, 0);
break;
+ case ICMD_GETSTATIC: /* ... ==> ..., value */
+ /* op1 = type, val.a = field address */
+
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putstatic,
+ (unresolved_field *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr->val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_clinit, fi->class);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ a = (ptrint) &(fi->value);
+ }
+
+ a = dseg_addaddress(cd, a);
+ M_ALD(REG_ITMP1, REG_PV, a);
+ switch (iptr->op1) {
+ case TYPE_INT:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ M_ILD(d, REG_ITMP1, 0);
+ store_reg_to_var_int(iptr->dst, d);
+ break;
+ case TYPE_LNG:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ M_LLD(d, REG_ITMP1, 0);
+ store_reg_to_var_int(iptr->dst, d);
+ break;
+ case TYPE_ADR:
+ d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ M_ALD(d, REG_ITMP1, 0);
+ store_reg_to_var_int(iptr->dst, d);
+ break;
+ case TYPE_FLT:
+ d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ M_FLD(d, REG_ITMP1, 0);
+ store_reg_to_var_flt(iptr->dst, d);
+ break;
+ case TYPE_DBL:
+ d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ M_DLD(d, REG_ITMP1, 0);
+ store_reg_to_var_flt(iptr->dst, d);
+ break;
+ }
+ break;
+
case ICMD_PUTSTATIC: /* ..., value ==> ... */
/* op1 = type, val.a = field address */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addpatchref(cd, mcodeptr, asm_check_clinit,
- ((fieldinfo *) iptr->val.a)->class);
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putstatic,
+ (unresolved_field *) iptr->target);
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 2 nop's */
- /* so that the real code keeps untouched. */
if (showdisassemble) {
M_NOP; M_NOP;
}
+
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr->val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_clinit, fi->class);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ a = (ptrint) &(fi->value);
}
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
+ a = dseg_addaddress(cd, a);
M_ALD(REG_ITMP1, REG_PV, a);
switch (iptr->op1) {
case TYPE_INT:
/* op1 = type, val.a = field address (in */
/* following NOP) */
- if (!((fieldinfo *) iptr[1].val.a)->class->initialized) {
- codegen_addpatchref(cd, mcodeptr, asm_check_clinit,
- ((fieldinfo *) iptr[1].val.a)->class);
+ if (!iptr[1].val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putstatic,
+ (unresolved_field *) iptr[1].target);
if (showdisassemble) {
M_NOP; M_NOP;
}
+
+ a = 0;
+
+ } else {
+ fieldinfo *fi = iptr[1].val.a;
+
+ if (!fi->class->initialized) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_clinit, fi->class);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ a = (ptrint) &(fi->value);
}
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr[1].val.a)->value));
+ a = dseg_addaddress(cd, a);
M_ALD(REG_ITMP1, REG_PV, a);
switch (iptr->op1) {
case TYPE_INT:
}
break;
- case ICMD_GETSTATIC: /* ... ==> ..., value */
- /* op1 = type, val.a = field address */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addpatchref(cd, mcodeptr, asm_check_clinit,
- ((fieldinfo *) iptr->val.a)->class);
+ case ICMD_GETFIELD: /* ... ==> ..., value */
+ /* op1 = type, val.i = field offset */
+
+ var_to_reg_int(s1, src, REG_ITMP1);
+ gen_nullptr_check(s1);
+
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putfield,
+ (unresolved_field *) iptr->target);
if (showdisassemble) {
M_NOP; M_NOP;
}
- }
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
- M_ALD(REG_ITMP1, REG_PV, a);
+ a = 0;
+
+ } else {
+ a = ((fieldinfo *) (iptr->val.a))->offset;
+ }
+
switch (iptr->op1) {
case TYPE_INT:
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_ILD(d, REG_ITMP1, 0);
+ M_ILD(d, s1, a);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_LNG:
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_LLD(d, REG_ITMP1, 0);
+ M_LLD(d, s1, a);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_ADR:
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_ALD(d, REG_ITMP1, 0);
+ M_ALD(d, s1, a);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_FLT:
d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- M_FLD(d, REG_ITMP1, 0);
+ M_FLD(d, s1, a);
store_reg_to_var_flt(iptr->dst, d);
break;
case TYPE_DBL:
d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- M_DLD(d, REG_ITMP1, 0);
+ M_DLD(d, s1, a);
store_reg_to_var_flt(iptr->dst, d);
break;
}
+ /* XXX quick hack */
+ M_NOP;
break;
-
case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
/* op1 = type, val.a = field address */
- a = ((fieldinfo *) iptr->val.a)->offset;
var_to_reg_int(s1, src->prev, REG_ITMP1);
gen_nullptr_check(s1);
+
+ if (!IS_FLT_DBL_TYPE(iptr->op1)) {
+ var_to_reg_int(s2, src, REG_ITMP2);
+ } else {
+ var_to_reg_flt(s2, src, REG_FTMP2);
+ }
+
+ if (!iptr->val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putfield,
+ (unresolved_field *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+
+ } else {
+ a = ((fieldinfo *) (iptr->val.a))->offset;
+ }
+
switch (iptr->op1) {
case TYPE_INT:
- var_to_reg_int(s2, src, REG_ITMP2);
M_IST(s2, s1, a);
break;
case TYPE_LNG:
- var_to_reg_int(s2, src, REG_ITMP2);
M_LST(s2, s1, a);
break;
case TYPE_ADR:
- var_to_reg_int(s2, src, REG_ITMP2);
M_AST(s2, s1, a);
break;
case TYPE_FLT:
- var_to_reg_flt(s2, src, REG_FTMP2);
M_FST(s2, s1, a);
break;
case TYPE_DBL:
- var_to_reg_flt(s2, src, REG_FTMP2);
M_DST(s2, s1, a);
break;
}
+ /* XXX quick hack */
+ M_NOP;
break;
case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
/* op1 = type, val.a = field address (in */
/* following NOP) */
- a = ((fieldinfo *) iptr[1].val.a)->offset;
var_to_reg_int(s1, src, REG_ITMP1);
gen_nullptr_check(s1);
- switch (iptr->op1) {
+
+ if (!iptr[1].val.a) {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_get_putfield,
+ (unresolved_field *) iptr[1].target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+
+ } else {
+ a = ((fieldinfo *) (iptr[1].val.a))->offset;
+ }
+
+ switch (iptr[1].op1) {
case TYPE_INT:
M_IST(REG_ZERO, s1, a);
break;
M_DST(REG_ZERO, s1, a);
break;
}
- break;
-
- case ICMD_GETFIELD: /* ... ==> ..., value */
- /* op1 = type, val.i = field offset */
-
- a = ((fieldinfo *)(iptr->val.a))->offset;
- var_to_reg_int(s1, src, REG_ITMP1);
- gen_nullptr_check(s1);
- switch (iptr->op1) {
- case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_ILD(d, s1, a);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_LNG:
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_LLD(d, s1, a);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_ALD(d, s1, a);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- M_FLD(d, s1, a);
- store_reg_to_var_flt(iptr->dst, d);
- break;
- case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- M_DLD(d, s1, a);
- store_reg_to_var_flt(iptr->dst, d);
- break;
- }
+ /* XXX quick hack */
+ M_NOP;
break;
case ICMD_BUILTIN3:
case ICMD_BUILTIN2:
case ICMD_BUILTIN1:
- a = dseg_addaddress(cd, (void *) lm);
+ if (iptr->target) {
+ codegen_addpatchref(cd, mcodeptr,
+ (functionptr) lm, iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+
+ } else {
+ a = (ptrint) lm;
+ }
+
+ a = dseg_addaddress(cd, a);
d = iptr->op1; /* return type */
M_ALD(REG_ITMP3, REG_PV, a); /* built-in-function pointer */
M_NOP;
goto afteractualcall;
- case ICMD_INVOKESTATIC:
case ICMD_INVOKESPECIAL:
- a = dseg_addaddress(cd, lm->stubroutine);
- d = lm->returntype;
+ gen_nullptr_check(rd->argintregs[0]);
+ M_ILD(REG_ITMP1, rd->argintregs[0], 0); /* hardware nullptr */
+ /* fall through */
+
+ case ICMD_INVOKESTATIC:
+ if (!lm) {
+ unresolved_method *um = iptr->target;
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_invokestatic_special, um);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ a = (ptrint) lm->stubroutine;
+ d = lm->parseddesc->returntype.type;
+ }
+
+ a = dseg_addaddress(cd, a);
M_ALD(REG_PV, REG_PV, a); /* method pointer in pv */
break;
case ICMD_INVOKEVIRTUAL:
- d = lm->returntype;
-
gen_nullptr_check(rd->argintregs[0]);
- M_ALD(REG_METHODPTR, rd->argintregs[0], OFFSET(java_objectheader, vftbl));
- M_ALD(REG_PV, REG_METHODPTR, OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex);
+
+ if (!lm) {
+ unresolved_method *um = iptr->target;
+
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_invokevirtual, um);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ s1 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ s1 = OFFSET(vftbl_t, table[0]) +
+ sizeof(methodptr) * lm->vftblindex;
+ d = lm->parseddesc->returntype.type;
+ }
+
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl));
+ M_ALD(REG_PV, REG_METHODPTR, s1);
break;
case ICMD_INVOKEINTERFACE:
- d = lm->returntype;
-
gen_nullptr_check(rd->argintregs[0]);
- M_ALD(REG_METHODPTR, rd->argintregs[0], OFFSET(java_objectheader, vftbl));
- M_ALD(REG_METHODPTR, REG_METHODPTR, OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr*) * lm->class->index);
- M_ALD(REG_PV, REG_METHODPTR, sizeof(methodptr) * (lm - lm->class->methods));
+
+ if (!lm) {
+ unresolved_method *um = iptr->target;
+
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_invokeinterface, um);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ s1 = 0;
+ s2 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ s1 = OFFSET(vftbl_t, interfacetable[0]) -
+ sizeof(methodptr*) * lm->class->index;
+
+ s2 = sizeof(methodptr) * (lm - lm->class->methods);
+
+ d = lm->parseddesc->returntype.type;
+ }
+
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl));
+ M_ALD(REG_METHODPTR, REG_METHODPTR, s1);
+ M_ALD(REG_PV, REG_METHODPTR, s2);
break;
}
break;
- case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
-
+ case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
/* op1: 0 == array, 1 == class */
/* val.a: (classinfo*) superclass */
-/* superclass is an interface:
- *
- * return (sub != NULL) &&
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL);
- *
- * superclass is a class:
- *
- * return ((sub != NULL) && (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ /* superclass is an interface:
+ *
+ * OK if ((sub == NULL) ||
+ * (sub->vftbl->interfacetablelength > super->index) &&
+ * (sub->vftbl->interfacetable[-super->index] != NULL));
+ *
+ * superclass is a class:
+ *
+ * OK if ((sub == NULL) || (0
+ * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
+ * super->vftbl->diffvall));
+ */
{
- classinfo *super = (classinfo*) iptr->val.a;
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
+
+ super = (classinfo *) iptr->val.a;
+
+ if (!super) {
+ superindex = 0;
+ supervftbl = NULL;
+
+ } else {
+ superindex = super->index;
+ supervftbl = super->vftbl;
+ }
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritrestart(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
+
var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (s1 == d) {
- M_MOV(s1, REG_ITMP1);
- s1 = REG_ITMP1;
+
+ /* calculate interface checkcast code size */
+
+ s2 = 8;
+ if (!super)
+ s2 += (showdisassemble ? 2 : 0);
+
+ /* calculate class checkcast code size */
+
+ s3 = 10 /* 10 + (s1 == REG_ITMP1) */;
+ if (!super)
+ s3 += (showdisassemble ? 2 : 0);
+
+ /* if class is not resolved, check which code to call */
+
+ if (!super) {
+ M_BEQZ(s1, 5 + (showdisassemble ? 2 : 0) + s2 + 2 + s3);
+ M_NOP;
+
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_flags,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
}
- M_CLR(d);
- if (iptr->op1) { /* class/interface */
- if (super->flags & ACC_INTERFACE) { /* interface */
- M_BEQZ(s1, 8);
- M_NOP;
- M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
- M_ILD(REG_ITMP2, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
- M_IADD_IMM(REG_ITMP2, - super->index, REG_ITMP2);
- M_BLEZ(REG_ITMP2, 3);
+
+ a = dseg_adds4(cd, 0); /* super->flags */
+ M_ILD(REG_ITMP2, REG_PV, a);
+ M_AND_IMM(REG_ITMP2, ACC_INTERFACE, REG_ITMP2);
+ M_BEQZ(REG_ITMP2, 1 + s2 + 2);
+ M_NOP;
+ }
+
+ /* interface checkcast code */
+
+ if (!super || super->flags & ACC_INTERFACE) {
+ if (super) {
+ M_BEQZ(s1, 1 + s2);
M_NOP;
- M_ALD(REG_ITMP1, REG_ITMP1,
- OFFSET(vftbl_t, interfacetable[0]) -
- super->index * sizeof(methodptr*));
- M_CMPULT(REG_ZERO, REG_ITMP1, d); /* REG_ITMP1 != 0 */
+
+ } else {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_interface,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
}
- else { /* class */
- /*
- s2 = super->vftbl->diffval;
- M_BEQZ(s1, 5);
+ }
+
+ M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
+ M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
+ M_IADD_IMM(REG_ITMP3, -superindex, REG_ITMP3);
+ M_BLEZ(REG_ITMP3, 0);
+ codegen_addxcastrefs(cd, mcodeptr);
+ M_NOP;
+ M_ALD(REG_ITMP3, REG_ITMP2,
+ OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*));
+ M_BEQZ(REG_ITMP3, 0);
+ codegen_addxcastrefs(cd, mcodeptr);
+ M_NOP;
+
+ if (!super) {
+ M_BR(1 + s3);
M_NOP;
- M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
- M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl_t, baseval));
- M_IADD_IMM(REG_ITMP1, - super->vftbl->baseval, REG_ITMP1);
- M_CMPULT_IMM(REG_ITMP1, s2 + 1, d);
- */
+ }
+ }
- M_BEQZ(s1, 9);
+ /* class checkcast code */
+
+ if (!super || !(super->flags & ACC_INTERFACE)) {
+ if (super) {
+ M_BEQZ(s1, 1 + s3);
M_NOP;
- M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
- a = dseg_addaddress(cd, (void *) super->vftbl);
- M_ALD(REG_ITMP2, REG_PV, a);
+
+ } else {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_class,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
+ a = dseg_addaddress(cd, (void *) supervftbl);
+ M_ALD(REG_ITMP3, REG_PV, a);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstart(cd, (u1 *) mcodeptr - cd->mcodebase);
+ codegen_threadcritstart(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
- M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl_t, baseval));
- M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, baseval));
- M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, diffval));
+ M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, baseval));
+/* if (s1 != REG_ITMP1) { */
+/* M_ILD(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, baseval)); */
+/* M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval)); */
+/* #if defined(USE_THREADS) && defined(NATIVE_THREADS) */
+/* codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase); */
+/* #endif */
+/* M_ISUB(REG_ITMP2, REG_ITMP1, REG_ITMP2); */
+/* } else { */
+ M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, baseval));
+ M_ISUB(REG_ITMP2, REG_ITMP3, REG_ITMP2);
+ M_ALD(REG_ITMP3, REG_PV, a);
+ M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
- M_ISUB(REG_ITMP1, REG_ITMP3, REG_ITMP1);
- M_CMPULT(REG_ITMP2, REG_ITMP1, d);
- M_XOR_IMM(d, 1, d);
-
- }
- }
- else
- panic ("internal error: no inlined array instanceof");
+/* } */
+ M_CMPULT(REG_ITMP3, REG_ITMP2, REG_ITMP3);
+ M_BNEZ(REG_ITMP3, 0);
+ codegen_addxcastrefs(cd, mcodeptr);
+ M_NOP;
}
+ d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ M_INTMOVE(s1, d);
store_reg_to_var_int(iptr->dst, d);
+ }
break;
- case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
-
+ case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
/* op1: 0 == array, 1 == class */
/* val.a: (classinfo*) superclass */
-/* superclass is an interface:
- *
- * OK if ((sub == NULL) ||
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL));
- *
- * superclass is a class:
- *
- * OK if ((sub == NULL) || (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ /* superclass is an interface:
+ *
+ * return (sub != NULL) &&
+ * (sub->vftbl->interfacetablelength > super->index) &&
+ * (sub->vftbl->interfacetable[-super->index] != NULL);
+ *
+ * superclass is a class:
+ *
+ * return ((sub != NULL) && (0
+ * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
+ * super->vftbl->diffval));
+ */
{
- classinfo *super = (classinfo *) iptr->val.a;
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
+
+ super = (classinfo *) iptr->val.a;
+
+ if (!super) {
+ superindex = 0;
+ supervftbl = NULL;
+
+ } else {
+ superindex = super->index;
+ supervftbl = super->vftbl;
+ }
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritrestart(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
var_to_reg_int(s1, src, REG_ITMP1);
- if (iptr->op1) { /* class/interface */
- if (super->flags & ACC_INTERFACE) { /* interface */
- M_BEQZ(s1, 9);
- M_NOP;
- M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
- M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
- M_IADD_IMM(REG_ITMP3, - super->index, REG_ITMP3);
- M_BLEZ(REG_ITMP3, 0);
- codegen_addxcastrefs(cd, mcodeptr);
- M_NOP;
- M_ALD(REG_ITMP3, REG_ITMP2,
- OFFSET(vftbl_t, interfacetable[0]) -
- super->index * sizeof(methodptr*));
- M_BEQZ(REG_ITMP3, 0);
- codegen_addxcastrefs(cd, mcodeptr);
+ d = reg_of_var(rd, iptr->dst, REG_ITMP2);
+ if (s1 == d) {
+ M_MOV(s1, REG_ITMP1);
+ s1 = REG_ITMP1;
+ }
+
+ /* calculate interface instanceof code size */
+
+ s2 = 7;
+ if (!super)
+ s2 += (showdisassemble ? 2 : 0);
+
+ /* calculate class instanceof code size */
+
+ s3 = 8;
+ if (!super)
+ s3 += (showdisassemble ? 2 : 0);
+
+ M_CLR(d);
+
+ /* if class is not resolved, check which code to call */
+
+ if (!super) {
+ M_BEQZ(s1, 5 + (showdisassemble ? 2 : 0) + s2 + 2 + s3);
+ M_NOP;
+
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_flags,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = dseg_adds4(cd, 0); /* super->flags */
+ M_ILD(REG_ITMP3, REG_PV, a);
+ M_AND_IMM(REG_ITMP3, ACC_INTERFACE, REG_ITMP3);
+ M_BEQZ(REG_ITMP3, 1 + s2 + 2);
+ M_NOP;
+ }
+
+ /* interface instanceof code */
+
+ if (!super || (super->flags & ACC_INTERFACE)) {
+ if (super) {
+ M_BEQZ(s1, 1 + s2);
M_NOP;
- } else { /* class */
+ } else {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_interface,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
+ M_ILD(REG_ITMP3, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
+ M_IADD_IMM(REG_ITMP3, -superindex, REG_ITMP3);
+ M_BLEZ(REG_ITMP3, 3);
+ M_NOP;
+ M_ALD(REG_ITMP1, REG_ITMP1,
+ OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*));
+ M_CMPULT(REG_ZERO, REG_ITMP1, d); /* REG_ITMP1 != 0 */
- /*
- s2 = super->vftbl->diffval;
- M_BEQZ(s1, 6 + (s2 != 0));
+ if (!super) {
+ M_BR(1 + s3);
M_NOP;
- M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
- M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl_t, baseval));
- M_IADD_IMM(REG_ITMP1, - super->vftbl->baseval, REG_ITMP1);
- if (s2 == 0) {
- M_BNEZ(REG_ITMP1, 0);
- }
- else{
- M_CMPULT_IMM(REG_ITMP1, s2 + 1, REG_ITMP2);
- M_BEQZ(REG_ITMP2, 0);
- }
- */
+ }
+ }
- M_BEQZ(s1, 10 + (s1 == REG_ITMP1));
+ /* class instanceof code */
+
+ if (!super || !(super->flags & ACC_INTERFACE)) {
+ if (super) {
+ M_BEQZ(s1, 1 + s3);
M_NOP;
- M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
- a = dseg_addaddress(cd, (void *) super->vftbl);
- M_ALD(REG_ITMP3, REG_PV, a);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstart(cd, (u1 *) mcodeptr - cd->mcodebase);
-#endif
- M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, baseval));
- if (s1 != REG_ITMP1) {
- M_ILD(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, baseval));
- M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
+
+ } else {
+ codegen_addpatchref(cd, mcodeptr,
+ PATCHER_checkcast_instanceof_class,
+ (constant_classref *) iptr->target);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ M_ALD(REG_ITMP1, s1, OFFSET(java_objectheader, vftbl));
+ a = dseg_addaddress(cd, supervftbl);
+ M_ALD(REG_ITMP2, REG_PV, a);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase);
+ codegen_threadcritstart(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
- M_ISUB(REG_ITMP2, REG_ITMP1, REG_ITMP2);
- } else {
- M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, baseval));
- M_ISUB(REG_ITMP2, REG_ITMP3, REG_ITMP2);
- M_ALD(REG_ITMP3, REG_PV, a);
- M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
+ M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl_t, baseval));
+ M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, baseval));
+ M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, diffval));
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase);
+ codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase);
#endif
- }
- M_CMPULT(REG_ITMP3, REG_ITMP2, REG_ITMP3);
- M_BNEZ(REG_ITMP3, 0);
-
- codegen_addxcastrefs(cd, mcodeptr);
- M_NOP;
- }
-
- } else
- panic ("internal error: no inlined array checkcast");
+ M_ISUB(REG_ITMP1, REG_ITMP3, REG_ITMP1);
+ M_CMPULT(REG_ITMP2, REG_ITMP1, d);
+ M_XOR_IMM(d, 1, d);
}
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_INTMOVE(s1, d);
store_reg_to_var_int(iptr->dst, d);
+ }
break;
case ICMD_CHECKASIZE: /* ..., size ==> ..., size */
}
}
+ /* is patcher function set? */
+
+ if (iptr->target) {
+ codegen_addpatchref(cd, mcodeptr,
+ (functionptr) iptr->target, iptr->val.a);
+
+ if (showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ a = 0;
+
+ } else {
+ a = (ptrint) iptr->val.a;
+ }
+
/* a0 = dimension count */
ICONST(rd->argintregs[0], iptr->op1);
/* a1 = arraydescriptor */
- a = dseg_addaddress(cd, iptr->val.a);
+ a = dseg_addaddress(cd, a);
M_ALD(rd->argintregs[1], REG_PV, a);
/* a2 = pointer to dimensions = stack pointer */
M_INTMOVE(REG_SP, rd->argintregs[2]);
- a = dseg_addaddress(cd, (void*) builtin_nmultianewarray);
+ a = dseg_addaddress(cd, BUILTIN_multianewarray);
M_ALD(REG_ITMP3, REG_PV, a);
M_JSR(REG_RA, REG_ITMP3);
M_NOP;
- s1 = (int)((u1*) mcodeptr - cd->mcodebase);
+ s1 = (s4)((u1 *) mcodeptr - cd->mcodebase);
if (s1 <= 32768)
M_LDA (REG_PV, REG_RA, -s1);
else {
for (pref = cd->patchrefs; pref != NULL; pref = pref->next) {
/* check code segment size */
- MCODECHECK(5);
+
+ MCODECHECK(22 + 4 + 1);
/* Get machine code which is patched back in later. The call is */
/* 2 instruction words long. */
+
xcodeptr = (s4 *) (cd->mcodebase + pref->branchpos);
/* We need to split this, because an unaligned 8 byte read causes */
/* a SIGSEGV. */
+
mcode = ((u8) xcodeptr[1] << 32) + (u4) xcodeptr[0];
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ /* create a virtual java_objectheader */
+
+ /* align data structure to 8-byte */
+
+ ALIGNCODENOP;
+
+ *((ptrint *) (mcodeptr + 0)) = 0; /* vftbl */
+ *((ptrint *) (mcodeptr + 2)) = (ptrint) get_dummyLR(); /* monitorPtr */
+
+ mcodeptr += 2 * 2; /* mcodeptr is a `u4*' pointer */
+#endif
+
/* patch in the call to call the following code (done at compile */
/* time) */
mcodeptr = xcodeptr; /* set mcodeptr to patch position */
M_BRS(tmpmcodeptr - (xcodeptr + 1));
- M_NOP;
+ M_MOV(REG_RA, REG_ITMP3); /* branch delay slot */
mcodeptr = tmpmcodeptr; /* restore the current mcodeptr */
- /* move class pointer into REG_ITMP1 */
- a = dseg_addaddress(cd, pref->ref);
- M_ALD(REG_ITMP1, REG_PV, a);
+ /* create stack frame */
+
+ M_LSUB_IMM(REG_SP, 5 * 8, REG_SP);
+
+ /* move return address onto stack */
+
+ M_AST(REG_RA, REG_SP, 4 * 8);
+ M_MOV(REG_ITMP3, REG_RA); /* restore return address */
+
+ /* move pointer to java_objectheader onto stack */
+
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ M_MOV(REG_RA, REG_ITMP3); /* save return address */
+ M_BRS(0);
+ M_NOP;
+ M_LSUB_IMM(REG_RA, 6 * 4 + 2 * 8, REG_RA);
+ M_AST(REG_RA, REG_SP, 3 * 8);
+ M_MOV(REG_ITMP3, REG_RA); /* restore return address */
+#else
+ M_AST(REG_ZERO, REG_SP, 3 * 8);
+#endif
+
+ /* move machine code onto stack */
- /* move machine code into REG_ITMP2 */
a = dseg_adds8(cd, mcode);
- M_LLD(REG_ITMP2, REG_PV, a);
+ M_LLD(REG_ITMP3, REG_PV, a);
+ M_LST(REG_ITMP3, REG_SP, 2 * 8);
+
+ /* move class/method/field reference onto stack */
+
+ a = dseg_addaddress(cd, pref->ref);
+ M_ALD(REG_ITMP3, REG_PV, a);
+ M_AST(REG_ITMP3, REG_SP, 1 * 8);
+
+ /* move patcher function pointer onto stack */
- a = dseg_addaddress(cd, pref->asmwrapper);
+ a = dseg_addaddress(cd, pref->patcher);
+ M_ALD(REG_ITMP3, REG_PV, a);
+ M_AST(REG_ITMP3, REG_SP, 0 * 8);
+
+ a = dseg_addaddress(cd, asm_wrapper_patcher);
M_ALD(REG_ITMP3, REG_PV, a);
M_JMP(REG_ITMP3);
M_NOP;
}
-/* function createcompilerstub *************************************************
+/* createcompilerstub **********************************************************
- creates a stub routine which calls the compiler
+ Creates a stub routine which calls the compiler.
*******************************************************************************/
#endif
#define NATIVESTUB_SIZE (54 + 4 + NATIVESTUB_THREAD_EXTRA - 1)
-#define NATIVESTUB_STATIC_SIZE 5
+#define NATIVESTUB_STATIC_SIZE (18 + 4 + 1)
#define NATIVESTUB_VERBOSE_SIZE (50 + 17)
-#define NATIVESTUB_OFFSET 10
+#define NATIVESTUB_OFFSET 11
u1 *createnativestub(functionptr f, methodinfo *m)
inlining_setup(m, id);
reg_setup(m, rd, id);
- method_descriptor2types(m); /* set paramcount and paramtypes */
+ method_descriptor2types(m); /* set paramcount and paramtypes */
stubsize = NATIVESTUB_SIZE; /* calculate nativestub size */
*(cs-6) = (u8) m;
*(cs-7) = (u8) builtin_displaymethodstop;
*(cs-8) = (u8) m->class;
- *(cs-9) = (u8) asm_check_clinit;
+ *(cs-9) = (u8) asm_wrapper_patcher;
*(cs-10) = (u8) NULL; /* filled with machine code */
+ *(cs-11) = (u8) PATCHER_clinit;
M_LDA(REG_SP, REG_SP, -NATIVESTUB_STACK * 8); /* build up stackframe */
M_LST(REG_RA, REG_SP, 0); /* store return address */
/* if function is static, check for initialized */
- if (m->flags & ACC_STATIC && !m->class->initialized) {
+ if ((m->flags & ACC_STATIC) && !m->class->initialized) {
codegen_addpatchref(cd, mcodeptr, NULL, NULL);
}
/* max. 50 instructions */
+
if (runverbose) {
s4 p;
s4 t;
/* save integer argument registers */
for (p = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
- M_LST(rd->argintregs[p], REG_SP, (2 + p) * 8);
+ M_LST(rd->argintregs[p], REG_SP, (2 + p) * 8);
}
/* save and copy float arguments into integer registers */
}
/* save argument registers on stack -- if we have to */
+
if ((m->flags & ACC_STATIC && m->paramcount > (INT_ARG_CNT - 2)) || m->paramcount > (INT_ARG_CNT - 1)) {
s4 i;
s4 paramshiftcnt = (m->flags & ACC_STATIC) ? 2 : 1;
if (pref) {
/* Get machine code which is patched back in later. The call is */
/* 2 instruction words long. */
+
xcodeptr = (s4 *) (cd->mcodebase + pref->branchpos);
/* We need to split this, because an unaligned 8 byte read causes */
/* a SIGSEGV. */
+
*(cs-10) = ((u8) xcodeptr[1] << 32) + (u4) xcodeptr[0];
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ /* create a virtual java_objectheader */
+
+ /* align data structure to 8-byte */
+
+ ALIGNCODENOP;
+
+ *((ptrint *) (mcodeptr + 0)) = 0; /* vftbl */
+ *((ptrint *) (mcodeptr + 2)) = (ptrint) get_dummyLR(); /* monitorPtr */
+
+ mcodeptr += 2 * 2; /* mcodeptr is a `u4*' pointer */
+#endif
+
/* patch in the call to call the following code (done at compile */
/* time) */
mcodeptr = xcodeptr; /* set mcodeptr to patch position */
M_BRS(tmpmcodeptr - (xcodeptr + 1));
- M_NOP;
+ M_MOV(REG_RA, REG_ITMP3); /* branch delay slot */
mcodeptr = tmpmcodeptr; /* restore the current mcodeptr */
- /* move class pointer into REG_ITMP1 */
- M_ALD(REG_ITMP1, REG_PV, -8 * 8); /* class */
+ /* create stack frame */
+
+ M_LSUB_IMM(REG_SP, 5 * 8, REG_SP);
- /* move machine code into REG_ITMP2 */
- M_LLD(REG_ITMP2, REG_PV, -10 * 8); /* machine code */
+ /* move return address onto stack */
- M_ALD(REG_ITMP3, REG_PV, -9 * 8); /* asm_check_clinit */
+ M_AST(REG_RA, REG_SP, 4 * 8);
+ M_MOV(REG_ITMP3, REG_RA); /* restore return address */
+
+ /* move pointer to java_objectheader onto stack */
+
+#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ M_MOV(REG_RA, REG_ITMP3); /* save return address */
+ M_BRS(0);
+ M_NOP;
+ M_LSUB_IMM(REG_RA, 6 * 4 + 2 * 8, REG_RA);
+ M_AST(REG_RA, REG_SP, 3 * 8);
+ M_MOV(REG_ITMP3, REG_RA); /* restore return address */
+#else
+ M_AST(REG_ZERO, REG_SP, 3 * 8);
+#endif
+
+ /* move machine code onto stack */
+
+ M_LLD(REG_ITMP3, REG_PV, -10 * 8); /* machine code */
+ M_LST(REG_ITMP3, REG_SP, 2 * 8);
+
+ /* move class reference onto stack */
+
+ M_ALD(REG_ITMP3, REG_PV, -8 * 8); /* class */
+ M_AST(REG_ITMP3, REG_SP, 1 * 8);
+
+ /* move patcher function pointer onto stack */
+
+ M_ALD(REG_ITMP3, REG_PV, -11 * 8); /* patcher function */
+ M_AST(REG_ITMP3, REG_SP, 0 * 8);
+
+ M_ALD(REG_ITMP3, REG_PV, -9 * 8); /* asm_wrapper_patcher */
M_JMP(REG_ITMP3);
M_NOP;
}
/* Check if the stub size is big enough to hold the whole stub generated. */
/* If not, this can lead into unpredictable crashes, because of heap */
/* corruption. */
+
if ((s4) ((ptrint) mcodeptr - (ptrint) s) > stubsize * sizeof(u8)) {
throw_cacao_exception_exit(string_java_lang_InternalError,
"Native stub size %d is to small for current stub size %d",
{
u1 *e = p + bytelen;
long psize = sysconf(_SC_PAGESIZE);
- p -= (long) p & (psize-1);
- e += psize - ((((long) e - 1) & (psize-1)) + 1);
+ p -= (long) p & (psize - 1);
+ e += psize - ((((long) e - 1) & (psize - 1)) + 1);
bytelen = e-p;
- mprotect(p, bytelen, PROT_READ|PROT_WRITE|PROT_EXEC);
+ mprotect(p, bytelen, PROT_READ | PROT_WRITE | PROT_EXEC);
}
--- /dev/null
+/* src/vm/jit/mips/patcher.c - MIPS code patching functions
+
+ Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
+ R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
+ C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
+ Institut f. Computersprachen - TU Wien
+
+ This file is part of CACAO.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ Contact: cacao@complang.tuwien.ac.at
+
+ Authors: Christian Thalinger
+
+ Changes:
+
+ $Id: patcher.c 2444 2005-05-11 12:51:53Z twisti $
+
+*/
+
+
+#include "config.h"
+#include "vm/jit/mips/types.h"
+
+#include "vm/builtin.h"
+#include "vm/field.h"
+#include "vm/initialize.h"
+#include "vm/options.h"
+#include "vm/references.h"
+#include "vm/jit/asmpart.h"
+#include "vm/jit/helper.h"
+
+
+/* patcher_get_putstatic *******************************************************
+
+ Machine code:
+
+ <patched call position>
+ dfc1ffb8 ld at,-72(s8)
+ fc250000 sd a1,0(at)
+
+*******************************************************************************/
+
+bool patcher_get_putstatic(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ unresolved_field *uf;
+ u1 *pv;
+ fieldinfo *fi;
+ s2 offset;
+
+ /* get stuff from the stack */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ uf = (unresolved_field *) *((ptrint *) (sp + 0 * 8));
+ pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+ /* calculate and set the new return address */
+
+ /* (the patched-in call is two instruction words, so the original */
+ /* code starts 8 bytes before the return address) */
+
+ ra = ra - 2 * 4;
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the fieldinfo */
+
+ if (!(fi = helper_resolve_fieldinfo(uf)))
+ return false;
+
+ /* check if the field's class is initialized */
+
+ if (!fi->class->initialized)
+ if (!initialize_class(fi->class))
+ return false;
+
+ /* patch back original code */
+
+ /* (two u4 stores: an unaligned 8-byte access would fault, like the */
+ /* split read on the codegen side) */
+
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* get the offset from machine instruction */
+
+ offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+ /* patch the field value's address */
+
+ *((ptrint *) (pv + offset)) = (ptrint) &(fi->value);
+
+ /* synchronize instruction cache */
+
+ /* NOTE(review): when showdisassemble is set, ra was advanced past */
+ /* the restored words above, so this flushes the nop's instead of */
+ /* the patched instructions -- confirm this is intended */
+
+ docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_get_putfield ********************************************************
+
+ Machine code:
+
+ <patched call position>
+ 8ee90020 lw a5,32(s7)
+
+*******************************************************************************/
+
+bool patcher_get_putfield(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ unresolved_field *uf;
+ fieldinfo *fi;
+
+ /* get stuff from the stack (this patcher needs no pv: it patches */
+ /* the instruction itself, not a data-segment slot) */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ uf = (unresolved_field *) *((ptrint *) (sp + 0 * 8));
+
+ /* calculate and set the new return address */
+
+ ra = ra - 2 * 4;
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the fieldinfo */
+
+ if (!(fi = helper_resolve_fieldinfo(uf)))
+ return false;
+
+ /* patch back original code */
+
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* patch the field's offset */
+
+ /* NOTE(review): the (s2) cast sign-extends before the OR -- a */
+ /* field offset >= 0x8000 would also flip the instruction's upper */
+ /* 16 bits; presumably field offsets stay below 32 KB here */
+
+ *((u4 *) ra) |= (s2) (fi->offset & 0x0000ffff);
+
+ /* synchronize instruction cache */
+
+ docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_builtin_new *********************************************************
+
+ Machine code:
+
+ dfc4ff98 ld a0,-104(s8)
+ <patched call position>
+ dfd9ff90 ld t9,-112(s8)
+ 0320f809 jalr t9
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_builtin_new(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ constant_classref *cr;
+ u1 *pv;
+ classinfo *c;
+ s2 offset;
+
+ /* get stuff from the stack */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+ pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+ /* calculate and set the new return address */
+
+ /* (back up over the `ld a0' that precedes the two patched words -- */
+ /* see the machine-code listing in the header comment) */
+
+ ra = ra - (4 + 2 * 4);
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the classinfo */
+
+ if (!(c = helper_resolve_classinfo(cr)))
+ return false;
+
+ /* patch back original code */
+
+ *((u4 *) (ra + 1 * 4)) = mcode;
+ *((u4 *) (ra + 2 * 4)) = mcode >> 32;
+
+ /* get the offset from machine instruction */
+
+ /* (*ra is the `ld a0' -- its displacement names the data-segment */
+ /* slot that receives the resolved classinfo) */
+
+ offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+ /* patch the classinfo pointer */
+
+ *((ptrint *) (pv + offset)) = (ptrint) c;
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* get the offset from machine instruction */
+
+ /* NOTE(review): ra + 4 should land on the `ld t9' per the header */
+ /* listing -- verify the arithmetic for the showdisassemble case */
+
+ offset = (s2) (*((u4 *) (ra + 4)) & 0x0000ffff);
+
+ /* patch new function address */
+
+ *((ptrint *) (pv + offset)) = (ptrint) BUILTIN_new;
+
+ /* synchronize instruction cache */
+
+ docacheflush(ra + 4, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_builtin_newarray ****************************************************
+
+ Machine code:
+
+ dfc5ffa0 ld a1,-96(s8)
+ <patched call position>
+ dfd9ff98 ld t9,-104(s8)
+ 0320f809 jalr t9
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_builtin_newarray(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ constant_classref *cr;
+ u1 *pv;
+ classinfo *c;
+ s2 offset;
+
+ /* get stuff from the stack */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+ pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+ /* calculate and set the new return address */
+
+ /* (back up over the `ld a1' that precedes the two patched words -- */
+ /* see the machine-code listing in the header comment) */
+
+ ra = ra - (4 + 2 * 4);
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the classinfo */
+
+ if (!(c = helper_resolve_classinfo(cr)))
+ return false;
+
+ /* patch back original code */
+
+ *((u4 *) (ra + 1 * 4)) = mcode;
+ *((u4 *) (ra + 2 * 4)) = mcode >> 32;
+
+ /* get the offset from machine instruction */
+
+ /* (*ra is the `ld a1' -- its displacement names the data-segment */
+ /* slot that receives the class' vftbl pointer) */
+
+ offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+ /* patch the class' vftbl pointer */
+
+ *((ptrint *) (pv + offset)) = (ptrint) c->vftbl;
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* get the offset from machine instruction */
+
+ offset = (s2) (*((u4 *) (ra + 4)) & 0x0000ffff);
+
+ /* patch new function address */
+
+ *((ptrint *) (pv + offset)) = (ptrint) BUILTIN_newarray;
+
+ /* synchronize instruction cache */
+
+ docacheflush(ra + 4, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_builtin_multianewarray **********************************************
+
+ Machine code:
+
+ <patched call position>
+ 24040002 addiu a0,zero,2
+ dfc5ff90 ld a1,-112(s8)
+ 03a03025 move a2,sp
+ dfd9ff88 ld t9,-120(s8)
+ 0320f809 jalr t9
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_builtin_multianewarray(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ constant_classref *cr;
+ u1 *pv;
+ classinfo *c;
+ s2 offset;
+
+ /* get stuff from the stack */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+ pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+ /* calculate and set the new return address */
+
+ ra = ra - 2 * 4;
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the classinfo */
+
+ if (!(c = helper_resolve_classinfo(cr)))
+ return false;
+
+ /* patch back original code */
+
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* get the offset from machine instruction */
+
+ /* (ra + 4 is the `ld a1' whose displacement names the data-segment */
+ /* slot holding the class' vftbl pointer) */
+
+ offset = (s2) (*((u4 *) (ra + 4)) & 0x0000ffff);
+
+ /* patch the class' vftbl pointer */
+
+ /* NOTE(review): unlike the other builtin patchers, no function */
+ /* address is patched here -- codegen appears to emit the */
+ /* BUILTIN_multianewarray address unconditionally; confirm */
+
+ *((ptrint *) (pv + offset)) = (ptrint) c->vftbl;
+
+ /* synchronize instruction cache */
+
+ docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_builtin_arraycheckcast **********************************************
+
+ Machine code:
+
+ dfc5ffc0 ld a1,-64(s8)
+ <patched call position>
+ dfd9ffb8 ld t9,-72(s8)
+ 0320f809 jalr t9
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_builtin_arraycheckcast(u1 *sp)
+{
+ u1 *ra;
+ java_objectheader *o;
+ u8 mcode;
+ constant_classref *cr;
+ u1 *pv;
+ classinfo *c;
+ s2 offset;
+
+ /* get stuff from the stack */
+
+ ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+ o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+ mcode = *((u8 *) (sp + 1 * 8));
+ cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+ pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+ /* calculate and set the new return address */
+
+ /* (back up over the `ld a1' plus the two patched words -- see the */
+ /* machine-code listing in the header comment) */
+
+ ra = ra - 3 * 4;
+ *((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+ /* enter a monitor on the patching position */
+
+ builtin_monitorenter(o);
+
+ /* check if the position has already been patched */
+
+ if (o->vftbl) {
+ builtin_monitorexit(o);
+
+ return true;
+ }
+#endif
+
+ /* get the classinfo */
+
+ if (!(c = helper_resolve_classinfo(cr)))
+ return false;
+
+ /* patch back original code */
+
+ *((u4 *) (ra + 1 * 4)) = mcode;
+ *((u4 *) (ra + 2 * 4)) = mcode >> 32;
+
+ /* get the offset from machine instruction */
+
+ /* (*ra is the `ld a1' -- its displacement names the data-segment */
+ /* slot that receives the class' vftbl pointer) */
+
+ offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+ /* patch the class' vftbl pointer */
+
+ *((ptrint *) (pv + offset)) = (ptrint) c->vftbl;
+
+ /* if we show disassembly, we have to skip the nop */
+
+ if (showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* get the offset from machine instruction */
+
+ offset = (s2) (*((u4 *) (ra + 1 * 4)) & 0x0000ffff);
+
+ /* patch new function address */
+
+ *((ptrint *) (pv + offset)) = (ptrint) BUILTIN_arraycheckcast;
+
+ /* synchronize instruction cache */
+
+ docacheflush(ra + 4, 2 * 4);
+
+#if defined(USE_THREADS)
+ /* this position has been patched */
+
+ o->vftbl = (vftbl_t *) 1;
+
+ /* leave the monitor on the patching position */
+
+ builtin_monitorexit(o);
+#endif
+
+ return true;
+}
+
+
+/* patcher_builtin_arrayinstanceof *********************************************
+
+ Machine code:
+
+ dfc5fe98 ld a1,-360(s8)
+ <patched call position>
+ dfd9fe90 ld t9,-368(s8)
+ 0320f809 jalr t9
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_builtin_arrayinstanceof(u1 *sp)
+{
+	/* Resolves the class referenced by an arrayinstanceof site and patches
+	   the class' vftbl pointer and the BUILTIN_arrayinstanceof address
+	   into the method's data segment, then restores the two overwritten
+	   instructions. Returns false only if class resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  constant_classref argument,
+	     sp - 2*8  presumably the method's pv -- TODO(review): confirm */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	constant_classref *cr;
+	u1 *pv;
+	classinfo *c;
+	s4 offset;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+	pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the call site lies 3 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 3 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the classinfo */
+
+	if (!(c = helper_resolve_classinfo(cr)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word: the overwritten call occupies the
+	   two instructions at ra + 4 and ra + 8 */
+
+	*((u4 *) (ra + 1 * 4)) = mcode;
+	*((u4 *) (ra + 2 * 4)) = mcode >> 32;
+
+	/* get the offset from machine instruction */
+
+	/* low 16 bits of the ld instruction are its s8-relative displacement */
+
+	offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+	/* patch the class' vftbl pointer */
+
+	*((ptrint *) (pv + offset)) = (ptrint) c->vftbl;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* get the offset from machine instruction */
+
+	offset = (s2) (*((u4 *) (ra + 1 * 4)) & 0x0000ffff);
+
+	/* patch new function address */
+
+	*((ptrint *) (pv + offset)) = (ptrint) BUILTIN_arrayinstanceof;
+
+	/* synchronize instruction cache */
+
+	/* NOTE(review): ra was advanced above when showdisassemble is set, so
+	   this flush range no longer covers the back-patched words at the
+	   original position -- confirm this is intended */
+
+	docacheflush(ra + 4, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_invokestatic_special ************************************************
+
+ Machine code:
+
+ <patched call position>
+ dfdeffc0 ld s8,-64(s8)
+ 03c0f809 jalr s8
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_invokestatic_special(u1 *sp)
+{
+	/* Resolves the method of an invokestatic/invokespecial call site and
+	   patches its stub routine address into the method's data segment,
+	   after restoring the two overwritten instructions. Returns false
+	   only if method resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  unresolved_method argument,
+	     sp - 2*8  presumably the method's pv -- TODO(review): confirm */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	unresolved_method *um;
+	u1 *pv;
+	methodinfo *m;
+	s4 offset;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	um = (unresolved_method *) *((ptrint *) (sp + 0 * 8));
+	pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+	/* calculate and set the new return address */
+
+	/* here the patched call is the first of the overwritten instructions,
+	   2 instructions before the saved ra (see machine code above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the methodinfo */
+
+	if (!(m = helper_resolve_methodinfo(um)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* get the offset from machine instruction */
+
+	/* low 16 bits of the ld instruction are its s8-relative displacement */
+
+	offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+	/* patch stubroutine */
+
+	*((ptrint *) (pv + offset)) = (ptrint) m->stubroutine;
+
+	/* synchronize instruction cache */
+
+	/* NOTE(review): ra was advanced above when showdisassemble is set, so
+	   this flush range no longer covers the back-patched words at the
+	   original position -- confirm this is intended */
+
+	docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_invokevirtual *******************************************************
+
+ Machine code:
+
+ <patched call position>
+ dc990000 ld t9,0(a0)
+ df3e0040 ld s8,64(t9)
+ 03c0f809 jalr s8
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_invokevirtual(u1 *sp)
+{
+	/* Resolves the method of an invokevirtual call site and ORs the
+	   method's vftbl table offset into the displacement field of the
+	   'ld s8,...(t9)' instruction, after restoring the two overwritten
+	   instructions. Returns false only if method resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  unresolved_method argument */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	unresolved_method *um;
+	methodinfo *m;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	um = (unresolved_method *) *((ptrint *) (sp + 0 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the methodinfo */
+
+	if (!(m = helper_resolve_methodinfo(um)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* patch vftbl index */
+
+	/* OR the table entry offset into the 16-bit displacement field of
+	   the 'ld s8,...(t9)' instruction at ra + 4; codegen presumably
+	   emitted it with a zero displacement -- TODO(review): confirm */
+
+	*((s4 *) (ra + 1 * 4)) |= (s4) ((OFFSET(vftbl_t, table[0]) +
+									 sizeof(methodptr) * m->vftblindex) & 0x0000ffff);
+
+	/* synchronize instruction cache */
+
+	docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_invokeinterface *****************************************************
+
+ Machine code:
+
+ <patched call position>
+ dc990000 ld t9,0(a0)
+ df39ffa0 ld t9,-96(t9)
+ df3e0018 ld s8,24(t9)
+ 03c0f809 jalr s8
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_invokeinterface(u1 *sp)
+{
+	/* Resolves the method of an invokeinterface call site and ORs the
+	   interfacetable index and the method offset into the displacement
+	   fields of the two 'ld' instructions following the patched call,
+	   after restoring the two overwritten instructions. Returns false
+	   only if method resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  unresolved_method argument */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	unresolved_method *um;
+	methodinfo *m;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	um = (unresolved_method *) *((ptrint *) (sp + 0 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the methodinfo */
+
+	if (!(m = helper_resolve_methodinfo(um)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* patch interfacetable index */
+
+	*((s4 *) (ra + 1 * 4)) |= (s4) ((OFFSET(vftbl_t, interfacetable[0]) -
+									 sizeof(methodptr*) * m->class->index) & 0x0000ffff);
+
+	/* patch method offset */
+
+	*((s4 *) (ra + 2 * 4)) |=
+		(s4) ((sizeof(methodptr) * (m - m->class->methods)) & 0x0000ffff);
+
+	/* synchronize instruction cache */
+
+	/* flush 3 words: the word patched at ra + 2 * 4 lies at bytes 8..11,
+	   which a 2 * 4 byte flush would leave stale in the icache */
+
+	docacheflush(ra, 3 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_checkcast_instanceof_flags ******************************************
+
+ Machine code:
+
+ <patched call position>
+ 8fc3ff24 lw v1,-220(s8)
+ 30630200 andi v1,v1,512
+ 1060000d beq v1,zero,0x000000001051824c
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_flags(u1 *sp)
+{
+	/* Resolves the class of a checkcast/instanceof site and patches the
+	   class' flags word into the method's data segment, after restoring
+	   the two overwritten instructions. Returns false only if class
+	   resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  constant_classref argument,
+	     sp - 2*8  presumably the method's pv -- TODO(review): confirm */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	constant_classref *cr;
+	u1 *pv;
+	classinfo *c;
+	s2 offset;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+	pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the classinfo */
+
+	if (!(c = helper_resolve_classinfo(cr)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* get the offset from machine instruction */
+
+	/* low 16 bits of the lw instruction are its s8-relative displacement */
+
+	offset = (s2) (*((u4 *) ra) & 0x0000ffff);
+
+	/* patch class flags */
+
+	*((s4 *) (pv + offset)) = (s4) c->flags;
+
+	/* synchronize instruction cache */
+
+	docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_checkcast_instanceof_interface **************************************
+
+ Machine code:
+
+ <patched call position>
+ dd030000 ld v1,0(a4)
+ 8c79001c lw t9,28(v1)
+ 27390000 addiu t9,t9,0
+   1b200082    blez     t9,0x000000001051843c
+ 00000000 nop
+ dc790000 ld t9,0(v1)
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_interface(u1 *sp)
+{
+	/* Resolves the interface class of a checkcast/instanceof site and ORs
+	   the (negated) superclass index and the interfacetable offset into
+	   the displacement fields of the instructions shown in the header
+	   comment, after restoring the two overwritten instructions.
+	   Returns false only if class resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  constant_classref argument */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	constant_classref *cr;
+	classinfo *c;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the classinfo */
+
+	if (!(c = helper_resolve_classinfo(cr)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* patch super class index */
+
+	/* the 'addiu t9,t9,0' gets the negated interface index ... */
+
+	*((s4 *) (ra + 2 * 4)) |= (s4) (-(c->index) & 0x0000ffff);
+
+	/* ... and the 'ld t9,0(v1)' gets the interfacetable displacement */
+
+	*((s4 *) (ra + 5 * 4)) |= (s4) ((OFFSET(vftbl_t, interfacetable[0]) -
+									 c->index * sizeof(methodptr*)) & 0x0000ffff);
+
+	/* synchronize instruction cache */
+
+	/* 6 words: covers both back-patched and both OR-patched positions */
+
+	docacheflush(ra, 6 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_checkcast_instanceof_class ******************************************
+
+ Machine code:
+
+ <patched call position>
+ dd030000 ld v1,0(a4)
+ dfd9ff18 ld t9,-232(s8)
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_class(u1 *sp)
+{
+	/* Resolves the super class of a checkcast/instanceof site and patches
+	   its vftbl pointer into the method's data segment, after restoring
+	   the two overwritten instructions. Returns false only if class
+	   resolution fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  constant_classref argument,
+	     sp - 2*8  presumably the method's pv -- TODO(review): confirm */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	constant_classref *cr;
+	u1 *pv;
+	classinfo *c;
+	s2 offset;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	cr = (constant_classref *) *((ptrint *) (sp + 0 * 8));
+	pv = (u1 *) *((ptrint *) (sp - 2 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra (see the
+	   machine-code layout in the header comment above) */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* get the classinfo */
+
+	if (!(c = helper_resolve_classinfo(cr)))
+		return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word */
+
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* get the offset from machine instruction */
+
+	/* the displacement sits in the 'ld t9,...(s8)' at ra + 4 */
+
+	offset = (s2) (*((u4 *) (ra + 1 * 4)) & 0x0000ffff);
+
+	/* patch super class' vftbl */
+
+	*((ptrint *) (pv + offset)) = (ptrint) c->vftbl;
+
+	/* synchronize instruction cache */
+
+	docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/* patcher_clinit **************************************************************
+
+   Patches back the two original instructions at the call position after
+   making sure that the given class has been initialized.
+
+*******************************************************************************/
+
+bool patcher_clinit(u1 *sp)
+{
+	/* Ensures the given class is initialized, then restores the two
+	   instructions that were overwritten by the patcher call. Returns
+	   false only if class initialization fails. */
+
+	/* sp points into the stack frame built by the patcher stub:
+	     sp + 3*8  return address, sp + 2*8  patching lock object,
+	     sp + 1*8  saved original instructions (u8),
+	     sp + 0*8  classinfo argument (already resolved here) */
+
+	u1 *ra;
+	java_objectheader *o;
+	u8 mcode;
+	classinfo *c;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 3 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 2 * 8));
+	mcode = *((u8 *) (sp + 1 * 8));
+	c = (classinfo *) *((ptrint *) (sp + 0 * 8));
+
+	/* calculate and set the new return address */
+
+	/* the patched call is 2 instructions before the saved ra */
+
+	ra = ra - 2 * 4;
+	*((ptrint *) (sp + 3 * 8)) = (ptrint) ra;
+
+#if defined(USE_THREADS)
+	/* enter a monitor on the patching position */
+
+	builtin_monitorenter(o);
+
+	/* check if the position has already been patched */
+
+	/* a non-NULL vftbl marks the position as already patched (set below) */
+
+	if (o->vftbl) {
+		builtin_monitorexit(o);
+
+		return true;
+	}
+#endif
+
+	/* check if the class is initialized */
+
+	if (!c->initialized)
+		if (!initialize_class(c))
+			return false;
+
+	/* patch back original code */
+
+	/* low word first, then high word; (ra + 0)/(ra + 4) is the same as
+	   the (ra + 0 * 4)/(ra + 1 * 4) spelling used in the other patchers */
+
+	*((u4 *) (ra + 0)) = mcode;
+	*((u4 *) (ra + 4)) = mcode >> 32;
+
+	/* synchronize instruction cache */
+
+	docacheflush(ra, 2 * 4);
+
+#if defined(USE_THREADS)
+	/* this position has been patched */
+
+	o->vftbl = (vftbl_t *) 1;
+
+	/* leave the monitor on the patching position */
+
+	builtin_monitorexit(o);
+#endif
+
+	return true;
+}
+
+
+/*
+ * These are local overrides for various environment variables in Emacs.
+ * Please do not remove this and leave it at the end of the file, where
+ * Emacs will automagically detect them.
+ * ---------------------------------------------------------------------
+ * Local variables:
+ * mode: c
+ * indent-tabs-mode: t
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ * vim:noexpandtab:sw=4:ts=4:
+ */