Merged sparc64 changes from trunk.
--HG--
branch : unified_variables
/* On register window machines, we need a way to force registers into */
/* the stack. Return sp. */
# ifdef SPARC
- asm(" .seg \"text\"");
+ __asm__(" .seg \"text\"");
# if defined(SVR4) || defined(NETBSD) || defined(FREEBSD)
- asm(" .globl GC_save_regs_in_stack");
- asm("GC_save_regs_in_stack:");
- asm(" .type GC_save_regs_in_stack,#function");
+ __asm__(" .globl GC_save_regs_in_stack");
+ __asm__("GC_save_regs_in_stack:");
+ __asm__(" .type GC_save_regs_in_stack,#function");
# else
- asm(" .globl _GC_save_regs_in_stack");
- asm("_GC_save_regs_in_stack:");
+ __asm__(" .globl _GC_save_regs_in_stack");
+ __asm__("_GC_save_regs_in_stack:");
# endif
# if defined(__arch64__) || defined(__sparcv9)
- asm(" save %sp,-128,%sp");
- asm(" flushw");
- asm(" ret");
- asm(" restore %sp,2047+128,%o0");
+ __asm__(" save %sp,-128,%sp");
+ __asm__(" flushw");
+ __asm__(" ret");
+ __asm__(" restore %sp,2047+128,%o0");
# else
- asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
- asm(" retl");
- asm(" mov %sp,%o0");
+ __asm__(" ta 0x3 ! ST_FLUSH_WINDOWS");
+ __asm__(" retl");
+ __asm__(" mov %sp,%o0");
# endif
# ifdef SVR4
- asm(" .GC_save_regs_in_stack_end:");
- asm(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
+ __asm__(" .GC_save_regs_in_stack_end:");
+ __asm__(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
# endif
# ifdef LINT
word GC_save_regs_in_stack() { return(0 /* sp really */);}
--> fix it
#endif
# ifdef SUNOS4
- asm(".globl _GC_clear_stack_inner");
- asm("_GC_clear_stack_inner:");
+ __asm__(".globl _GC_clear_stack_inner");
+ __asm__("_GC_clear_stack_inner:");
# else
- asm(".globl GC_clear_stack_inner");
- asm("GC_clear_stack_inner:");
- asm(".type GC_save_regs_in_stack,#function");
+ __asm__(".globl GC_clear_stack_inner");
+ __asm__("GC_clear_stack_inner:");
+ __asm__(".type GC_save_regs_in_stack,#function");
# endif
#if defined(__arch64__) || defined(__sparcv9)
- asm("mov %sp,%o2"); /* Save sp */
- asm("add %sp,2047-8,%o3"); /* p = sp+bias-8 */
- asm("add %o1,-2047-192,%sp"); /* Move sp out of the way, */
+ __asm__("mov %sp,%o2"); /* Save sp */
+ __asm__("add %sp,2047-8,%o3"); /* p = sp+bias-8 */
+ __asm__("add %o1,-2047-192,%sp"); /* Move sp out of the way, */
/* so that traps still work. */
/* Includes some extra words */
/* so we can be sloppy below. */
- asm("loop:");
- asm("stx %g0,[%o3]"); /* *(long *)p = 0 */
- asm("cmp %o3,%o1");
- asm("bgu,pt %xcc, loop"); /* if (p > limit) goto loop */
- asm("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
- asm("retl");
- asm("mov %o2,%sp"); /* Restore sp., delay slot */
+ __asm__("loop:");
+ __asm__("stx %g0,[%o3]"); /* *(long *)p = 0 */
+ __asm__("cmp %o3,%o1");
+ __asm__("bgu,pt %xcc, loop"); /* if (p > limit) goto loop */
+ __asm__("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
+ __asm__("retl");
+ __asm__("mov %o2,%sp"); /* Restore sp., delay slot */
#else
- asm("mov %sp,%o2"); /* Save sp */
- asm("add %sp,-8,%o3"); /* p = sp-8 */
- asm("clr %g1"); /* [g0,g1] = 0 */
- asm("add %o1,-0x60,%sp"); /* Move sp out of the way, */
+ __asm__("mov %sp,%o2"); /* Save sp */
+ __asm__("add %sp,-8,%o3"); /* p = sp-8 */
+ __asm__("clr %g1"); /* [g0,g1] = 0 */
+ __asm__("add %o1,-0x60,%sp"); /* Move sp out of the way, */
/* so that traps still work. */
/* Includes some extra words */
/* so we can be sloppy below. */
- asm("loop:");
- asm("std %g0,[%o3]"); /* *(long long *)p = 0 */
- asm("cmp %o3,%o1");
- asm("bgu loop "); /* if (p > limit) goto loop */
- asm("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
- asm("retl");
- asm("mov %o2,%sp"); /* Restore sp., delay slot */
+ __asm__("loop:");
+ __asm__("std %g0,[%o3]"); /* *(long long *)p = 0 */
+ __asm__("cmp %o3,%o1");
+ __asm__("bgu loop "); /* if (p > limit) goto loop */
+ __asm__("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
+ __asm__("retl");
+ __asm__("mov %o2,%sp"); /* Restore sp., delay slot */
#endif /* old SPARC */
/* First argument = %o0 = return value */
# ifdef SVR4
- asm(" .GC_clear_stack_inner_end:");
- asm(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
+ __asm__(" .GC_clear_stack_inner_end:");
+ __asm__(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
# endif
# ifdef LINT
noinst_HEADERS = \
arch.h \
machine-instr.h
-# \
-# md-asm.h
+ \
+ md-asm.h
noinst_LTLIBRARIES = libarch.la
+if ENABLE_DISASSEMBLER
+DISASS_SOURCES = \
+ disass.c
+endif
+
+
libarch_la_SOURCES = \
+ asmpart.S \
codegen.c \
codegen.h \
- md.h \
+ $(DISASS_SOURCES) \
+ emit.c \
+ patcher.c \
+ \
+ md-abi.c \
md-abi.h \
md.c \
- disass.c \
- asmpart.S \
- md-abi.c
-# patcher.c
-
+ md.h
libarch_la_LIBADD = \
$(OS_DIR)/libmd.la
-#$(srcdir)/asmpart.S: $(top_builddir)/config.h offsets.h
+$(srcdir)/asmpart.S: $(top_builddir)/config.h offsets.h
offsets.h: $(top_builddir)/src/vm/jit/tools/genoffsets $(top_builddir)/config.h
$(top_builddir)/src/vm/jit/tools/genoffsets > offsets.h
#include "offsets.h"
#include "md-asm.h"
+ .register %g2,#scratch /* define as scratch */
+ .register %g3,#scratch /* XXX reserve for application */
.text
/* export functions ***********************************************************/
.global asm_vm_call_method_double
.global asm_vm_call_method_exception_handler
+ .global asm_call_jit_compiler
+
+ .global asm_handle_exception
+ .global asm_handle_nat_exception
+
+
+ .global asm_abstractmethoderror
+
+ .global asm_criticalsections
+ .global asm_getclassvalues_atomic
+
/* asm_vm_call_method ******************************************************
* *
.align 8 /* v9: All data types are aligned to their size */
-/*x*/ .xword 0 /* catch type all */
-/*x*/ .word calljava_xhandler2 /* handler pc */
-/*x*/ .word calljava_xhandler2 /* end pc */
-/*x*/ .word asm_vm_call_method /* start pc */
+ .xword 0 /* catch type all */
+ .xword 0 /* handler pc */
+ .xword 0 /* end pc */
+ .xword 0 /* start pc */
.word 1 /* extable size */
.word 0 /* ALIGNMENT PADDING */
-/*x*/ .xword 0 /* line number table start */
-/*x*/ .xword 0 /* line number table size */
+ .xword 0 /* line number table start */
+ .xword 0 /* line number table size */
.word 0 /* ALIGNMENT PADDING */
.word 0 /* fltsave */
.word 1 /* intsave */
.word 0 /* isleaf */
.word 0 /* IsSync */
.word 0 /* frame size */
-/*x*/ .xword 0 /* method pointer (pointer to name)*/
+ .xword 0 /* method pointer (pointer to name)*/
asm_vm_call_method:
asm_vm_call_method_int:
asm_vm_call_method_float:
asm_vm_call_method_double:
- save %sp, -128, %sp /* minimal (reg-window) frame */
+ save %sp, -144, %sp /* 16 reg-save + 2 */
/* todo: copy fp registers */
mov %i0,itmp1 /* pass method pointer via itmp1 */
- addx %sp,1*8,mptr_itmp2 /* set method pointer ??? */
-
- /* setx asm_call_jit_compiler, %l1, %l0 */
- call asm_call_jit_compiler /* call JIT compiler */
+ setx asm_call_jit_compiler,%l0,mptr_itmp2 /* fake virtual function call (2 instr) */
+ stx mptr_itmp2,[%sp + 17*8] /* store function address */
+ addx %sp,16*8,mptr_itmp2 /* set method pointer */
+
+ ldx [1*8 + mptr_itmp2], pv_caller /* method call as in Java */
+ jmpl pv_caller,ra_caller /* call JIT compiler */
nop
calljava_jit2:
/* no need to restore pv */
return %i7 + 8 /* implicit window restore */
nop
-
-
-
+
/****************** function asm_call_jit_compiler *****************************
* *
/* XXX save + reserve stack space */
/* XXX save float arg registers */
- mov mptr_itmp2,%l0 /* save method pointer */
- stq itmp1,14*8(sp) /* save methodinfo pointer */
-
- addx 15*8,%sp,%o0 /* create stackframe info */
- mov zero,%o1 /* we don't have pv handy */
+ mov itmp1,%o0 /* pass methodinfo pointer */
+ mov mptr_itmp2,%o1 /* pass method pointer */
mov %fp,%o2 /* pass java sp (==fp) */
- mov %ra,%o3 /* pass Java ra */
+ mov ra_callee,%o3 /* pass Java ra */
mov %o3,%o4 /* xpc is equal to ra */
- call stacktrace_create_extern_stackframeinfo
+ call jit_asm_compile /* call jit compiler */
- mov itmp1,%o0, /* pass methodinfo pointer */
- call jit_compile /* call jit compiler */
- mov %o0,pv_callee /* save return value into callee pv */
+ mov %o0,pv_callee /* save return value into callee(!) pv */
/* the in's we have are also the in's of the*/
/* method that will be called */
- mov %ra,%o0 /* pass return address */
- addx 15*8,sp,%o1 /* pass stackframeinfo (for PV) */
- mov %l0,%o2 /* pass method pointer */
- jsr ra,md_assembler_get_patch_address /* get address of patch position*/
- mov %o0,%l2 /* store patch address for later use */
-
- addx 15*8,sp,%o0 /* remove stackframe info */
- call stacktrace_remove_stackframeinfo
-
-
- mov %l2, itmp3 /* move patch address out of window */
/* XXX do a window restore here ??? */
/* XXX restore float argument registers */
- beq pv_callee,L_asm_call_jit_compiler_exception
+ brz pv_callee,L_asm_call_jit_compiler_exception
- stx pv_callee,[itmp3] /* patch method entry point */
+ /* synchronise instruction cache moved somewhere else */
- /* XXX synchronise instruction cache */
-
- jmpl pv,zero /* and call method, the method returns */
+ jmpl pv_callee,zero /* and call method, the method returns */
/* directly to the caller (ra). */
L_asm_call_jit_compiler_exception:
- /* XXX how to handle stack & window here? */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- /* itmpx shall be one of the application globals */
- mov %ra,itmpx /* save return address (xpc) */
+ /* no need to do a save, only ra needs to be preserved */
+
+ /* we save ra in one of the application globals */
+ mov ra_caller,xpc_itmp3 /* save return address (xpc) */
- call builtin_asm_get_exceptionptrptr
-#else
- setx v0,_exceptionptr
-#endif
- ldq xptr,0(v0) /* get the exception pointer */
- stq zero,0(v0) /* clear the exception pointer */
+ call exceptions_get_and_clear_exception
+
+ mov xpc_itmp3,ra_caller /* restore return address (xpc) */
+
+ mov %o0,xptr_itmp2 /* get exception */
+ sub ra_caller,4,xpc_itmp3 /* exception address is ra - 4 */
+ ba L_asm_handle_nat_exception
- subq ra,4,xpc
- br L_asm_handle_nat_exception
+/* asm_handle_exception ********************************************************
+ This function handles an exception. It does not use the usual calling
+ conventions. The exception pointer is passed in REG_ITMP2 and the
+ pc from the exception raising position is passed in REG_ITMP3. It searches
+ the local exception table for a handler. If no one is found, it unwinds
+ stacks and continues searching the callers.
+*******************************************************************************/
+
+
+asm_handle_nat_exception:
+L_asm_handle_nat_exception: /* required for PIC code */
+asm_handle_exception:
+
+ /* nothing here */
+
+ restore zero,0,zero
+
+
+/* asm_abstractmethoderror *****************************************************
+
+ Creates and throws an AbstractMethodError.
+
+*******************************************************************************/
+
+asm_abstractmethoderror:
+ /* do a window save */
+ save %sp,-192,%sp
+
+ mov %fp,%o0 /* pass java sp(==fp) */
+ mov ra_callee,%o1 /* pass exception address */
+ call exceptions_asm_new_abstractmethoderror
+
+ mov %o0,xptr_itmp2 /* get exception pointer */
+ sub ra_callee,4,xpc_itmp3 /* exception address is ra - 4 */
+ ba L_asm_handle_nat_exception
+
+ /* XXX: leave the register window open for handle_exception ??? */
+
+asm_getclassvalues_atomic:
+_crit_restart:
+_crit_begin:
+/* not doing a window save, using the global temporary registers */
+ ldsw [offbaseval+%o0],itmp1
+ ldsw [offdiffval+%o0],itmp2
+ ldsw [offbaseval+%o1],itmp3
+_crit_end:
+ stw itmp1,[offcast_super_baseval+%o2]
+ stw itmp2,[offcast_super_diffval+%o2]
+ stw itmp3,[offcast_sub_baseval+%o2]
+ jmpl ra_caller,zero /* caller's ra, b/c no window save */
+
+ .end asm_getclassvalues_atomic
+
+
+ .data
+
+asm_criticalsections:
+#if defined(ENABLE_THREADS)
+ .xword _crit_begin
+ .xword _crit_end
+ .xword _crit_restart
+#endif
+ .xword 0
*/
-#include <stdio.h>
#include "config.h"
+
+#include <stdio.h>
+#include <assert.h>
+
+
#include "vm/types.h"
#include "md-abi.h"
#include "vm/jit/patcher.h"
#include "vm/jit/reg.h"
-
-#define REG_PV (own_window?REG_PV_CALLEE:REG_PV_CALLER)
+/* XXX use something like this for window control ?
+ * #define REG_PV (own_window?REG_PV_CALLEE:REG_PV_CALLER)
+ */
+#define REG_PV REG_PV_CALLEE
static int fabort(char *x)
{
}
+/* codegen *********************************************************************
+
+ Generates machine code.
+
+*******************************************************************************/
bool codegen(jitdata *jd)
{
methodinfo *m;
+ codeinfo *code;
codegendata *cd;
registerdata *rd;
s4 len, s1, s2, s3, d, disp;
s4 stackframesize;
- s4 *mcodeptr;
stackptr src;
varinfo *var;
basicblock *bptr;
methoddesc *md;
rplpoint *replacementpoint;
- bool own_window = true; /* currently assumes immediate save*/
-
/* get required compiler data */
m = jd->m;
+ code = jd->code;
cd = jd->cd;
rd = jd->rd;
+ /* prevent compiler warnings */
+
+ d = 0;
+ currentline = 0;
+ lm = NULL;
+ bte = NULL;
+
+ {
+ s4 i, p, t, l;
+ s4 savedregs_num;
+
+#if 0 /* no leaf optimization yet */
+ savedregs_num = (jd->isleafmethod) ? 0 : 1; /* space to save the RA */
+#endif
+ savedregs_num = 16; /* register-window save area */
+
+
+ /* space to save used callee saved registers */
+
+ savedregs_num += (INT_SAV_CNT - rd->savintreguse);
+ savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
+
+ stackframesize = rd->memuse + savedregs_num;
+
+#if defined(ENABLE_THREADS) /* space to save argument of monitor_enter */
+ if (checksync && (m->flags & ACC_SYNCHRONIZED))
+ stackframesize++;
+#endif
+
+ /* create method header */
- (void) dseg_addaddress(cd, m); /* MethodPointer */
+ (void) dseg_addaddress(cd, code); /* CodeinfoPointer */
(void) dseg_adds4(cd, stackframesize * 8); /* FrameSize */
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
/* IsSync contains the offset relative to the stack pointer for the
argument of monitor_exit used in the exception handler. Since the
offset could be zero and give a wrong meaning of the flag it is
#endif
(void) dseg_adds4(cd, 0); /* IsSync */
- (void) dseg_adds4(cd, m->isleafmethod); /* IsLeaf */
+ (void) dseg_adds4(cd, jd->isleafmethod); /* IsLeaf */
(void) dseg_adds4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
(void) dseg_adds4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
-
dseg_addlinenumbertablesize(cd);
(void) dseg_adds4(cd, cd->exceptiontablelength); /* ExTableSize */
+ /* create exception table */
+
+ for (ex = cd->exceptiontable; ex != NULL; ex = ex->down) {
+ dseg_addtarget(cd, ex->start);
+ dseg_addtarget(cd, ex->end);
+ dseg_addtarget(cd, ex->handler);
+ (void) dseg_addaddress(cd, ex->catchtype.cls);
+ }
+
+ /* save register window and create stack frame (if necessary) */
+
+ if (stackframesize)
+ M_SAVE(REG_SP, -stackframesize * 8, REG_SP);
+
+ /* save return address and used callee saved registers */
+
+ p = stackframesize;
+ for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
+ p--; M_DST(rd->savfltregs[i], REG_SP, (WINSAVE_REGS + p) * 8);
+ }
-
-
- /* XXX create exception table */
-
- /* initialize mcode variables */
- mcodeptr = (s4 *) cd->mcodeptr;
-
-
- /* XXX stack setup */
-
-
- /* XXX copy arguments */
+ /* take arguments out of register or stack frame */
+
+ md = m->parseddesc;
+
+ for (p = 0, l = 0; p < md->paramcount; p++) {
+ t = md->paramtypes[p].type;
+ var = &(rd->locals[l][t]);
+ l++;
+ if (IS_2_WORD_TYPE(t)) /* increment local counter for 2 word types */
+ l++;
+ if (var->type < 0)
+ continue;
+ s1 = md->params[p].regoff;
+ if (IS_INT_LNG_TYPE(t)) { /* integer args */
+ if (!md->params[p].inmemory) { /* register arguments */
+ s2 = rd->argintregs[s1];
+ if (!(var->flags & INMEMORY)) { /* reg arg -> register */
+ M_INTMOVE(s2, var->regoff);
+
+ } else { /* reg arg -> spilled */
+ M_STX(s2, REG_SP, (WINSAVE_REGS + var->regoff) * 8);
+ }
+
+ } else { /* stack arguments */
+ if (!(var->flags & INMEMORY)) { /* stack arg -> register */
+ M_LDX(var->regoff, REG_SP, (stackframesize + s1) * 8);
+
+ } else { /* stack arg -> spilled */
+ var->regoff = stackframesize + s1;
+ }
+ }
+
+ } else { /* floating args */
+ if (!md->params[p].inmemory) { /* register arguments */
+ s2 = rd->argfltregs[s1];
+ if (!(var->flags & INMEMORY)) { /* reg arg -> register */
+ M_FLTMOVE(s2, var->regoff);
+
+ } else { /* reg arg -> spilled */
+ M_DST(s2, REG_SP, (WINSAVE_REGS + var->regoff) * 8);
+ }
+
+ } else { /* stack arguments */
+ if (!(var->flags & INMEMORY)) { /* stack-arg -> register */
+ M_DLD(var->regoff, REG_SP, (stackframesize + s1) * 8);
+
+ } else { /* stack-arg -> spilled */
+ var->regoff = stackframesize + s1;
+ }
+ }
+ }
+ } /* end for */
/* XXX monitor enter and tracing */
+ }
/* end of header generation */
for (bptr = m->basicblocks; bptr != NULL; bptr = bptr->next) {
- bptr->mpc = (s4) ((u1 *) mcodeptr - cd->mcodebase);
+ bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
if (bptr->flags >= BBREACHED) {
codegen_addreference(cd, iptr->dst.block);
M_NOP;
break;
-/* XXX: CMP_IMM */
+
case ICMD_IFEQ: /* ..., value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
codegen_addreference(cd, iptr->dst.block);
M_NOP;
break;
+
+ case ICMD_IFGT: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.i = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.i == 0) {
+ M_BLTZ(s1, 0);
+ } else {
+ if ((iptr->val.i >= -4096) && (iptr->val.i <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.i);
+ } else {
+ ICONST(REG_ITMP2, iptr->val.i);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_BGT(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IFGE: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.i = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.i == 0) {
+ M_BLEZ(s1, 0);
+ }
+ else {
+ if ((iptr->val.i >= -4096) && (iptr->val.i <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.i);
+ }
+ else {
+ ICONST(REG_ITMP2, iptr->val.i);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_BGE(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LEQ: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BEQZ(s1, 0);
+ }
+ else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.l);
+ }
+ else {
+ LCONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBEQ(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LLT: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BLTZ(s1, 0);
+ } else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.l);
+ } else {
+ ICONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBLT(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LLE: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BLEZ(s1, 0);
+ }
+ else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.l);
+ }
+ else {
+ ICONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBLE(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LNE: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BNEZ(s1, 0);
+ }
+ else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.i);
+ }
+ else {
+ ICONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBNE(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LGT: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BLTZ(s1, 0);
+ } else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.l);
+ } else {
+ ICONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBGT(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
+ case ICMD_IF_LGE: /* ..., value ==> ... */
+ /* op1 = target JavaVM pc, val.l = constant */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (iptr->val.l == 0) {
+ M_BLEZ(s1, 0);
+ }
+ else {
+ if ((iptr->val.l >= -4096) && (iptr->val.l <= 4095)) {
+ M_CMP_IMM(s1, iptr->val.l);
+ }
+ else {
+ ICONST(REG_ITMP2, iptr->val.l);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_XBGE(0);
+ }
+ codegen_addreference(cd, (basicblock *) iptr->target);
+ M_NOP;
+ break;
+
case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
case ICMD_IF_LCMPEQ: /* op1 = target JavaVM pc */
}
break;
+ case ICMD_TABLESWITCH: /* ..., index ==> ... */
+ {
+ s4 i, l, *s4ptr;
+ void **tptr;
+
+ tptr = (void **) iptr->target;
+
+ s4ptr = iptr->val.a;
+ l = s4ptr[1]; /* low */
+ i = s4ptr[2]; /* high */
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (l == 0) {
+ M_INTMOVE(s1, REG_ITMP1);
+ }
+ else if (l <= 4095) {
+ M_ADD_IMM(s1, -l, REG_ITMP1);
+ }
+ else {
+ ICONST(REG_ITMP2, l);
+ /* XXX: do I need to truncate s1 to 32-bit ? */
+ M_SUB(s1, REG_ITMP2, REG_ITMP1);
+ }
+ i = i - l + 1;
+
+
+ /* range check */
+
+ if (i <= 4095) {
+ M_CMP_IMM(REG_ITMP1, i);
+ }
+ else {
+ ICONST(REG_ITMP2, i);
+ M_CMP(REG_ITMP1, REG_ITMP2);
+ }
+ M_XBULT(0);
+ codegen_addreference(cd, (basicblock *) tptr[0]);
+ M_ASLL_IMM(REG_ITMP1, POINTERSHIFT, REG_ITMP1); /* delay slot*/
+
+ /* build jump table top down and use address of lowest entry */
+
+ /* s4ptr += 3 + i; */
+ tptr += i;
+
+ while (--i >= 0) {
+ /* dseg_addtarget(cd, BlockPtrOfPC(*--s4ptr)); */
+ dseg_addtarget(cd, (basicblock *) tptr[0]);
+ --tptr;
+ }
+ }
+
+ /* length of dataseg after last dseg_addtarget is used by load */
+
+ M_AADD(REG_ITMP1, REG_PV, REG_ITMP2);
+ M_ALD(REG_ITMP2, REG_ITMP2, -(cd->dseglen));
+ M_JMP(REG_ZERO, REG_ITMP2, REG_ZERO);
+ M_NOP;
+ ALIGNCODENOP;
+ break;
+
+ case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
+ {
+ s4 i, /*l, */val, *s4ptr;
+ void **tptr;
+
+ tptr = (void **) iptr->target;
+
+ s4ptr = iptr->val.a;
+ /*l = s4ptr[0];*/ /* default */
+ i = s4ptr[1]; /* count */
+
+ MCODECHECK((i<<2)+8);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ while (--i >= 0) {
+ s4ptr += 2;
+ ++tptr;
+
+ val = s4ptr[0];
+ if ((val >= -4096) && (val <= 4095)) {
+ M_CMP_IMM(s1, val);
+ } else {
+ ICONST(REG_ITMP2, val);
+ M_CMP(s1, REG_ITMP2);
+ }
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) tptr[0]);
+ M_NOP;
+ }
+
+ M_BR(0);
+ tptr = (void **) iptr->target;
+ codegen_addreference(cd, (basicblock *) tptr[0]);
+ M_NOP;
+ ALIGNCODENOP;
+ break;
+ }
+
+
+ case ICMD_BUILTIN: /* ..., arg1, arg2, arg3 ==> ... */
+ /* op1 = arg count val.a = builtintable entry */
+
+ bte = iptr->val.a;
+ md = bte->md;
+ goto gen_method;
+
+ case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
+ /* op1 = arg count, val.a = method pointer */
+
+ case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
+ case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
+ case ICMD_INVOKEINTERFACE:
+
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ md = INSTRUCTION_UNRESOLVED_METHOD(iptr)->methodref->parseddesc.md;
+ lm = NULL;
+ }
+ else {
+ lm = INSTRUCTION_RESOLVED_METHODINFO(iptr);
+ md = lm->parseddesc;
+ }
+
+gen_method:
+ s3 = md->paramcount;
+
+ MCODECHECK((s3 << 1) + 64);
+
+ /* copy arguments to registers or stack location */
+
+ for (s3 = s3 - 1; s3 >= 0; s3--, src = src->prev) {
+ if (src->varkind == ARGVAR)
+ continue;
+ if (IS_INT_LNG_TYPE(src->type)) {
+ if (!md->params[s3].inmemory) {
+ s1 = rd->argintregs[md->params[s3].regoff];
+ d = emit_load_s1(jd, iptr, src, s1);
+ M_INTMOVE(d, s1);
+ } else {
+ d = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_STX(d, REG_SP, md->params[s3].regoff * 8);
+ }
+
+ } else {
+ if (!md->params[s3].inmemory) {
+ s1 = rd->argfltregs[md->params[s3].regoff];
+ d = emit_load_s1(jd, iptr, src, s1);
+ if (IS_2_WORD_TYPE(src->type))
+ M_DMOV(d, s1);
+ else
+ M_FMOV(d, s1);
+
+ } else {
+ d = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ if (IS_2_WORD_TYPE(src->type))
+ M_DST(d, REG_SP, md->params[s3].regoff * 8);
+ else
+ M_FST(d, REG_SP, md->params[s3].regoff * 8);
+ }
+ }
+ }
+
+ switch (iptr->opc) {
+ case ICMD_BUILTIN:
+ disp = dseg_addaddress(cd, bte->fp);
+ d = md->returntype.type;
+
+ M_ALD(REG_ITMP3, REG_PV, disp); /* built-in-function pointer */
+ M_JMP(REG_RA_CALLER, REG_ITMP3, REG_ZERO);
+ M_NOP;
+/* XXX: how do builtins handle the register window? */
+/* disp = (s4) (cd->mcodeptr - cd->mcodebase);*/
+/* M_LDA(REG_PV, REG_RA, -disp);*/
+
+ /* if op1 == true, we need to check for an exception */
+
+ if (iptr->op1 == true) {
+ M_BEQZ(REG_RESULT_CALLER, 0);
+ codegen_add_fillinstacktrace_ref(cd);
+ M_NOP;
+ }
+ break;
+
+ case ICMD_INVOKESPECIAL:
+ M_BEQZ(rd->argintregs[0], 0);
+ codegen_add_nullpointerexception_ref(cd);
+ M_NOP;
+ /* fall through */
+
+ case ICMD_INVOKESTATIC:
+ if (lm == NULL) {
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
+
+ disp = dseg_addaddress(cd, NULL);
+
+ codegen_addpatchref(cd, PATCHER_invokestatic_special,
+ um, disp);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ disp = dseg_addaddress(cd, lm->stubroutine);
+ d = lm->parseddesc->returntype.type;
+ }
+
+ M_ALD(REG_PV_CALLER, REG_PV, disp); /* method pointer in callee pv */
+ M_JMP(REG_RA_CALLER, REG_PV_CALLER, REG_ZERO);
+ M_NOP;
+/* XXX no need to restore PV, when its in the regs */
+ break;
+
+ case ICMD_INVOKEVIRTUAL:
+ gen_nullptr_check(rd->argintregs[0]);
+
+ if (lm == NULL) {
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
+
+ codegen_addpatchref(cd, PATCHER_invokevirtual, um, 0);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ s1 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ s1 = OFFSET(vftbl_t, table[0]) +
+ sizeof(methodptr) * lm->vftblindex;
+ d = lm->parseddesc->returntype.type;
+ }
+
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl));
+ M_ALD(REG_PV_CALLER, REG_METHODPTR, s1);
+ M_JMP(REG_RA_CALLER, REG_PV_CALLER, REG_ZERO);
+ M_NOP;
+/* XXX no need to restore PV, when its in the regs */
+ break;
+
+ case ICMD_INVOKEINTERFACE:
+ gen_nullptr_check(rd->argintregs[0]);
+
+ if (lm == NULL) {
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
+
+ codegen_addpatchref(cd, PATCHER_invokeinterface, um, 0);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ s1 = 0;
+ s2 = 0;
+ d = um->methodref->parseddesc.md->returntype.type;
+
+ } else {
+ s1 = OFFSET(vftbl_t, interfacetable[0]) -
+ sizeof(methodptr*) * lm->class->index;
+
+ s2 = sizeof(methodptr) * (lm - lm->class->methods);
+
+ d = lm->parseddesc->returntype.type;
+ }
+
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl));
+ M_ALD(REG_METHODPTR, REG_METHODPTR, s1);
+ M_ALD(REG_PV_CALLER, REG_METHODPTR, s2);
+ M_JMP(REG_RA_CALLER, REG_PV_CALLER, REG_ZERO);
+ M_NOP;
+/* XXX no need to restore PV, when its in the regs */
+ break;
+ }
+
+ /* d contains return type */
+
+ if (d != TYPE_VOID) {
+ if (IS_INT_LNG_TYPE(iptr->dst->type)) {
+ s1 = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_RESULT_CALLER);
+ M_INTMOVE(REG_RESULT_CALLER, s1);
+ } else {
+ s1 = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FRESULT);
+ if (IS_2_WORD_TYPE(iptr->dst->type)) {
+ M_DBLMOVE(REG_FRESULT, s1);
+ } else {
+ M_FLTMOVE(REG_FRESULT, s1);
+ }
+ }
+ emit_store(jd, iptr, iptr->dst, s1);
+ }
+ break;
+
+
+ case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
+ /* op1: 0 == array, 1 == class */
+ /* val.a: (classinfo*) superclass */
+
+ /* superclass is an interface:
+ *
+ * OK if ((sub == NULL) ||
+ * (sub->vftbl->interfacetablelength > super->index) &&
+ * (sub->vftbl->interfacetable[-super->index] != NULL));
+ *
+ * superclass is a class:
+ *
+ * OK if ((sub == NULL) || (0
+ * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
+ * super->vftbl->diffvall));
+ */
+
+ if (iptr->op1 == 1) {
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
+
+ super = (classinfo *) iptr->val.a;
+
+ if (super == NULL) {
+ superindex = 0;
+ supervftbl = NULL;
+ }
+ else {
+ superindex = super->index;
+ supervftbl = super->vftbl;
+ }
+
+#if defined(ENABLE_THREADS)
+ codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
+#endif
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+
+ /* calculate interface checkcast code size */
+
+ s2 = 8;
+ if (super == NULL)
+ s2 += (opt_showdisassemble ? 2 : 0);
+
+ /* calculate class checkcast code size */
+
+ s3 = 10 /* 10 + (s1 == REG_ITMP1) */;
+ if (super == NULL)
+ s3 += (opt_showdisassemble ? 2 : 0);
+
+ /* if class is not resolved, check which code to call */
+
+ if (super == NULL) {
+ M_BEQZ(s1, 5 + (opt_showdisassemble ? 2 : 0) + s2 + 2 + s3);
+ M_NOP;
+
+ disp = dseg_add_unique_s4(cd, 0); /* super->flags */
+
+ codegen_addpatchref(cd, PATCHER_checkcast_instanceof_flags,
+ (constant_classref *) iptr->target,
+ disp);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+
+ M_ILD(REG_ITMP2, REG_PV, disp);
+ M_AND_IMM(REG_ITMP2, ACC_INTERFACE, REG_ITMP2);
+ M_BEQZ(REG_ITMP2, 1 + s2 + 2);
+ M_NOP;
+ }
+
+ /* interface checkcast code */
+
+ if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
+ if (super == NULL) {
+ codegen_addpatchref(cd,
+ PATCHER_checkcast_instanceof_interface,
+ (constant_classref *) iptr->target,
+ 0);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+ else {
+ M_BEQZ(s1, 1 + s2);
+ M_NOP;
+ }
+
+ M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
+ M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
+ M_LDA(REG_ITMP3, REG_ITMP3, -superindex);
+ M_BLEZ(REG_ITMP3, 0);
+ codegen_add_classcastexception_ref(cd, s1);
+ M_NOP;
+ M_ALD(REG_ITMP3, REG_ITMP2,
+ (s4) (OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*)));
+ M_BEQZ(REG_ITMP3, 0);
+ codegen_add_classcastexception_ref(cd, s1);
+ M_NOP;
+
+ if (super == NULL) {
+ M_BR(1 + s3);
+ M_NOP;
+ }
+ }
+
+ /* class checkcast code */
+
+ if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
+ if (super == NULL) {
+ disp = dseg_add_unique_address(cd, NULL);
+
+ codegen_addpatchref(cd,
+ PATCHER_checkcast_instanceof_class,
+ (constant_classref *) iptr->target,
+ disp);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+ else {
+ disp = dseg_add_address(cd, supervftbl);
+
+ M_BEQZ(s1, 1 + s3);
+ M_NOP;
+ }
+
+ M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
+ M_ALD(REG_ITMP3, REG_PV, disp);
+#if defined(ENABLE_THREADS)
+ codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
+#endif
+ M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, baseval));
+ /* if (s1 != REG_ITMP1) { */
+ /* M_ILD(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, baseval)); */
+ /* M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval)); */
+ /* #if defined(ENABLE_THREADS) */
+ /* codegen_threadcritstop(cd, (u1 *) mcodeptr - cd->mcodebase); */
+ /* #endif */
+ /* M_ISUB(REG_ITMP2, REG_ITMP1, REG_ITMP2); */
+
+ /* } else { */
+ M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, baseval));
+ M_SUB(REG_ITMP2, REG_ITMP3, REG_ITMP2);
+ M_ALD(REG_ITMP3, REG_PV, disp);
+ M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
+#if defined(ENABLE_THREADS)
+ codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
+#endif
+ /* } */
+ M_CMP(REG_ITMP3, REG_ITMP2);
+ M_BULT(0); /* branch if ITMP3 < ITMP2 */
+ codegen_add_classcastexception_ref(cd, s1);
+ M_NOP;
+ }
+
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, s1);
+ }
+ else {
+ /* array type cast-check */
+
+ s1 = emit_load_s1(jd, iptr, src, rd->argintregs[0]);
+ M_INTMOVE(s1, rd->argintregs[0]);
+
+ disp = dseg_addaddress(cd, iptr->val.a);
+
+ if (iptr->val.a == NULL) {
+ codegen_addpatchref(cd, PATCHER_builtin_arraycheckcast,
+ (constant_classref *) iptr->target,
+ disp);
+
+ if (opt_showdisassemble) {
+ M_NOP; M_NOP;
+ }
+ }
+
+ M_ALD(rd->argintregs[1], REG_PV, disp);
+ disp = dseg_addaddress(cd, BUILTIN_arraycheckcast);
+ M_ALD(REG_ITMP3, REG_PV, disp);
+ M_JMP(REG_RA_CALLER, REG_ITMP3, REG_ZERO);
+ M_NOP;
+
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_BEQZ(REG_RESULT_CALLER, 0);
+ codegen_add_classcastexception_ref(cd, s1);
+ M_NOP;
+
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, s1);
+ }
+
+ M_INTMOVE(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
+ break;
+
+
default:
*exceptionptr = new_internalerror("Unknown ICMD %d", iptr->opc);
} /* for instruction */
-
/* copy values to interface registers */
src = bptr->outstack;
if ((src->varkind != STACKVAR)) {
s2 = src->type;
if (IS_FLT_DBL_TYPE(s2)) {
- var_to_reg_flt(s1, src, REG_FTMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
M_FLTMOVE(s1,rd->interfaces[len][s2].regoff);
}
}
}
else {
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
M_INTMOVE(s1,rd->interfaces[len][s2].regoff);
}
} /* if (bptr -> flags >= BBREACHED) */
} /* for basic block */
+ dseg_createlinenumbertable(cd);
+
+ /* generate stubs */
+
+ emit_exception_stubs(jd);
+ emit_patcher_stubs(jd);
+ emit_replacement_stubs(jd);
+
+ codegen_finish(jd);
-
+ /* everything's ok */
+
+ return true;
}
*******************************************************************************/
-#define COMPILERSTUB_DATASIZE 2 * SIZEOF_VOID_P
-#define COMPILERSTUB_CODESIZE 3 * 4
+#define COMPILERSTUB_DATASIZE 3 * SIZEOF_VOID_P
+#define COMPILERSTUB_CODESIZE 4 * 4
#define COMPILERSTUB_SIZE COMPILERSTUB_DATASIZE + COMPILERSTUB_CODESIZE
u1 *createcompilerstub(methodinfo *m)
{
u1 *s; /* memory to hold the stub */
- ptrint *d;
- s4 *mcodeptr; /* code generation pointer */
-
+ ptrint *d;
+ codeinfo *code;
+ codegendata *cd;
+ s4 dumpsize;
+
s = CNEW(u1, COMPILERSTUB_SIZE);
/* set data pointer and code pointer */
d = (ptrint *) s;
s = s + COMPILERSTUB_DATASIZE;
- mcodeptr = (s4 *) s;
+ /* mark start of dump memory area */
+
+ dumpsize = dump_size();
+
+ cd = DNEW(codegendata);
+ cd->mcodeptr = s;
- /* Store the methodinfo* in the same place as in the methodheader
- for compiled methods. */
+ /* Store the codeinfo pointer in the same place as in the
+ methodheader for compiled methods. */
+
+ code = code_codeinfo_new(m);
d[0] = (ptrint) asm_call_jit_compiler;
d[1] = (ptrint) m;
+ d[2] = (ptrint) code;
/* code for the stub */
-
- M_LDX(REG_ITMP1, REG_PV_CALLER, -1 * 8); /* load methodinfo pointer */
- /* XXX CALLER PV ??? */
- M_LDX(REG_PV_CALLER ,REG_PV_CALLER, -2 * 8); /* load pointer to the compiler */
+	/* no window save yet, use caller's PV */
+ M_ALD_INTERN(REG_ITMP1, REG_PV_CALLER, -2 * SIZEOF_VOID_P); /* codeinfo pointer */
+ M_ALD_INTERN(REG_PV_CALLER, REG_PV_CALLER, -3 * SIZEOF_VOID_P); /* pointer to compiler */
M_JMP(REG_ZERO, REG_PV_CALLER, REG_ZERO); /* jump to the compiler, RA is wasted */
+ M_NOP;
#if defined(ENABLE_STATISTICS)
if (opt_stat)
count_cstub_len += COMPILERSTUB_SIZE;
#endif
+ /* release dump area */
+
+ dump_release(dumpsize);
+
return s;
}
u1 *createnativestub(functionptr f, jitdata *jd, methoddesc *nmd)
{
- fabort("help me!");
+ /* fabort("help me!"); */
+ printf("createnativestub not implemented\n");
return NULL;
}
#define ALIGNCODENOP \
- if ((s4) ((ptrint) mcodeptr & 7)) { \
+ if ((s4) ((ptrint) cd->mcodeptr & 7)) { \
M_NOP; \
}
#define M_COPY(s,d) emit_copy(jd, iptr, (s), (d))
+#define ICONST(d,c) emit_iconst(cd, (d), (c))
+#define LCONST(d,c) emit_lconst(cd, (d), (c))
* */
#define M_OP3(op,op3,rd,rs1,rs2,imm) \
- *(mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | (imm?((rs2)&0x1fff):(rs2)) )
+ *((u4 *) cd->mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | (imm?((rs2)&0x1fff):(rs2)) )
/* 3-address-operations: M_OP3C
* rcond ... condition opcode
*/
#define M_OP3C(op,op3,rcond,rd,rs1,rs2,imm) \
- *(mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | \
+ *((u4 *) cd->mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | \
((rcond) << 10) | (imm?((rs2)&0x3ff):(rs2)) )
* x ...... 0 => 32, 1 => 64 bit shift
*/
#define M_SHFT(op,op3,rs1,rs2,rd,imm,x) \
- *(mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((rs1) << 14) | ((rs2) << 0) | \
+ *((u4 *) cd->mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((rs1) << 14) | ((rs2) << 0) | \
((imm) << 13) | ((x) << 12) )
/* Format 4
*/
#define M_FMT4(op,op3,rd,rs2,cond,cc2,cc1,cc0,imm) \
- *(mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((cc2) << 18) | ((cond) << 14) | \
+ *((u4 *) cd->mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((cc2) << 18) | ((cond) << 14) | \
((imm) << 13) | ((cc1) << 12) | ((cc0) << 11) | ((rs2) << 0) )
/* 3-address-floating-point-operation
op .... opcode
op3,opf .... function-number
- XXX
+ rd .... dest reg
+ rs2 ... source reg
+
+ !!! 6-bit to 5-bit conversion done here !!!
*/
#define M_FOP3(op,op3,opf,rd,rs1,rs2) \
- *(mcodeptr++) = ( (((s4)(op))<<30) | ((rd)<<25) | ((op3)<<19) | ((rs1) << 14) | ((opf)<<5) | (rs2) )
+ *((u4 *) cd->mcodeptr++) = ( (((s4)(op))<<30) | ((rd*2)<<25) | ((op3)<<19) | ((rs1*2) << 14) | ((opf)<<5) | (rs2*2) )
/**** format 2 operations ********/
anul .... annullment bit
*/
#define M_BRAREG(op,rcond,rs1,disp16,p,anul) \
- *(mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | (0<<28) | ((rcond)<<25) | (3<<22) | \
+ *((u4 *) cd->mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | (0<<28) | ((rcond)<<25) | (3<<22) | \
( ((disp16)& 0xC000) << 6 ) | (p << 19) | ((rs1) << 14) | ((disp16)&0x3fff) )
/* branch on integer reg instruction
anul .... annullment bit
*/
#define M_BRACC(op,op2,cond,disp19,ccx,p,anul) \
- *(mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | ((cond)<<25) | (op2<<22) | (ccx<<20) | \
+ *((u4 *) cd->mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | ((cond)<<25) | (op2<<22) | (ccx<<20) | \
(p << 19 ) | (disp19) )
/************** end-user instructions (try to follow asm style) ***************/
#define M_SETHI(imm22, rd) \
- *(mcodeptr++) = ((((s4)(0x00)) << 30) | ((rd) << 25) | ((0x04)<<22) | ((imm22)&0x3FFFFF) )
+ *((u4 *) cd->mcodeptr++) = ((((s4)(0x00)) << 30) | ((rd) << 25) | ((0x04)<<22) | ((imm22)&0x3FFFFF) )
#define M_NOP (M_SETHI(0,0)) /* nop */
/**** load/store operations ********/
+#define M_LDA(rd,rs,disp) \
+ do { \
+ s4 lo = (short) (disp); \
+ s4 hi = (short) (((disp) - lo) >> 13); \
+ if (hi == 0) { \
+ M_AADD_IMM(rs,lo,rd); \
+ } else { \
+ M_SETHI(hi&0x3ffff8,rd); \
+ M_AADD_IMM(rd,lo,rd); \
+ M_AADD(rd,rs,rd); \
+ } \
+ } while (0)
+
#define M_SLDU(rd,rs,disp) M_OP3(0x03,0x02,rd,rs,disp,IMM) /* 16-bit load, uns*/
#define M_SLDS(rd,rs,disp) M_OP3(0x03,0x0a,rd,rs,disp,IMM) /* 16-bit load, sig*/
#define M_BLDS(rd,rs,disp) M_OP3(0x03,0x09,rd,rs,disp,IMM) /* 8-bit load, sig */
/* branch on (64-bit) integer condition codes */
#define M_XBEQ(disp) M_BRACC(0x00,0x1,0x1,disp,2,1,0) /* branch a==b */
-#define M_XBNEQ(disp) M_BRACC(0x00,0x1,0x9,disp,2,1,0) /* branch a!=b */
+#define M_XBNE(disp) M_BRACC(0x00,0x1,0x9,disp,2,1,0) /* branch a!=b */
#define M_XBGT(disp) M_BRACC(0x00,0x1,0xa,disp,2,1,0) /* branch a>b */
#define M_XBLT(disp) M_BRACC(0x00,0x1,0x3,disp,2,1,0) /* branch a<b */
#define M_XBGE(disp) M_BRACC(0x00,0x1,0xb,disp,2,1,0) /* branch a>=b */
#define M_XBLE(disp) M_BRACC(0x00,0x1,0x2,disp,2,1,0) /* branch a<=b */
#define M_XBUGE(disp) M_BRACC(0x00,0x1,0xd,disp,2,1,0) /* br uns a>=b */
+#define M_XBULT(disp) M_BRACC(0x00,0x1,0x5,disp,2,1,0) /* br uns a<b */
/* branch on (32-bit) integer condition codes */
#define M_BR(disp) M_BRACC(0x00,0x1,0x8,disp,0,1,0) /* branch */
#define M_BEQ(disp) M_BRACC(0x00,0x1,0x1,disp,0,1,0) /* branch a==b */
-#define M_BNEQ(disp) M_BRACC(0x00,0x1,0x9,disp,0,1,0) /* branch a!=b */
+#define M_BNE(disp) M_BRACC(0x00,0x1,0x9,disp,0,1,0) /* branch a!=b */
#define M_BGT(disp) M_BRACC(0x00,0x1,0xa,disp,0,1,0) /* branch a>b */
#define M_BLT(disp) M_BRACC(0x00,0x1,0x3,disp,0,1,0) /* branch a<b */
#define M_BGE(disp) M_BRACC(0x00,0x1,0xb,disp,0,1,0) /* branch a>=b */
#define M_BLE(disp) M_BRACC(0x00,0x1,0x2,disp,0,1,0) /* branch a<=b */
+#define M_BULE(disp) M_BRACC(0x00,0x1,0x4,disp,0,1,0) /* br uns a<=b */
+#define M_BULT(disp) M_BRACC(0x00,0x1,0x5,disp,0,1,0) /* br uns a<b */
-
+#define M_SAVE(rs1,rs2,rd) M_OP3(0x02,0x36,rd,rs1,rs2,IMM)
+#define M_REST(rs1,rs2,rd) M_OP3(0x02,0x37,rd,rs1,rs2,IMM)
#define M_JMP_IMM(rd,rs1,rs2) M_OP3(0x02,0x38,rd, rs1,rs2,IMM)
#define M_RET(rs) M_OP3(0x02,0x38,REG_ZERO,rs,REG_ZERO,REG)
-#define M_RETURN(rs) M_OP3(0x02,0x39,0,rs,REG_ZERO,REG) /* like ret, does window restore */
+#define M_RETURN(rs) M_OP3(0x02,0x39,0,rs,REG_ZERO,REG) /* like ret, but does window restore */
/**** floating point operations **/
#define M_DMOV(rs,rd) M_FOP3(0x02,0x34,0x02,rd,0,rs) /* rd = rs */
-#define M_FMOV(rs,rd) M_FOP3(0x02,0x34,0x01,rd*2,0,rs*2) /* rd = rs */
+#define M_FMOV(rs,rd) M_FOP3(0x02,0x34,0x01,rd,0,rs) /* rd = rs */
#define M_FNEG(rs,rd) M_FOP3(0x02,0x34,0x05,rd,0,rs) /* rd = -rs */
#define M_DNEG(rs,rd) M_FOP3(0x02,0x34,0x06,rd,0,rs) /* rd = -rs */
/**** compare and conditional FPU operations ***********/
/* rd field 0 ==> fcc target unit is fcc0 */
-#define M_FCMP(rs1,rs2) M_FOP3(0x02,0x35,0x051,0,rs1*2,rs2*2) /* set fcc flt */
+#define M_FCMP(rs1,rs2) M_FOP3(0x02,0x35,0x051,0,rs1,rs2) /* set fcc flt */
#define M_DCMP(rs1,rs2) M_FOP3(0x02,0x35,0x052,0,rs1,rs2) /* set fcc dbl */
/* conversion functions */
-#define M_CVTIF(rs,rd) M_FOP3(0x02,0x34,0x0c4,rd*2,0,rs*2)/* int2flt */
-#define M_CVTID(rs,rd) M_FOP3(0x02,0x34,0x0c8,rd,0,rs*2) /* int2dbl */
-#define M_CVTLF(rs,rd) M_FOP3(0x02,0x34,0x084,rd*2,0,rs) /* long2flt */
+#define M_CVTIF(rs,rd) M_FOP3(0x02,0x34,0x0c4,rd,0,rs)/* int2flt */
+#define M_CVTID(rs,rd) M_FOP3(0x02,0x34,0x0c8,rd,0,rs) /* int2dbl */
+#define M_CVTLF(rs,rd) M_FOP3(0x02,0x34,0x084,rd,0,rs) /* long2flt */
#define M_CVTLD(rs,rd) M_FOP3(0x02,0x34,0x088,rd,0,rs) /* long2dbl */
-#define M_CVTFI(rs,rd) M_FOP3(0x02,0x34,0x0d1,rd*2,0,rs*2) /* flt2int */
-#define M_CVTDI(rs,rd) M_FOP3(0x02,0x34,0x0d2,rd*2,0,rs) /* dbl2int */
-#define M_CVTFL(rs,rd) M_FOP3(0x02,0x34,0x081,rd,0,rs*2) /* flt2long */
+#define M_CVTFI(rs,rd) M_FOP3(0x02,0x34,0x0d1,rd,0,rs) /* flt2int */
+#define M_CVTDI(rs,rd) M_FOP3(0x02,0x34,0x0d2,rd,0,rs) /* dbl2int */
+#define M_CVTFL(rs,rd) M_FOP3(0x02,0x34,0x081,rd,0,rs) /* flt2long */
#define M_CVTDL(rs,rd) M_FOP3(0x02,0x34,0x082,rd,0,rs) /* dbl2long */
-#define M_CVTFD(rs,rd) M_FOP3(0x02,0x34,0x0c9,rd,0,rs*2) /* flt2dbl */
-#define M_CVTDF(rs,rd) M_FOP3(0x02,0x34,0x0c6,rd*2,0,rs) /* dbl2float */
+#define M_CVTFD(rs,rd) M_FOP3(0x02,0x34,0x0c9,rd,0,rs) /* flt2dbl */
+#define M_CVTDF(rs,rd) M_FOP3(0x02,0x34,0x0c6,rd,0,rs) /* dbl2float */
+
+/* a 6-bit double register index has to be converted into the 5-bit representation
+ * (%d1 -> %f2, %d2 -> %f4, ie. shift left once )
+ * don't have to pack the MSB, since we are not using the upper 16 doubles
+ *
+ * since single precision floats reside in the lower register of a double pair their
+ * register numbers need to be handled in the same way
+ */
-/* translate logical double register index to float index. (e.g. %d1 -> %f2, %d2 -> %f4, etc.) */
-/* we don't have to pack the 6-bit register number, since we are not using the upper 16 doubles */
-/* floats reside in lower register of a double pair, use same translation as above */
+/* M_OP3 will not do the float register number conversion */
#define M_DLD_INTERN(rd,rs1,disp) M_OP3(0x03,0x23,rd*2,rs1,disp,IMM) /* double (64-bit) load */
#define M_DLD(rd,rs,disp) \
do { \
} while (0)
-#define M_DST_INTERN(rd,rs1,disp) M_OP3(0x03,0x27,rd,rs1,disp,IMM) /* double (64-bit) store */
+#define M_DST_INTERN(rd,rs1,disp) M_OP3(0x03,0x27,rd*2,rs1,disp,IMM) /* double (64-bit) store */
#define M_DST(rd,rs,disp) \
do { \
s4 lo = (short) (disp); \
#include "md-abi.h"
#include "vm/jit/jit.h"
+#include "vm/jit/dseg.h"
+#include "vm/jit/emit.h"
#include "vm/jit/sparc64/codegen.h"
-#include "vm/jit/sparc64/emit.h"
/* code generation functions **************************************************/
COUNT_SPILLS;
if (IS_FLT_DBL_TYPE(src->type))
- M_LDDF(REG_SP, src->regoff * 8, tempreg);
+ M_DLD(tempreg, REG_SP, src->regoff * 8);
else
- M_LDX(REG_SP, src->regoff * 8, tempreg);
+ M_LDX(tempreg, REG_SP, src->regoff * 8);
reg = tempreg;
} else
COUNT_SPILLS;
if (IS_FLT_DBL_TYPE(src->type))
- M_LDDF(REG_SP, src->regoff * 8, tempreg);
+ M_DLD(tempreg, REG_SP, src->regoff * 8);
else
- M_LDX(REG_SP, src->regoff * 8, tempreg);
+ M_LDX(tempreg, REG_SP, src->regoff * 8);
reg = tempreg;
} else
COUNT_SPILLS;
if (IS_FLT_DBL_TYPE(src->type))
- M_LDDF(REG_SP, src->regoff * 8, tempreg);
+ M_DLD(tempreg, REG_SP, src->regoff * 8);
else
- M_LDX(REG_SP, src->regoff * 8, tempreg);
+ M_LDX(tempreg, REG_SP, src->regoff * 8);
reg = tempreg;
} else
COUNT_SPILLS;
if (IS_FLT_DBL_TYPE(dst->type))
- M_STDF(d, REG_SP, dst->regoff * 8);
+ M_DST(d, REG_SP, dst->regoff * 8);
else
M_STX(d, REG_SP, dst->regoff * 8);
}
s4 disp;
if ((value >= -4096) && (value <= 4095)) {
- M_IOR(REG_ZERO, value, d, IMM);
+ M_XOR(REG_ZERO, value, d);
} else {
disp = dseg_adds4(cd, value);
- M_ILD(d, REG_PV, disp);
+ M_ILD(d, REG_PV_CALLEE, disp);
}
}
+void emit_lconst(codegendata *cd, s4 d, s8 value)
+{
+ s4 disp;
+
+ if ((value >= -4096) && (value <= 4095)) {
+ M_XOR(REG_ZERO, value, d);
+ } else {
+ disp = dseg_adds8(cd, value);
+ M_LDX(d, REG_PV_CALLEE, disp);
+ }
+}
+
+/* emit_exception_stubs ********************************************************
+
+ Generates the code for the exception stubs.
+
+*******************************************************************************/
+
+void emit_exception_stubs(jitdata *jd)
+{
+}
+
+/* emit_patcher_stubs **********************************************************
+
+ Generates the code for the patcher stubs.
+
+*******************************************************************************/
+
+void emit_patcher_stubs(jitdata *jd)
+{
+}
+
+/* emit_replacement_stubs ******************************************************
+
+ Generates the code for the replacement stubs.
+
+*******************************************************************************/
+
+void emit_replacement_stubs(jitdata *jd)
+{
+}
+
/*
* These are local overrides for various environment variables in Emacs.
/* zero itmp1/g1 itmp2/g2 itmp3/g3 temp/g4 temp/g5 sys/g6 sys/g7 */
REG_RES, REG_RES, REG_RES, REG_RES, REG_TMP, REG_TMP, REG_RES, REG_RES,
- /* o0 o1 o2 o3 o4 o5 sp/o6 o7 */
- REG_TMP, REG_TMP, REG_TMP, REG_TMP, REG_TMP, REG_TMP, REG_RES, REG_TMP,
+ /* o0 o1 o2 o3 o4 pv/o5 sp/o6 o7/ra */
+ REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_RES, REG_RES, REG_RES,
/* l0 l1 l2 l3 l4 l5 l6 l7 */
REG_SAV, REG_SAV, REG_SAV, REG_SAV, REG_SAV, REG_SAV, REG_SAV, REG_SAV,
*******************************************************************************/
-void md_return_alloc(methodinfo *m, registerdata *rd, s4 return_type,
- stackptr stackslot)
+void md_return_alloc(jitdata *jd, stackptr stackslot)
{
- /* Only precolor the stackslot, if it is not used for parameter precoloring AND */
- /* it is not a SAVEDVAR <-> has not to survive method invokations */
+ methodinfo *m;
+ methoddesc *md;
- if (!m->isleafmethod || (m->parseddesc->paramcount == 0)) {
+ /* get required compiler data */
- if (!(stackslot->flags & SAVEDVAR)) {
- stackslot->varkind = ARGVAR;
- stackslot->varnum = -1;
- stackslot->flags = 0;
+ m = jd->m;
- if (IS_INT_LNG_TYPE(return_type)) {
- stackslot->regoff = REG_RESULT_CALLEE;
- } else { /* float/double */
- stackslot->regoff = REG_FRESULT;
- }
+ md = m->parseddesc;
+
+ /* Only precolor the stackslot, if it is not a SAVEDVAR <-> has
+ not to survive method invokations. */
+
+
+ if (!(stackslot->flags & SAVEDVAR)) {
+ stackslot->varkind = ARGVAR;
+ stackslot->varnum = -1;
+ stackslot->flags = 0;
+
+ if (IS_INT_LNG_TYPE(md->returntype.type)) {
+ stackslot->regoff = REG_RESULT_CALLEE;
+ } else { /* float/double */
+ stackslot->regoff = REG_FRESULT;
}
- }
+ }
}
#define REG_RESULT_CALLEE 24 /* to deliver method results */
#define REG_RESULT_CALLER 8 /* to read method results */
-#define REG_RA_CALLEE 26 /* callee reads return address here */
+#define REG_RA_CALLEE 31 /* callee reads return address here */
#define REG_RA_CALLER 15 /* caller puts address of call instr here */
#define REG_PV_CALLEE 29 /* procedure vector, as found by callee */
#define REG_PV_CALLER 13 /* caller provides PV here */
-#define REG_METHODPTR 4 /* pointer to the place from where the procedure */
+#define REG_METHODPTR 2 /* pointer to the place from where the procedure */
/* vector has been fetched */
#define INT_REG_CNT 32 /* number of integer registers */
-#define INT_SAV_CNT 16 /* number of int callee saved registers */
+#define INT_SAV_CNT 12 /* number of int callee saved registers */
#define INT_ARG_CNT 5 /* number of int argument registers (-1 for PV) */
-#define INT_TMP_CNT 5 /* int temp registers (%g1-%g5) */
-#define INT_RES_CNT 6 /* number of reserved integer registers */
+#define INT_TMP_CNT 2 /* int temp registers (%g4-%g5) */
+#define INT_RES_CNT 12 /* number of reserved integer registers */
/* pv, zero, %g6, %g7, sp, ra */
#define FLT_REG_CNT 16 /* number of float registers */
/* #define TRACE_ARGS_NUM 5 */
+
+#define WINSAVE_REGS 16 /* number of regs that SPARC saves onto stack */
+
#endif /* _MD_ABI_H */
#define itmp2 %g2
#define itmp3 %g3
+#define mptr_itmp2 itmp2
+
+#define xptr_itmp2 itmp2
+#define xpc_itmp3 itmp3
+
+#define ra_caller %o7
+#define ra_callee %i7
+
+#define pv_caller %o5
+#define pv_callee %i5
#endif /* _MD_ASM_H */
+#include <assert.h>
+
#include "config.h"
#include "vm/types.h"
#include "vm/jit/asmpart.h"
#include "vm/jit/stacktrace.h"
+#if !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER)
+#include "vm/options.h" /* XXX debug */
+#include "vm/jit/disass.h" /* XXX debug */
+#endif
+
/* md_init *********************************************************************
u1 *md_stacktrace_get_returnaddress(u1 *sp, u4 framesize)
{
- /* where's it gonna be ? */
+ u1 *ra;
+ /* flush register windows to the stack */
+ __asm__ ("flushw");
+
+ /* the return address resides in register i7, the last register in the
+ * 16-extended-word save area
+ */
+ ra = *((u1 **) (sp + 120));
+
+ /* ra is the address of the call instr, advance to the real return address */
+ ra += 8;
- return 0;
+ return ra;
}
return pv;
}
+/* md_get_method_patch_address *************************************************
+
+   Gets the patch address of the currently compiled method. The offset
+   is extracted from the load instruction(s) before the jump and added
+   to the right base address (PV or REG_METHODPTR).
+
+   INVOKESTATIC/SPECIAL:
+
+   dfdeffb8    ld       s8,-72(s8)
+   03c0f809    jalr     s8
+   00000000    nop
+
+   INVOKEVIRTUAL:
+
+   dc990000    ld       t9,0(a0)
+   df3e0000    ld       s8,0(t9)
+   03c0f809    jalr     s8
+   00000000    nop
+
+   INVOKEINTERFACE:
+
+   dc990000    ld       t9,0(a0)
+   df39ff90    ld       t9,-112(t9)
+   df3e0018    ld       s8,24(t9)
+   03c0f809    jalr     s8
+   00000000    nop
+
+*******************************************************************************/
+
+u1 *md_get_method_patch_address(u1 *ra, stackframeinfo *sfi, u1 *mptr)
+{
+	u4 mcode;
+	s4 offset;
+	u1 *pa;
+
+	/* go back to the actual load instruction (3 instructions on MIPS --
+	   NOTE(review): the opcode checks below are MIPS encodings (lui/ld);
+	   confirm this is intended here or port for the target arch) */
+
+	ra -= 3 * 4;
+
+	/* get first instruction word on current PC */
+
+	mcode = *((u4 *) ra);
+
+	/* check if we have 2 instructions (lui) */
+
+	if ((mcode >> 16) == 0x3c19) {
+		/* XXX write a regression for this */
+		assert(0);
+
+		/* get displacement of first instruction (lui) */
+
+		offset = (s4) (mcode << 16);
+
+		/* get displacement of second instruction (daddiu) */
+
+		mcode = *((u4 *) (ra + 1 * 4));
+
+		assert((mcode >> 16) != 0x6739);
+
+		offset += (s2) (mcode & 0x0000ffff);
+
+	} else {
+		/* get first instruction (ld) */
+
+		mcode = *((u4 *) ra);
+
+		/* get the offset from the instruction */
+
+		offset = (s2) (mcode & 0x0000ffff);
+
+		/* check for call with REG_METHODPTR: ld s8,x(t9) */
+
+#if SIZEOF_VOID_P == 8
+		if ((mcode >> 16) == 0xdf3e) {
+#else
+		if ((mcode >> 16) == 0x8f3e) {
+#endif
+			/* in this case we use the passed method pointer */
+
+			pa = mptr + offset;
+
+		} else {
+			/* in the normal case we check for a `ld s8,x(s8)' instruction */
+
+#if SIZEOF_VOID_P == 8
+			assert((mcode >> 16) == 0xdfde);
+#else
+			assert((mcode >> 16) == 0x8fde);
+#endif
+
+			/* and get the final data segment address */
+
+			pa = sfi->pv + offset;
+		}
+	}
+
+	return pa;
+}
+
/* md_cacheflush ***************************************************************
--- /dev/null
+/* src/vm/jit/mips/patcher.c - MIPS code patching functions
+
+ Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
+ E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
+ J. Wenninger, Institut f. Computersprachen - TU Wien
+
+ This file is part of CACAO.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Contact: cacao@cacaojvm.org
+
+ Authors: Christian Thalinger
+
+ Changes:
+
+ $Id: patcher.c 5164 2006-07-19 15:54:01Z twisti $
+
+*/
+
+
+#include "config.h"
+
+#include <assert.h>
+
+#include "vm/types.h"
+
+#include "mm/memory.h"
+#include "native/native.h"
+#include "vm/builtin.h"
+#include "vm/class.h"
+#include "vm/exceptions.h"
+#include "vm/field.h"
+#include "vm/initialize.h"
+#include "vm/options.h"
+#include "vm/resolve.h"
+#include "vm/references.h"
+#include "vm/jit/asmpart.h"
+#include "vm/jit/patcher.h"
+
+
+/* patcher_wrapper *************************************************************
+
+   Wrapper for all patchers. It also creates the stackframe info
+   structure.
+
+   If the return value of the patcher function is false, it gets the
+   exception object, clears the exception pointer and returns the
+   exception.
+
+*******************************************************************************/
+
+java_objectheader *patcher_wrapper(u1 *sp, u1 *pv, u1 *ra)
+{
+	stackframeinfo sfi;
+	u1 *xpc;
+	java_objectheader *o;
+	functionptr f;
+	bool result;
+	java_objectheader *e;
+
+	/* the concrete patcher function, loaded from the stack below */
+
+	bool (*patcher_function)(u1 *);
+
+	assert(pv != NULL);
+
+	/* get stuff from the stack */
+
+	xpc = (u1 *) *((ptrint *) (sp + 5 * 8));
+	o = (java_objectheader *) *((ptrint *) (sp + 4 * 8));
+	f = (functionptr) *((ptrint *) (sp + 0 * 8));
+
+	/* store PV into the patcher function position */
+
+	*((ptrint *) (sp + 0 * 8)) = (ptrint) pv;
+
+	/* cast the passed function to a patcher function */
+
+	patcher_function = (bool (*)(u1 *)) (ptrint) f;
+
+	/* enter a monitor on the patching position */
+
+	PATCHER_MONITORENTER;
+
+	/* create the stackframeinfo */
+
+	stacktrace_create_extern_stackframeinfo(&sfi, pv, sp + 6 * 8, ra, xpc);
+
+	/* call the proper patcher function */
+
+	result = (patcher_function)(sp);
+
+	/* remove the stackframeinfo */
+
+	stacktrace_remove_stackframeinfo(&sfi);
+
+	/* check for return value and exit accordingly */
+
+	if (result == false) {
+		e = exceptions_get_and_clear_exception();
+
+		PATCHER_MONITOREXIT;
+
+		return e;
+	}
+
+	PATCHER_MARK_PATCHED_MONITOREXIT;
+
+	return NULL;
+}
+
+
+/* patcher_get_putstatic *******************************************************
+
+   Machine code:
+
+   <patched call position>
+   dfc1ffb8    ld       at,-72(s8)
+   fc250000    sd       a1,0(at)
+
+*******************************************************************************/
+
+bool patcher_get_putstatic(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	unresolved_field *uf;
+	s4 disp;
+	u1 *pv;
+	fieldinfo *fi;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	uf = (unresolved_field *) *((ptrint *) (sp + 2 * 8));
+	disp = *((s4 *) (sp + 1 * 8));
+	pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+	/* get the fieldinfo */
+
+	if (!(fi = resolve_field_eager(uf)))
+		return false;
+
+	/* check if the field's class is initialized */
+
+	if (!(fi->class->state & CLASS_INITIALIZED))
+		if (!initialize_class(fi->class))
+			return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* synchronize instruction cache */
+
+	md_icacheflush(ra, 2 * 4);
+
+	/* patch the field value's address */
+
+	*((ptrint *) (pv + disp)) = (ptrint) &(fi->value);
+
+	/* synchronize data cache (currently disabled) */
+
+	/*md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+	return true;
+}
+
+
+/* patcher_get_putfield ********************************************************
+
+   Machine code:
+
+   <patched call position>
+   8ee90020    lw       a5,32(s7)
+
+*******************************************************************************/
+
+bool patcher_get_putfield(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	unresolved_field *uf;
+	fieldinfo *fi;
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	uf = (unresolved_field *) *((ptrint *) (sp + 2 * 8));
+
+	/* get the fieldinfo */
+
+	if (!(fi = resolve_field_eager(uf)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (opt_showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* patch the field's offset into the load/store instruction(s) */
+
+#if SIZEOF_VOID_P == 4
+	if (fi->type == TYPE_LNG) {
+# if WORDS_BIGENDIAN == 1
+		/* ATTENTION: order of these instructions depend on M_LLD_INTERN */
+		*((u4 *) (ra + 0 * 4)) |= (s2) ((fi->offset + 0) & 0x0000ffff);
+		*((u4 *) (ra + 1 * 4)) |= (s2) ((fi->offset + 4) & 0x0000ffff);
+# else
+		/* ATTENTION: order of these instructions depend on M_LLD_INTERN */
+		*((u4 *) (ra + 0 * 4)) |= (s2) ((fi->offset + 4) & 0x0000ffff);
+		*((u4 *) (ra + 1 * 4)) |= (s2) ((fi->offset + 0) & 0x0000ffff);
+# endif
+	} else
+#endif
+		*((u4 *) ra) |= (s2) (fi->offset & 0x0000ffff);
+
+	/* synchronize instruction cache */
+
+	if (opt_showdisassemble) {
+#if SIZEOF_VOID_P == 4
+		if (fi->type == TYPE_LNG)
+			md_icacheflush(ra - 2 * 4, 4 * 4);
+		else
+#endif
+			md_icacheflush(ra - 2 * 4, 3 * 4);
+	}
+	else {
+		md_icacheflush(ra, 2 * 4);
+	}
+
+	return true;
+}
+
+
+/* patcher_aconst **************************************************************
+
+   Machine code:
+
+   <patched call position>
+   dfc4ff98    ld       a0,-104(s8)
+
+*******************************************************************************/
+
+bool patcher_aconst(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	constant_classref *cr;
+	s4 disp;
+	u1 *pv;
+	classinfo *c;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+	disp = *((s4 *) (sp + 1 * 8));
+	pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+	/* get the classinfo */
+
+	if (!(c = resolve_classref_eager(cr)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* synchronize instruction cache */
+
+	md_icacheflush(ra, 2 * 4);
+
+	/* patch the classinfo pointer */
+
+	*((ptrint *) (pv + disp)) = (ptrint) c;
+
+	/* synchronize data cache (currently disabled) */
+
+	/*md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+	return true;
+}
+
+
+/* patcher_builtin_multianewarray **********************************************
+
+   Machine code:
+
+   <patched call position>
+   dfc5ff90    ld       a1,-112(s8)
+   03a03025    move     a2,sp
+   dfd9ff88    ld       t9,-120(s8)
+   0320f809    jalr     t9
+   00000000    nop
+
+*******************************************************************************/
+
+bool patcher_builtin_multianewarray(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	constant_classref *cr;
+	s4 disp;
+	u1 *pv;
+	classinfo *c;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+	disp = *((s4 *) (sp + 1 * 8));
+	pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+	/* get the classinfo */
+
+	if (!(c = resolve_classref_eager(cr)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* synchronize instruction cache */
+
+	md_icacheflush(ra, 2 * 4);
+
+	/* patch the classinfo pointer */
+
+	*((ptrint *) (pv + disp)) = (ptrint) c;
+
+	/* synchronize data cache (currently disabled) */
+
+	/* md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+	return true;
+}
+
+
+/* patcher_builtin_arraycheckcast **********************************************
+
+   Machine code:
+
+   <patched call position>
+   dfc5ffc0    ld       a1,-64(s8)
+   dfd9ffb8    ld       t9,-72(s8)
+   0320f809    jalr     t9
+   00000000    nop
+
+*******************************************************************************/
+
+bool patcher_builtin_arraycheckcast(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	constant_classref *cr;
+	s4 disp;
+	u1 *pv;
+	classinfo *c;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+	disp = *((s4 *) (sp + 1 * 8));
+	pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+	/* get the classinfo */
+
+	if (!(c = resolve_classref_eager(cr)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* synchronize instruction cache */
+
+	md_icacheflush(ra, 2 * 4);
+
+	/* patch the classinfo pointer */
+
+	*((ptrint *) (pv + disp)) = (ptrint) c;
+
+	/* synchronize data cache (currently disabled) */
+
+	/*md_dcacheflush(pv + disp, SIZEOF_VOID_P); */
+
+	return true;
+}
+
+
+/* patcher_invokestatic_special ************************************************
+
+   Machine code:
+
+   <patched call position>
+   dfdeffc0    ld       s8,-64(s8)
+   03c0f809    jalr     s8
+   00000000    nop
+
+******************************************************************************/
+
+bool patcher_invokestatic_special(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	unresolved_method *um;
+	s4 disp;
+	u1 *pv;
+	methodinfo *m;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	um = (unresolved_method *) *((ptrint *) (sp + 2 * 8));
+	disp = *((s4 *) (sp + 1 * 8));
+	pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+	/* get the methodinfo */
+
+	if (!(m = resolve_method_eager(um)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* synchronize instruction cache */
+
+	md_icacheflush(ra, 2 * 4);
+
+	/* patch stubroutine */
+
+	*((ptrint *) (pv + disp)) = (ptrint) m->stubroutine;
+
+	/* synchronize data cache (currently disabled) */
+
+	/* md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+	return true;
+}
+
+
+/* patcher_invokevirtual *******************************************************
+
+   Machine code:
+
+   <patched call position>
+   dc990000    ld       t9,0(a0)
+   df3e0040    ld       s8,64(t9)
+   03c0f809    jalr     s8
+   00000000    nop
+
+*******************************************************************************/
+
+bool patcher_invokevirtual(u1 *sp)
+{
+	u1 *ra;
+#if SIZEOF_VOID_P == 8
+	u8 mcode;
+#else
+	u4 mcode[2];
+#endif
+	unresolved_method *um;
+	methodinfo *m;
+
+	/* get stuff from the stack */
+
+	ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+	mcode = *((u8 *) (sp + 3 * 8));
+#else
+	mcode[0] = *((u4 *) (sp + 3 * 8));
+	mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+	um = (unresolved_method *) *((ptrint *) (sp + 2 * 8));
+
+	/* get the methodinfo */
+
+	if (!(m = resolve_method_eager(um)))
+		return false;
+
+	/* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+	*((u4 *) (ra + 0 * 4)) = mcode;
+	*((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+	*((u4 *) (ra + 0 * 4)) = mcode[0];
+	*((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+	/* if we show disassembly, we have to skip the nop's */
+
+	if (opt_showdisassemble)
+		ra = ra + 2 * 4;
+
+	/* patch vftbl index */
+
+	*((s4 *) (ra + 1 * 4)) |= (s4) ((OFFSET(vftbl_t, table[0]) +
+									 sizeof(methodptr) * m->vftblindex) & 0x0000ffff);
+
+	/* synchronize instruction cache */
+
+	if (opt_showdisassemble)
+		md_icacheflush(ra - 2 * 4, 4 * 4);
+	else
+		md_icacheflush(ra, 2 * 4);
+
+	return true;
+}
+
+
+/* patcher_invokeinterface *****************************************************
+
+ Machine code:
+
+ <patched call position>
+ dc990000 ld t9,0(a0)
+ df39ffa0 ld t9,-96(t9)
+ df3e0018 ld s8,24(t9)
+ 03c0f809 jalr s8
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_invokeinterface(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ unresolved_method *um;
+ methodinfo *m;
+
+ /* get stuff from the patcher stack frame */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ um = (unresolved_method *) *((ptrint *) (sp + 2 * 8));
+
+ /* get the methodinfo (eagerly resolve the called method) */
+
+ if (!(m = resolve_method_eager(um)))
+ return false;
+
+ /* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (opt_showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* patch interfacetable index into the low 16 bits of the second */
+ /* instruction (the offset is negative: table grows downward) */
+
+ *((s4 *) (ra + 1 * 4)) |= (s4) ((OFFSET(vftbl_t, interfacetable[0]) -
+ sizeof(methodptr*) * m->class->index) & 0x0000ffff);
+
+ /* patch method offset into the low 16 bits of the third instruction */
+
+ *((s4 *) (ra + 2 * 4)) |=
+ (s4) ((sizeof(methodptr) * (m - m->class->methods)) & 0x0000ffff);
+
+ /* synchronize instruction cache (include the skipped nop's when */
+ /* disassembly is shown) */
+
+ if (opt_showdisassemble)
+ md_icacheflush(ra - 2 * 4, 5 * 4);
+ else
+ md_icacheflush(ra, 3 * 4);
+
+ return true;
+}
+
+
+/* patcher_checkcast_instanceof_flags ******************************************
+
+ Machine code:
+
+ <patched call position>
+ 8fc3ff24 lw v1,-220(s8)
+ 30630200 andi v1,v1,512
+ 1060000d beq v1,zero,0x000000001051824c
+ 00000000 nop
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_flags(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ constant_classref *cr;
+ s4 disp;
+ u1 *pv;
+ classinfo *c;
+
+ /* get stuff from the patcher stack frame */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+ disp = *((s4 *) (sp + 1 * 8));
+ pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+ /* get the classinfo (eagerly resolve the class reference) */
+
+ if (!(c = resolve_classref_eager(cr)))
+ return false;
+
+ /* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* synchronize instruction cache */
+
+ md_icacheflush(ra, 2 * 4);
+
+ /* patch class flags into the data segment slot at pv + disp */
+
+ *((s4 *) (pv + disp)) = (s4) c->flags;
+
+ /* synchronize data cache */
+ /* NOTE(review): dcache flush is disabled here — presumably not */
+ /* needed on this platform; confirm */
+
+ /*md_dcacheflush(pv + disp, sizeof(s4)); */
+
+ return true;
+}
+
+
+/* patcher_checkcast_instanceof_interface **************************************
+
+ Machine code:
+
+ <patched call position>
+ dd030000 ld v1,0(a4)
+ 8c79001c lw t9,28(v1)
+ 27390000 addiu t9,t9,0
+ 1b200082 blez t9,0x000000001051843c
+ 00000000 nop
+ dc790000 ld t9,0(v1)
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_interface(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ constant_classref *cr;
+ classinfo *c;
+
+ /* get stuff from the patcher stack frame */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+
+ /* get the classinfo (eagerly resolve the class reference) */
+
+ if (!(c = resolve_classref_eager(cr)))
+ return false;
+
+ /* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* if we show disassembly, we have to skip the nop's */
+
+ if (opt_showdisassemble)
+ ra = ra + 2 * 4;
+
+ /* patch super class index into the low 16 bits of the third and */
+ /* sixth instruction of the check sequence */
+
+ *((s4 *) (ra + 2 * 4)) |= (s4) (-(c->index) & 0x0000ffff);
+
+ *((s4 *) (ra + 5 * 4)) |= (s4) ((OFFSET(vftbl_t, interfacetable[0]) -
+ c->index * sizeof(methodptr*)) & 0x0000ffff);
+
+ /* synchronize instruction cache (include the skipped nop's when */
+ /* disassembly is shown) */
+
+ if (opt_showdisassemble)
+ md_icacheflush(ra - 2 * 4, 8 * 4);
+ else
+ md_icacheflush(ra, 6 * 4);
+
+ return true;
+}
+
+
+/* patcher_checkcast_instanceof_class ******************************************
+
+ Machine code:
+
+ <patched call position>
+ dd030000 ld v1,0(a4)
+ dfd9ff18 ld t9,-232(s8)
+
+*******************************************************************************/
+
+bool patcher_checkcast_instanceof_class(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ constant_classref *cr;
+ s4 disp;
+ u1 *pv;
+ classinfo *c;
+
+ /* get stuff from the patcher stack frame */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ cr = (constant_classref *) *((ptrint *) (sp + 2 * 8));
+ disp = *((s4 *) (sp + 1 * 8));
+ pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+ /* get the classinfo (eagerly resolve the class reference) */
+
+ if (!(c = resolve_classref_eager(cr)))
+ return false;
+
+ /* patch back original code */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* synchronize instruction cache */
+
+ md_icacheflush(ra, 2 * 4);
+
+ /* patch super class' vftbl pointer into the data segment slot at */
+ /* pv + disp */
+
+ *((ptrint *) (pv + disp)) = (ptrint) c->vftbl;
+
+ /* synchronize data cache */
+ /* NOTE(review): dcache flush is disabled here — presumably not */
+ /* needed on this platform; confirm */
+
+ /*md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+ return true;
+}
+
+
+/* patcher_clinit **************************************************************
+
+ No special machine code.
+
+*******************************************************************************/
+
+bool patcher_clinit(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ classinfo *c;
+
+ /* get stuff from the patcher stack frame: return address, saved */
+ /* machine code and the class to initialize */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ c = (classinfo *) *((ptrint *) (sp + 2 * 8));
+
+ /* check if the class is initialized; run static initialization if */
+ /* not (may fail, in which case the patcher reports failure) */
+
+ if (!(c->state & CLASS_INITIALIZED))
+ if (!initialize_class(c))
+ return false;
+
+ /* patch back original code (the two overwritten instructions) */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* synchronize instruction cache */
+
+ md_icacheflush(ra, 2 * 4);
+
+ return true;
+}
+
+
+/* patcher_athrow_areturn ******************************************************
+
+ Machine code:
+
+ <patched call position>
+
+*******************************************************************************/
+
+#ifdef ENABLE_VERIFIER
+bool patcher_athrow_areturn(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ unresolved_class *uc;
+ classinfo *c;
+
+ /* get stuff from the patcher stack frame: return address, saved */
+ /* machine code and the unresolved class */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ uc = (unresolved_class *) *((ptrint *) (sp + 2 * 8));
+
+ /* resolve the class (eagerly; result is only needed for the */
+ /* verifier's side effect of resolution) */
+
+ if (!resolve_class(uc, resolveEager, false, &c))
+ return false;
+
+ /* patch back original code (the two overwritten instructions) */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* synchronize instruction cache */
+
+ md_icacheflush(ra, 2 * 4);
+
+ return true;
+}
+#endif /* ENABLE_VERIFIER */
+
+
+/* patcher_resolve_native ******************************************************
+
+ Resolves the native implementation of a method, patches the function
+ pointer into the data segment at pv + disp, and moves the return
+ address back over the patched call position so the restored
+ instructions are re-executed.
+
+*******************************************************************************/
+
+#if !defined(WITH_STATIC_CLASSPATH)
+bool patcher_resolve_native(u1 *sp)
+{
+ u1 *ra;
+#if SIZEOF_VOID_P == 8
+ u8 mcode;
+#else
+ u4 mcode[2];
+#endif
+ methodinfo *m;
+ s4 disp;
+ u1 *pv;
+ functionptr f;
+
+ /* get stuff from the patcher stack frame */
+
+ ra = (u1 *) *((ptrint *) (sp + 5 * 8));
+#if SIZEOF_VOID_P == 8
+ mcode = *((u8 *) (sp + 3 * 8));
+#else
+ mcode[0] = *((u4 *) (sp + 3 * 8));
+ mcode[1] = *((u4 *) (sp + 3 * 8 + 4));
+#endif
+ m = (methodinfo *) *((ptrint *) (sp + 2 * 8));
+ disp = *((s4 *) (sp + 1 * 8));
+ pv = (u1 *) *((ptrint *) (sp + 0 * 8));
+
+ /* calculate and set the new return address: move it back over the */
+ /* two patched instructions so they are re-executed on return */
+
+ ra = ra - 2 * 4;
+ *((ptrint *) (sp + 5 * 8)) = (ptrint) ra;
+
+ /* resolve native function */
+
+ if (!(f = native_resolve_function(m)))
+ return false;
+
+ /* patch back original code at the adjusted return address */
+
+#if SIZEOF_VOID_P == 8
+ *((u4 *) (ra + 0 * 4)) = mcode;
+ *((u4 *) (ra + 1 * 4)) = mcode >> 32;
+#else
+ *((u4 *) (ra + 0 * 4)) = mcode[0];
+ *((u4 *) (ra + 1 * 4)) = mcode[1];
+#endif
+
+ /* synchronize instruction cache */
+
+ md_icacheflush(ra, 2 * 4);
+
+ /* patch native function pointer into the data segment slot at */
+ /* pv + disp */
+
+ *((ptrint *) (pv + disp)) = (ptrint) f;
+
+ /* synchronize data cache */
+ /* NOTE(review): dcache flush is disabled here — presumably not */
+ /* needed on this platform; confirm */
+
+ /*md_dcacheflush(pv + disp, SIZEOF_VOID_P);*/
+
+ return true;
+}
+#endif /* !defined(WITH_STATIC_CLASSPATH) */
+
+
+/*
+ * These are local overrides for various environment variables in Emacs.
+ * Please do not remove this and leave it at the end of the file, where
+ * Emacs will automagically detect them.
+ * ---------------------------------------------------------------------
+ * Local variables:
+ * mode: c
+ * indent-tabs-mode: t
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ * vim:noexpandtab:sw=4:ts=4:
+ */