Authors: Andreas Krall
Christian Thalinger
-
- Changes: Joseph Wenninger
+ Joseph Wenninger
Christian Ullrich
- Edwin Steiner
+ Edwin Steiner
- $Id: codegen.c 5363 2006-09-06 10:20:07Z christian $
+ $Id: codegen.c 6264 2007-01-02 19:40:18Z edwin $
*/
#include "vm/jit/i386/md-abi.h"
#include "vm/jit/i386/codegen.h"
-#include "vm/jit/i386/md-emit.h"
+#include "vm/jit/i386/emit.h"
#include "mm/memory.h"
#include "native/jni.h"
#include "vm/jit/asmpart.h"
#include "vm/jit/codegen-common.h"
#include "vm/jit/dseg.h"
-#include "vm/jit/emit.h"
+#include "vm/jit/emit-common.h"
#include "vm/jit/jit.h"
#include "vm/jit/parse.h"
#include "vm/jit/patcher.h"
#include "vm/jit/reg.h"
#include "vm/jit/replace.h"
-#if defined(ENABLE_LSRA) && !defined(ENABLE_SSA)
-# include "vm/jit/allocator/lsra.h"
-#endif
#if defined(ENABLE_SSA)
# include "vm/jit/optimizing/lsra.h"
# include "vm/jit/optimizing/ssa.h"
+#elif defined(ENABLE_LSRA)
+# include "vm/jit/allocator/lsra.h"
#endif
#if defined(ENABLE_SSA)
void cg_move(codegendata *cd, s4 type, s4 src_regoff, s4 src_flags,
s4 dst_regoff, s4 dst_flags);
-void codegen_insert_phi_moves(codegendata *cd, registerdata *rd, lsradata *ls,
- basicblock *bptr);
+void codegen_insert_phi_moves(jitdata *jd, basicblock *bptr);
#endif
bool codegen(jitdata *jd)
codegendata *cd;
registerdata *rd;
s4 len, s1, s2, s3, d, disp;
- stackptr src;
- varinfo *var;
+ varinfo *var, *var1;
basicblock *bptr;
instruction *iptr;
- exceptiontable *ex;
+ exception_entry *ex;
u2 currentline;
methodinfo *lm; /* local methodinfo for ICMD_INVOKE* */
builtintable_entry *bte;
methoddesc *md;
- rplpoint *replacementpoint;
s4 fieldtype;
+ s4 varindex;
#if defined(ENABLE_SSA)
lsradata *ls;
bool last_cmd_was_goto;
if (!jd->isleafmethod)
cd->stackframesize |= 0x3;
- (void) dseg_addaddress(cd, code); /* CodeinfoPointer */
- (void) dseg_adds4(cd, cd->stackframesize * 4); /* FrameSize */
+ (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
+ (void) dseg_add_unique_s4(cd, cd->stackframesize * 4); /* FrameSize */
#if defined(ENABLE_THREADS)
/* IsSync contains the offset relative to the stack pointer for the
*/
if (checksync && (m->flags & ACC_SYNCHRONIZED))
- (void) dseg_adds4(cd, (rd->memuse + 1) * 4); /* IsSync */
+ (void) dseg_add_unique_s4(cd, (rd->memuse + 1) * 4); /* IsSync */
else
#endif
- (void) dseg_adds4(cd, 0); /* IsSync */
+ (void) dseg_add_unique_s4(cd, 0); /* IsSync */
- (void) dseg_adds4(cd, jd->isleafmethod); /* IsLeaf */
- (void) dseg_adds4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
- (void) dseg_adds4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
+ (void) dseg_add_unique_s4(cd, jd->isleafmethod); /* IsLeaf */
+ (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
+ (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
/* adds a reference for the length of the line number counter. We don't
know the size yet, since we evaluate the information during code
to the information gotten from the class file */
(void) dseg_addlinenumbertablesize(cd);
- (void) dseg_adds4(cd, cd->exceptiontablelength); /* ExTableSize */
+ (void) dseg_add_unique_s4(cd, jd->exceptiontablelength); /* ExTableSize */
/* create exception table */
- for (ex = cd->exceptiontable; ex != NULL; ex = ex->down) {
- dseg_addtarget(cd, ex->start);
- dseg_addtarget(cd, ex->end);
- dseg_addtarget(cd, ex->handler);
- (void) dseg_addaddress(cd, ex->catchtype.cls);
+ for (ex = jd->exceptiontable; ex != NULL; ex = ex->down) {
+ dseg_add_target(cd, ex->start);
+ dseg_add_target(cd, ex->end);
+ dseg_add_target(cd, ex->handler);
+ (void) dseg_add_unique_address(cd, ex->catchtype.any);
}
/* generate method profiling code */
stack_off = 0;
for (p = 0, l = 0; p < md->paramcount; p++) {
t = md->paramtypes[p].type;
+
#if defined(ENABLE_SSA)
if ( ls != NULL ) {
l = ls->local_0[p];
}
#endif
- var = &(rd->locals[l][t]);
+ varindex = jd->local_map[l * 5 + t];
l++;
if (IS_2_WORD_TYPE(t)) /* increment local counter for 2 word types */
l++;
- if (var->type < 0)
+
+ if (varindex == UNUSED)
continue;
+
+ var = VAR(varindex);
+
s1 = md->params[p].regoff;
if (IS_INT_LNG_TYPE(t)) { /* integer args */
log_text("integer register argument");
assert(0);
if (!(var->flags & INMEMORY)) { /* reg arg -> register */
- /* rd->argintregs[md->params[p].regoff -> var->regoff */
+ /* rd->argintregs[md->params[p].regoff -> var->vv.regoff */
}
else { /* reg arg -> spilled */
- /* rd->argintregs[md->params[p].regoff -> var->regoff * 4 */
+ /* rd->argintregs[md->params[p].regoff -> var->vv.regoff * 4 */
}
}
else { /* stack arguments */
if (!(var->flags & INMEMORY)) { /* stack arg -> register */
emit_mov_membase_reg( /* + 4 for return address */
- cd, REG_SP, (cd->stackframesize + s1) * 4 + 4, var->regoff);
+ cd, REG_SP, (cd->stackframesize + s1) * 4 + 4, var->vv.regoff);
/* + 4 for return address */
}
else { /* stack arg -> spilled */
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4,
REG_ITMP1);
emit_mov_reg_membase(
- cd, REG_ITMP1, REG_SP, var->regoff * 4);
+ cd, REG_ITMP1, REG_SP, var->vv.regoff * 4);
}
else
#endif /*defined(ENABLE_SSA)*/
					/* reuse stack slot and avoid copying */
- var->regoff = cd->stackframesize + s1 + 1;
+ var->vv.regoff = cd->stackframesize + s1 + 1;
}
else {
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4,
REG_ITMP1);
emit_mov_reg_membase(
- cd, REG_ITMP1, REG_SP, var->regoff * 4);
+ cd, REG_ITMP1, REG_SP, var->vv.regoff * 4);
emit_mov_membase_reg( /* + 4 for return address */
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4 + 4,
REG_ITMP1);
emit_mov_reg_membase(
- cd, REG_ITMP1, REG_SP, var->regoff * 4 + 4);
+ cd, REG_ITMP1, REG_SP, var->vv.regoff * 4 + 4);
}
else
#endif /*defined(ENABLE_SSA)*/
					/* reuse stack slot and avoid copying */
- var->regoff = cd->stackframesize + s1 + 1;
+ var->vv.regoff = cd->stackframesize + s1 + 1;
}
}
}
log_text("There are no float argument registers!");
assert(0);
if (!(var->flags & INMEMORY)) { /* reg arg -> register */
- /* rd->argfltregs[md->params[p].regoff -> var->regoff */
+ /* rd->argfltregs[md->params[p].regoff -> var->vv.regoff */
} else { /* reg arg -> spilled */
- /* rd->argfltregs[md->params[p].regoff -> var->regoff * 4 */
+ /* rd->argfltregs[md->params[p].regoff -> var->vv.regoff * 4 */
}
}
emit_flds_membase(
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4);
assert(0);
-/* emit_fstp_reg(cd, var->regoff + fpu_st_offset); */
+/* emit_fstp_reg(cd, var->vv.regoff + fpu_st_offset); */
}
else {
emit_fldl_membase(
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4);
assert(0);
-/* emit_fstp_reg(cd, var->regoff + fpu_st_offset); */
+/* emit_fstp_reg(cd, var->vv.regoff + fpu_st_offset); */
}
} else { /* stack-arg -> spilled */
emit_mov_membase_reg(
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4, REG_ITMP1);
emit_mov_reg_membase(
- cd, REG_ITMP1, REG_SP, var->regoff * 4);
+ cd, REG_ITMP1, REG_SP, var->vv.regoff * 4);
if (t == TYPE_FLT) {
emit_flds_membase(
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4);
- emit_fstps_membase(cd, REG_SP, var->regoff * 4);
+ emit_fstps_membase(cd, REG_SP, var->vv.regoff * 4);
}
else {
emit_fldl_membase(
cd, REG_SP, (cd->stackframesize + s1) * 4 + 4);
- emit_fstpl_membase(cd, REG_SP, var->regoff * 4);
+ emit_fstpl_membase(cd, REG_SP, var->vv.regoff * 4);
}
}
else
#endif /*defined(ENABLE_SSA)*/
					/* reuse stack slot and avoid copying */
- var->regoff = cd->stackframesize + s1 + 1;
+ var->vv.regoff = cd->stackframesize + s1 + 1;
}
}
}
#endif
#if !defined(NDEBUG)
- if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
- emit_verbosecall_enter(jd);
+ emit_verbosecall_enter(jd);
#endif
}
#if defined(ENABLE_SSA)
/* with SSA Header is Basic Block 0 - insert phi Moves if necessary */
if ( ls != NULL)
- codegen_insert_phi_moves(cd, rd, ls, ls->basicblocks[0]);
+ codegen_insert_phi_moves(jd, ls->basicblocks[0]);
#endif
/* end of header generation */
- replacementpoint = jd->code->rplpoints;
+ /* create replacement points */
+
+ REPLACEMENT_POINTS_INIT(cd, jd);
/* walk through all basic blocks */
- for (bptr = jd->new_basicblocks; bptr != NULL; bptr = bptr->next) {
+
+ for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
if (bptr->flags >= BBREACHED) {
-
/* branch resolving */
- branchref *brefs;
- for (brefs = bptr->branchrefs; brefs != NULL; brefs = brefs->next) {
- gen_resolvebranch(cd->mcodebase + brefs->branchpos,
- brefs->branchpos,
- bptr->mpc);
- }
+ codegen_resolve_branchrefs(cd, bptr);
-#if 0
/* handle replacement points */
- if (bptr->bitflags & BBFLAG_REPLACEMENT) {
- replacementpoint->pc = (u1*)bptr->mpc; /* will be resolved later */
-
- replacementpoint++;
-
- assert(cd->lastmcodeptr <= cd->mcodeptr);
- cd->lastmcodeptr = cd->mcodeptr + 5; /* 5 byte jmp patch */
- }
-#endif
+ REPLACEMENT_POINT_BLOCK_START(cd, bptr);
/* copy interface registers to their destination */
# endif
if (len > 0) {
len--;
- src = bptr->invars[len];
+ var = VAR(bptr->invars[len]);
if (bptr->type != BBTYPE_STD) {
- if (!IS_2_WORD_TYPE(src->type)) {
- if (bptr->type == BBTYPE_SBR) {
- if (!(src->flags & INMEMORY))
- d = src->regoff;
- else
- d = REG_ITMP1;
- emit_pop_reg(cd, d);
- emit_store(jd, NULL, src, d);
- } else if (bptr->type == BBTYPE_EXH) {
- if (!(src->flags & INMEMORY))
- d = src->regoff;
- else
- d = REG_ITMP1;
+ if (!IS_2_WORD_TYPE(var->type)) {
+ if (bptr->type == BBTYPE_EXH) {
+ d = codegen_reg_of_var(0, var, REG_ITMP1);
M_INTMOVE(REG_ITMP1, d);
- emit_store(jd, NULL, src, d);
+ emit_store(jd, NULL, var, d);
}
-
- } else {
- log_text("copy interface registers(EXH, SBR): longs have to be in memory (begin 1)");
+ }
+ else {
+ log_text("copy interface registers(EXH, SBR): longs \
+ have to be in memory (begin 1)");
assert(0);
}
}
}
- } else
+ }
+ else
#endif /* defined(ENABLE_LSRA) || defined(ENABLE_SSA) */
{
while (len) {
len--;
- src = bptr->invars[len];
+ var = VAR(bptr->invars[len]);
if ((len == bptr->indepth-1) && (bptr->type != BBTYPE_STD)) {
- if (!IS_2_WORD_TYPE(src->type)) {
- if (bptr->type == BBTYPE_SBR) {
- d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
- emit_pop_reg(cd, d);
- emit_store(jd, NULL, src, d);
-
- } else if (bptr->type == BBTYPE_EXH) {
- d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
+ if (!IS_2_WORD_TYPE(var->type)) {
+ if (bptr->type == BBTYPE_EXH) {
+ d = codegen_reg_of_var(0, var, REG_ITMP1);
M_INTMOVE(REG_ITMP1, d);
- emit_store(jd, NULL, src, d);
+ emit_store(jd, NULL, var, d);
}
- } else {
+ }
+ else {
log_text("copy interface registers: longs have to be in \
memory (begin 1)");
assert(0);
}
- } else {
-#if defined(NEW_VAR)
- assert(src->varkind == STACKVAR);
- /* will be done directly in simplereg lateron */
- /* for now codegen_reg_of_var has to be called here to */
- /* set the regoff and flags for all bptr->invars[] */
- d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
-#else
- if (IS_LNG_TYPE(src->type))
- d = codegen_reg_of_var(rd, 0, src,
- PACK_REGS(REG_ITMP1, REG_ITMP2));
- else
- d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
-/* d = codegen_reg_of_var(rd, 0, src, REG_IFTMP); */
-
- if ((src->varkind != STACKVAR)) {
- s2 = src->type;
- s1 = rd->interfaces[len][s2].regoff;
-
- if (IS_FLT_DBL_TYPE(s2)) {
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- M_FLTMOVE(s1, d);
-
- } else {
- if (IS_2_WORD_TYPE(s2))
- M_DLD(d, REG_SP, s1 * 4);
- else
- M_FLD(d, REG_SP, s1 * 4);
- }
-
- } else {
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- if (IS_2_WORD_TYPE(s2))
- M_LNGMOVE(s1, d);
- else
- M_INTMOVE(s1, d);
-
- } else {
- if (IS_2_WORD_TYPE(s2))
- M_LLD(d, REG_SP, s1 * 4);
- else
- M_ILD(d, REG_SP, s1 * 4);
- }
- }
-
- emit_store(jd, NULL, src, d);
- }
-#endif
+ }
+ else {
+ assert((var->flags & INOUT));
}
- }
- }
+ } /* while (len) */
+ } /* */
/* walk through all instructions */
MCODECHECK(1024); /* 1kB should be enough */
switch (iptr->opc) {
- case ICMD_INLINE_START:
-#if 0
- {
- insinfo_inline *insinfo = (insinfo_inline *) iptr->target;
-#if defined(ENABLE_THREADS)
- if (insinfo->synchronize) {
- /* add monitor enter code */
- if (insinfo->method->flags & ACC_STATIC) {
- M_MOV_IMM(&insinfo->method->class->object.header, REG_ITMP1);
- M_AST(REG_ITMP1, REG_SP, 0 * 4);
- }
- else {
- /* nullpointer check must have been performed before */
- /* (XXX not done, yet) */
- var = &(rd->locals[insinfo->synclocal][TYPE_ADR]);
- if (var->flags & INMEMORY) {
- emit_mov_membase_reg(cd, REG_SP, var->regoff * 4, REG_ITMP1);
- M_AST(REG_ITMP1, REG_SP, 0 * 4);
- }
- else {
- M_AST(var->regoff, REG_SP, 0 * 4);
- }
- }
-
- M_MOV_IMM(LOCK_monitor_enter, REG_ITMP3);
- M_CALL(REG_ITMP3);
- }
-#endif
- dseg_addlinenumber_inline_start(cd, iptr);
- }
-#endif
+ case ICMD_NOP: /* ... ==> ... */
+ case ICMD_POP: /* ..., value ==> ... */
+ case ICMD_POP2: /* ..., value, value ==> ... */
break;
- case ICMD_INLINE_END:
-#if 0
- {
- insinfo_inline *insinfo = (insinfo_inline *) iptr->target;
+ case ICMD_INLINE_START:
- dseg_addlinenumber_inline_end(cd, iptr);
- dseg_addlinenumber(cd, iptr->line);
+ REPLACEMENT_POINT_INLINE_START(cd, iptr);
+ break;
-#if defined(ENABLE_THREADS)
- if (insinfo->synchronize) {
- /* add monitor exit code */
- if (insinfo->method->flags & ACC_STATIC) {
- M_MOV_IMM(&insinfo->method->class->object.header, REG_ITMP1);
- M_AST(REG_ITMP1, REG_SP, 0 * 4);
- }
- else {
- var = &(rd->locals[insinfo->synclocal][TYPE_ADR]);
- if (var->flags & INMEMORY) {
- M_ALD(REG_ITMP1, REG_SP, var->regoff * 4);
- M_AST(REG_ITMP1, REG_SP, 0 * 4);
- }
- else {
- M_AST(var->regoff, REG_SP, 0 * 4);
- }
- }
+ case ICMD_INLINE_BODY:
- M_MOV_IMM(LOCK_monitor_exit, REG_ITMP3);
- M_CALL(REG_ITMP3);
- }
-#endif
- }
-#endif
+ REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
+ dseg_addlinenumber_inline_start(cd, iptr);
+ dseg_addlinenumber(cd, iptr->line);
break;
- case ICMD_NOP: /* ... ==> ... */
+ case ICMD_INLINE_END:
+
+ dseg_addlinenumber_inline_end(cd, iptr);
+ dseg_addlinenumber(cd, iptr->line);
break;
case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- M_TEST(s1);
- M_BEQ(0);
- codegen_add_nullpointerexception_ref(cd);
+ emit_nullpointer_check(cd, iptr, s1);
break;
/* constant operations ************************************************/
emit_faddp(cd);
} else {
- disp = dseg_addfloat(cd, iptr->sx.val.f);
+ disp = dseg_add_float(cd, iptr->sx.val.f);
emit_mov_imm_reg(cd, 0, REG_ITMP1);
dseg_adddata(cd);
emit_flds_membase(cd, REG_ITMP1, disp);
emit_faddp(cd);
} else {
- disp = dseg_adddouble(cd, iptr->sx.val.d);
+ disp = dseg_add_double(cd, iptr->sx.val.d);
emit_mov_imm_reg(cd, 0, REG_ITMP1);
dseg_adddata(cd);
emit_fldl_membase(cd, REG_ITMP1, disp);
break;
- /* load/store operations **********************************************/
+ /* load/store/copy/move operations ************************************/
- case ICMD_ILOAD: /* ... ==> ..., content of local variable */
- case ICMD_ALOAD: /* op1 = local variable */
+ case ICMD_ILOAD:
+ case ICMD_ALOAD:
+ case ICMD_LLOAD:
+ case ICMD_FLOAD:
+ case ICMD_DLOAD:
+ case ICMD_ISTORE:
+ case ICMD_LSTORE:
+ case ICMD_FSTORE:
+ case ICMD_DSTORE:
+ case ICMD_COPY:
+ case ICMD_MOVE:
- d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if ((iptr->dst.var->varkind == LOCALVAR) &&
- (iptr->dst.var->varnum == iptr->s1.localindex))
- break;
- var = &(rd->locals[iptr->s1.localindex][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY)
- M_ILD(d, REG_SP, var->regoff * 4);
- else
- M_INTMOVE(var->regoff, d);
- emit_store_dst(jd, iptr, d);
+ emit_copy(jd, iptr, VAROP(iptr->s1), VAROP(iptr->dst));
break;
- case ICMD_LLOAD: /* ... ==> ..., content of local variable */
- /* s1.localindex = local variable */
-
- d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
- if ((iptr->dst.var->varkind == LOCALVAR) &&
- (iptr->dst.var->varnum == iptr->s1.localindex))
- break;
- var = &(rd->locals[iptr->s1.localindex][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY)
- M_LLD(d, REG_SP, var->regoff * 4);
- else
- M_LNGMOVE(var->regoff, d);
- emit_store_dst(jd, iptr, d);
- break;
-
- case ICMD_FLOAD: /* ... ==> ..., content of local variable */
- /* s1.localindex = local variable */
-
- d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if ((iptr->dst.var->varkind == LOCALVAR) &&
- (iptr->dst.var->varnum == iptr->s1.localindex))
- break;
- var = &(rd->locals[iptr->s1.localindex][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY)
- M_FLD(d, REG_SP, var->regoff * 4);
- else
- M_FLTMOVE(var->regoff, d);
- emit_store_dst(jd, iptr, d);
- break;
-
- case ICMD_DLOAD: /* ... ==> ..., content of local variable */
- /* s1.localindex = local variable */
-
- d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if ((iptr->dst.var->varkind == LOCALVAR) &&
- (iptr->dst.var->varnum == iptr->s1.localindex))
- break;
- var = &(rd->locals[iptr->s1.localindex][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY)
- M_DLD(d, REG_SP, var->regoff * 4);
- else
- M_FLTMOVE(var->regoff, d);
- emit_store_dst(jd, iptr, d);
- break;
-
- case ICMD_ISTORE: /* ..., value ==> ... */
- case ICMD_ASTORE: /* op1 = local variable */
-
- if ((iptr->s1.var->varkind == LOCALVAR) &&
- (iptr->s1.var->varnum == iptr->dst.localindex))
- break;
- var = &(rd->locals[iptr->dst.localindex][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- M_IST(s1, REG_SP, var->regoff * 4);
- }
- else {
- s1 = emit_load_s1(jd, iptr, var->regoff);
- M_INTMOVE(s1, var->regoff);
- }
- break;
-
- case ICMD_LSTORE: /* ..., value ==> ... */
- /* dst.localindex = local variable */
-
- if ((iptr->s1.var->varkind == LOCALVAR) &&
- (iptr->s1.var->varnum == iptr->dst.localindex))
- break;
- var = &(rd->locals[iptr->dst.localindex][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
- M_LST(s1, REG_SP, var->regoff * 4);
- }
- else {
- s1 = emit_load_s1(jd, iptr, var->regoff);
- M_LNGMOVE(s1, var->regoff);
- }
- break;
-
- case ICMD_FSTORE: /* ..., value ==> ... */
- /* dst.localindex = local variable */
-
- if ((iptr->s1.var->varkind == LOCALVAR) &&
- (iptr->s1.var->varnum == iptr->dst.localindex))
- break;
- var = &(rd->locals[iptr->dst.localindex][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- s1 = emit_load_s1(jd, iptr, REG_FTMP1);
- M_FST(s1, REG_SP, var->regoff * 4);
- }
- else {
- s1 = emit_load_s1(jd, iptr, var->regoff);
- M_FLTMOVE(s1, var->regoff);
- }
- break;
-
- case ICMD_DSTORE: /* ..., value ==> ... */
- /* dst.localindex = local variable */
-
- if ((iptr->s1.var->varkind == LOCALVAR) &&
- (iptr->s1.var->varnum == iptr->dst.localindex))
- break;
- var = &(rd->locals[iptr->dst.localindex][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- s1 = emit_load_s1(jd, iptr, REG_FTMP1);
- M_DST(s1, REG_SP, var->regoff * 4);
- }
- else {
- s1 = emit_load_s1(jd, iptr, var->regoff);
- M_FLTMOVE(s1, var->regoff);
- }
- break;
-
-
- /* pop/dup/swap operations ********************************************/
-
- /* attention: double and longs are only one entry in CACAO ICMDs */
-
- case ICMD_POP: /* ..., value ==> ... */
- case ICMD_POP2: /* ..., value, value ==> ... */
- break;
-
- case ICMD_DUP: /* ..., a ==> ..., a, a */
-
- M_COPY(iptr->s1.var, iptr->dst.var);
- break;
-
- case ICMD_DUP_X1: /* ..., a, b ==> ..., b, a, b */
-
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[2+2]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[2+1]);
- M_COPY(iptr->dst.dupslots[2+2], iptr->dst.dupslots[2+0]);
- break;
-
- case ICMD_DUP_X2: /* ..., a, b, c ==> ..., c, a, b, c */
-
- M_COPY(iptr->dst.dupslots[ 2], iptr->dst.dupslots[3+3]);
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[3+2]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[3+1]);
- M_COPY(iptr->dst.dupslots[3+3], iptr->dst.dupslots[3+0]);
- break;
-
- case ICMD_DUP2: /* ..., a, b ==> ..., a, b, a, b */
-
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[2+1]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[2+0]);
- break;
-
- case ICMD_DUP2_X1: /* ..., a, b, c ==> ..., b, c, a, b, c */
-
- M_COPY(iptr->dst.dupslots[ 2], iptr->dst.dupslots[3+4]);
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[3+3]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[3+2]);
- M_COPY(iptr->dst.dupslots[3+4], iptr->dst.dupslots[3+1]);
- M_COPY(iptr->dst.dupslots[3+3], iptr->dst.dupslots[3+0]);
- break;
-
- case ICMD_DUP2_X2: /* ..., a, b, c, d ==> ..., c, d, a, b, c, d */
-
- M_COPY(iptr->dst.dupslots[ 3], iptr->dst.dupslots[4+5]);
- M_COPY(iptr->dst.dupslots[ 2], iptr->dst.dupslots[4+4]);
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[4+3]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[4+2]);
- M_COPY(iptr->dst.dupslots[4+5], iptr->dst.dupslots[4+1]);
- M_COPY(iptr->dst.dupslots[4+4], iptr->dst.dupslots[4+0]);
+ case ICMD_ASTORE:
+ if (!(iptr->flags.bits & INS_FLAG_RETADDR))
+ emit_copy(jd, iptr, VAROP(iptr->s1), VAROP(iptr->dst));
break;
- case ICMD_SWAP: /* ..., a, b ==> ..., b, a */
-
- M_COPY(iptr->dst.dupslots[ 1], iptr->dst.dupslots[2+0]);
- M_COPY(iptr->dst.dupslots[ 0], iptr->dst.dupslots[2+1]);
- break;
-
-
-#if 0
- case ICMD_DUP_X1: /* ..., a, b ==> ..., b, a, b */
-
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
-#if defined(ENABLE_SSA)
- if ((ls==NULL) || (iptr->dst->varkind != TEMPVAR) ||
- (ls->lifetime[-iptr->dst->varnum-1].type != -1)) {
-#endif
- M_COPY(iptr->dst, iptr->dst->prev->prev);
-#if defined(ENABLE_SSA)
- } else {
- M_COPY(src, iptr->dst->prev->prev);
- }
-#endif
- break;
-
- case ICMD_DUP_X2: /* ..., a, b, c ==> ..., c, a, b, c */
-
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
- M_COPY(src->prev->prev, iptr->dst->prev->prev);
-#if defined(ENABLE_SSA)
- if ((ls==NULL) || (iptr->dst->varkind != TEMPVAR) ||
- (ls->lifetime[-iptr->dst->varnum-1].type != -1)) {
-#endif
- M_COPY(iptr->dst, iptr->dst->prev->prev->prev);
-#if defined(ENABLE_SSA)
- } else {
- M_COPY(src, iptr->dst->prev->prev->prev);
- }
-#endif
- break;
-#endif
/* integer operations *************************************************/
emit_store_dst(jd, iptr, d);
break;
+ case ICMD_IINC:
case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+
+ /* `inc reg' is slower on p4's (regarding to ia32
+ optimization reference manual and benchmarks) and as
+ fast on athlon's. */
+
M_INTMOVE(s1, d);
M_IADD_IMM(iptr->sx.val.i, d);
emit_store_dst(jd, iptr, d);
break;
case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
break;
case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
break;
case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
break;
case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1_low(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, EAX_EDX_PACKED);
s1 = emit_load_s1(jd, iptr, EAX);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, EAX);
-
- if (checknull) {
- M_TEST(s2);
- M_BEQ(0);
- codegen_add_arithmeticexception_ref(cd);
- }
+ emit_arithmetic_check(cd, iptr, s2);
M_INTMOVE(s1, EAX); /* we need the first operand in EAX */
s1 = emit_load_s1(jd, iptr, EAX);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, EDX);
-
- if (checknull) {
- M_TEST(s2);
- M_BEQ(0);
- codegen_add_arithmeticexception_ref(cd);
- }
+ emit_arithmetic_check(cd, iptr, s2);
M_INTMOVE(s1, EAX); /* we need the first operand in EAX */
break;
case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
/* TODO: optimize for `/ 2' */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
M_INTMOVE(s1, d);
M_TEST(d);
M_BNS(6);
- M_IADD_IMM32((1 << iptr->sx.val.i) - 1, d); /* 32-bit for jump off. */
+ M_IADD_IMM32((1 << iptr->sx.val.i) - 1, d);/* 32-bit for jump off */
M_SRA_IMM(iptr->sx.val.i, d);
emit_store_dst(jd, iptr, d);
break;
case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
M_BGE(2 + 2 + 6 + 2);
M_MOV(s1, d); /* don't use M_INTMOVE, so we know the jump offset */
M_NEG(d);
- M_AND_IMM32(iptr->sx.val.i, d); /* use 32-bit for jump offset */
+ M_AND_IMM32(iptr->sx.val.i, d); /* use 32-bit for jump offset */
M_NEG(d);
emit_store_dst(jd, iptr, d);
break;
M_INTMOVE(GET_LOW_REG(s2), REG_ITMP3);
M_OR(GET_HIGH_REG(s2), REG_ITMP3);
- M_BEQ(0);
- codegen_add_arithmeticexception_ref(cd);
+ /* XXX could be optimized */
+ emit_arithmetic_check(cd, iptr, REG_ITMP3);
bte = iptr->sx.s23.s3.bte;
md = bte->md;
break;
case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_RESULT_PACKED);
#if 0
case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
d = codegen_reg_of_dst(jd, iptr, REG_NULL);
if (iptr->dst.var->flags & INMEMORY) {
if (iptr->s1.var->flags & INMEMORY) {
/* Alpha algorithm */
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->vv.regoff * 4);
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4 + 4);
+ CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->vv.regoff * 4 + 4);
disp += 2;
disp += 3;
disp += 3;
disp += 2;
- emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->regoff * 4, REG_ITMP1);
- emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->regoff * 4 + 4, REG_ITMP2);
+ emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->vv.regoff * 4, REG_ITMP1);
+ emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->vv.regoff * 4 + 4, REG_ITMP2);
emit_alu_imm_reg(cd, ALU_AND, iptr->sx.val.l, REG_ITMP1);
emit_alu_imm_reg(cd, ALU_AND, iptr->sx.val.l >> 32, REG_ITMP2);
- emit_alu_imm_membase(cd, ALU_CMP, 0, REG_SP, iptr->s1.var->regoff * 4 + 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0, REG_SP, iptr->s1.var->vv.regoff * 4 + 4);
emit_jcc(cd, CC_GE, disp);
- emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->regoff * 4, REG_ITMP1);
- emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->regoff * 4 + 4, REG_ITMP2);
+ emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->vv.regoff * 4, REG_ITMP1);
+ emit_mov_membase_reg(cd, REG_SP, iptr->s1.var->vv.regoff * 4 + 4, REG_ITMP2);
emit_neg_reg(cd, REG_ITMP1);
emit_alu_imm_reg(cd, ALU_ADC, 0, REG_ITMP2);
emit_alu_imm_reg(cd, ALU_ADC, 0, REG_ITMP2);
emit_neg_reg(cd, REG_ITMP2);
- emit_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst.var->regoff * 4);
- emit_mov_reg_membase(cd, REG_ITMP2, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst.var->vv.regoff * 4);
+ emit_mov_reg_membase(cd, REG_ITMP2, REG_SP, iptr->dst.var->vv.regoff * 4 + 4);
}
}
break;
case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
if (iptr->sx.val.i & 0x20) {
M_MOV(GET_LOW_REG(d), GET_HIGH_REG(d));
M_CLR(GET_LOW_REG(d));
- M_SLLD_IMM(iptr->sx.val.i & 0x3f, GET_LOW_REG(d), GET_HIGH_REG(d));
+ M_SLLD_IMM(iptr->sx.val.i & 0x3f, GET_LOW_REG(d),
+ GET_HIGH_REG(d));
}
else {
- M_SLLD_IMM(iptr->sx.val.i & 0x3f, GET_LOW_REG(d), GET_HIGH_REG(d));
+ M_SLLD_IMM(iptr->sx.val.i & 0x3f, GET_LOW_REG(d),
+ GET_HIGH_REG(d));
M_SLL_IMM(iptr->sx.val.i & 0x3f, GET_LOW_REG(d));
}
emit_store_dst(jd, iptr, d);
break;
case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
break;
case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
break;
case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
- /* sx.val.i = constant */
+ /* sx.val.i = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
break;
case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
- /* sx.val.l = constant */
+ /* sx.val.l = constant */
s1 = emit_load_s1(jd, iptr, REG_ITMP12_PACKED);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP12_PACKED);
emit_store_dst(jd, iptr, d);
break;
- case ICMD_IINC: /* ..., value ==> ..., value + constant */
- /* s1.localindex = variable, sx.val.i = constant */
-
-#if defined(ENABLE_SSA)
- if ( ls != NULL ) {
- varinfo *var_t;
-
-
- var = &(rd->locals[iptr->s1.localindex][TYPE_INT]);
- var_t = &(rd->locals[iptr->val._i.op1_t][TYPE_INT]);
-
- /* set s1 to reg of destination or REG_ITMP1 */
- if (var_t->flags & INMEMORY)
- s1 = REG_ITMP1;
- else
- s1 = var_t->regoff;
-
- /* move source value to s1 */
- if (var->flags & INMEMORY)
- M_ILD( s1, REG_SP, var->regoff * 4);
- else
- M_INTMOVE(var->regoff, s1);
-
- /* `inc reg' is slower on p4's (regarding to ia32
- optimization reference manual and benchmarks) and as
- fast on athlon's. */
-
- M_IADD_IMM(iptr->val._i.i, s1);
-
- if (var_t->flags & INMEMORY)
- M_IST(s1, REG_SP, var_t->regoff * 4);
-
- } else
-#endif /* defined(ENABLE_SSA) */
- {
- var = &(rd->locals[iptr->s1.localindex][TYPE_INT]);
- if (var->flags & INMEMORY) {
- s1 = REG_ITMP1;
- M_ILD(s1, REG_SP, var->regoff * 4);
- }
- else
- s1 = var->regoff;
-
- /* `inc reg' is slower on p4's (regarding to ia32
- optimization reference manual and benchmarks) and as
- fast on athlon's. */
-
- M_IADD_IMM(iptr->sx.val.i, s1);
-
- if (var->flags & INMEMORY)
- M_IST(s1, REG_SP, var->regoff * 4);
- }
- break;
-
/* floating operations ************************************************/
case ICMD_I2F: /* ..., value ==> ..., (float) value */
case ICMD_I2D: /* ..., value ==> ..., (double) value */
+ var = VAROP(iptr->s1);
d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if (iptr->s1.var->flags & INMEMORY) {
- emit_fildl_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ if (var->flags & INMEMORY) {
+ emit_fildl_membase(cd, REG_SP, var->vv.regoff * 4);
} else {
- disp = dseg_adds4(cd, 0);
+ /* XXX not thread safe! */
+ disp = dseg_add_unique_s4(cd, 0);
emit_mov_imm_reg(cd, 0, REG_ITMP1);
dseg_adddata(cd);
- emit_mov_reg_membase(cd, iptr->s1.var->regoff, REG_ITMP1, disp);
+ emit_mov_reg_membase(cd, var->vv.regoff, REG_ITMP1, disp);
emit_fildl_membase(cd, REG_ITMP1, disp);
}
+
emit_store_dst(jd, iptr, d);
break;
case ICMD_L2F: /* ..., value ==> ..., (float) value */
case ICMD_L2D: /* ..., value ==> ..., (double) value */
+ var = VAROP(iptr->s1);
d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if (iptr->s1.var->flags & INMEMORY) {
- emit_fildll_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ if (var->flags & INMEMORY) {
+ emit_fildll_membase(cd, REG_SP, var->vv.regoff * 4);
} else {
log_text("L2F: longs have to be in memory");
dseg_adddata(cd);
/* Round to zero, 53-bit mode, exception masked */
- disp = dseg_adds4(cd, 0x0e7f);
+ disp = dseg_add_s4(cd, 0x0e7f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_fistpl_membase(cd, REG_SP, iptr->dst.var->regoff * 4);
+ var = VAROP(iptr->dst);
+ var1 = VAROP(iptr->s1);
+
+ if (var->flags & INMEMORY) {
+ emit_fistpl_membase(cd, REG_SP, var->vv.regoff * 4);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0x80000000, REG_SP, iptr->dst.var->regoff * 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0x80000000,
+ REG_SP, var->vv.regoff * 4);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2 + 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
} else {
- disp = dseg_adds4(cd, 0);
+ /* XXX not thread safe! */
+ disp = dseg_add_unique_s4(cd, 0);
emit_fistpl_membase(cd, REG_ITMP1, disp);
- emit_mov_membase_reg(cd, REG_ITMP1, disp, iptr->dst.var->regoff);
+ emit_mov_membase_reg(cd, REG_ITMP1, disp, var->vv.regoff);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_reg(cd, ALU_CMP, 0x80000000, iptr->dst.var->regoff);
+ emit_alu_imm_reg(cd, ALU_CMP, 0x80000000, var->vv.regoff);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
- disp += 5 + 2 + ((REG_RESULT == iptr->dst.var->regoff) ? 0 : 2);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
+ disp += 5 + 2 + ((REG_RESULT == var->vv.regoff) ? 0 : 2);
}
emit_jcc(cd, CC_NE, disp);
/* XXX: change this when we use registers */
- emit_flds_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ emit_flds_membase(cd, REG_SP, var1->vv.regoff * 4);
emit_mov_imm_reg(cd, (ptrint) asm_builtin_f2i, REG_ITMP1);
emit_call_reg(cd, REG_ITMP1);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_mov_reg_membase(cd, REG_RESULT, REG_SP, iptr->dst.var->regoff * 4);
+ if (var->flags & INMEMORY) {
+ emit_mov_reg_membase(cd, REG_RESULT, REG_SP, var->vv.regoff * 4);
} else {
- M_INTMOVE(REG_RESULT, iptr->dst.var->regoff);
+ M_INTMOVE(REG_RESULT, var->vv.regoff);
}
break;
dseg_adddata(cd);
/* Round to zero, 53-bit mode, exception masked */
- disp = dseg_adds4(cd, 0x0e7f);
+ disp = dseg_add_s4(cd, 0x0e7f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_fistpl_membase(cd, REG_SP, iptr->dst.var->regoff * 4);
+ var = VAROP(iptr->dst);
+ var1 = VAROP(iptr->s1);
+
+ if (var->flags & INMEMORY) {
+ emit_fistpl_membase(cd, REG_SP, var->vv.regoff * 4);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0x80000000, REG_SP, iptr->dst.var->regoff * 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0x80000000,
+ REG_SP, var->vv.regoff * 4);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2 + 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
} else {
- disp = dseg_adds4(cd, 0);
+ /* XXX not thread safe! */
+ disp = dseg_add_unique_s4(cd, 0);
emit_fistpl_membase(cd, REG_ITMP1, disp);
- emit_mov_membase_reg(cd, REG_ITMP1, disp, iptr->dst.var->regoff);
+ emit_mov_membase_reg(cd, REG_ITMP1, disp, var->vv.regoff);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_reg(cd, ALU_CMP, 0x80000000, iptr->dst.var->regoff);
+ emit_alu_imm_reg(cd, ALU_CMP, 0x80000000, var->vv.regoff);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
- disp += 5 + 2 + ((REG_RESULT == iptr->dst.var->regoff) ? 0 : 2);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
+ disp += 5 + 2 + ((REG_RESULT == var->vv.regoff) ? 0 : 2);
}
emit_jcc(cd, CC_NE, disp);
/* XXX: change this when we use registers */
- emit_fldl_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ emit_fldl_membase(cd, REG_SP, var1->vv.regoff * 4);
emit_mov_imm_reg(cd, (ptrint) asm_builtin_d2i, REG_ITMP1);
emit_call_reg(cd, REG_ITMP1);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_mov_reg_membase(cd, REG_RESULT, REG_SP, iptr->dst.var->regoff * 4);
+ if (var->flags & INMEMORY) {
+ emit_mov_reg_membase(cd, REG_RESULT, REG_SP, var->vv.regoff * 4);
} else {
- M_INTMOVE(REG_RESULT, iptr->dst.var->regoff);
+ M_INTMOVE(REG_RESULT, var->vv.regoff);
}
break;
dseg_adddata(cd);
/* Round to zero, 53-bit mode, exception masked */
- disp = dseg_adds4(cd, 0x0e7f);
+ disp = dseg_add_s4(cd, 0x0e7f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_fistpll_membase(cd, REG_SP, iptr->dst.var->regoff * 4);
+ var = VAROP(iptr->dst);
+ var1 = VAROP(iptr->s1);
+
+ if (var->flags & INMEMORY) {
+ emit_fistpll_membase(cd, REG_SP, var->vv.regoff * 4);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0x80000000, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0x80000000,
+ REG_SP, var->vv.regoff * 4 + 4);
disp = 6 + 4;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2;
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4 + 4);
emit_jcc(cd, CC_NE, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0, REG_SP, iptr->dst.var->regoff * 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0,
+ REG_SP, var->vv.regoff * 4);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2 + 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
emit_jcc(cd, CC_NE, disp);
/* XXX: change this when we use registers */
- emit_flds_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ emit_flds_membase(cd, REG_SP, var1->vv.regoff * 4);
emit_mov_imm_reg(cd, (ptrint) asm_builtin_f2l, REG_ITMP1);
emit_call_reg(cd, REG_ITMP1);
- emit_mov_reg_membase(cd, REG_RESULT, REG_SP, iptr->dst.var->regoff * 4);
- emit_mov_reg_membase(cd, REG_RESULT2, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_mov_reg_membase(cd, REG_RESULT, REG_SP, var->vv.regoff * 4);
+ emit_mov_reg_membase(cd, REG_RESULT2,
+ REG_SP, var->vv.regoff * 4 + 4);
} else {
log_text("F2L: longs have to be in memory");
dseg_adddata(cd);
/* Round to zero, 53-bit mode, exception masked */
- disp = dseg_adds4(cd, 0x0e7f);
+ disp = dseg_add_s4(cd, 0x0e7f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- if (iptr->dst.var->flags & INMEMORY) {
- emit_fistpll_membase(cd, REG_SP, iptr->dst.var->regoff * 4);
+ var = VAROP(iptr->dst);
+ var1 = VAROP(iptr->s1);
+
+ if (var->flags & INMEMORY) {
+ emit_fistpll_membase(cd, REG_SP, var->vv.regoff * 4);
/* Round to nearest, 53-bit mode, exceptions masked */
- disp = dseg_adds4(cd, 0x027f);
+ disp = dseg_add_s4(cd, 0x027f);
emit_fldcw_membase(cd, REG_ITMP1, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0x80000000, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0x80000000,
+ REG_SP, var->vv.regoff * 4 + 4);
disp = 6 + 4;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2;
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
disp += 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4 + 4);
emit_jcc(cd, CC_NE, disp);
- emit_alu_imm_membase(cd, ALU_CMP, 0, REG_SP, iptr->dst.var->regoff * 4);
+ emit_alu_imm_membase(cd, ALU_CMP, 0, REG_SP, var->vv.regoff * 4);
disp = 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->s1.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var1->vv.regoff * 4);
disp += 5 + 2 + 3;
- CALCOFFSETBYTES(disp, REG_SP, iptr->dst.var->regoff * 4);
+ CALCOFFSETBYTES(disp, REG_SP, var->vv.regoff * 4);
emit_jcc(cd, CC_NE, disp);
/* XXX: change this when we use registers */
- emit_fldl_membase(cd, REG_SP, iptr->s1.var->regoff * 4);
+ emit_fldl_membase(cd, REG_SP, var1->vv.regoff * 4);
emit_mov_imm_reg(cd, (ptrint) asm_builtin_d2l, REG_ITMP1);
emit_call_reg(cd, REG_ITMP1);
- emit_mov_reg_membase(cd, REG_RESULT, REG_SP, iptr->dst.var->regoff * 4);
- emit_mov_reg_membase(cd, REG_RESULT2, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_mov_reg_membase(cd, REG_RESULT, REG_SP, var->vv.regoff * 4);
+ emit_mov_reg_membase(cd, REG_RESULT2,
+ REG_SP, var->vv.regoff * 4 + 4);
} else {
log_text("D2L: longs have to be in memory");
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- gen_nullptr_check(s1);
+ emit_nullpointer_check(cd, iptr, s1);
M_ILD(d, s1, OFFSET(java_arrayheader, size));
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movsbl_memindex_reg(cd, OFFSET(java_bytearray, data[0]), s1, s2, 0, d);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movsbl_memindex_reg(cd, OFFSET(java_bytearray, data[0]),
+ s1, s2, 0, d);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movzwl_memindex_reg(cd, OFFSET(java_chararray, data[0]), s1, s2, 1, d);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movzwl_memindex_reg(cd, OFFSET(java_chararray, data[0]),
+ s1, s2, 1, d);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movswl_memindex_reg(cd, OFFSET(java_shortarray, data[0]), s1, s2, 1, d);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movswl_memindex_reg(cd, OFFSET(java_shortarray, data[0]),
+ s1, s2, 1, d);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_mov_memindex_reg(cd, OFFSET(java_intarray, data[0]), s1, s2, 2, d);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_mov_memindex_reg(cd, OFFSET(java_intarray, data[0]),
+ s1, s2, 2, d);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- assert(iptr->dst.var->flags & INMEMORY);
- emit_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]), s1, s2, 3, REG_ITMP3);
- emit_mov_reg_membase(cd, REG_ITMP3, REG_SP, iptr->dst.var->regoff * 4);
- emit_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]) + 4, s1, s2, 3, REG_ITMP3);
- emit_mov_reg_membase(cd, REG_ITMP3, REG_SP, iptr->dst.var->regoff * 4 + 4);
+ emit_array_checks(cd, iptr, s1, s2);
+
+ var = VAROP(iptr->dst);
+
+ assert(var->flags & INMEMORY);
+ emit_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]),
+ s1, s2, 3, REG_ITMP3);
+ emit_mov_reg_membase(cd, REG_ITMP3, REG_SP, var->vv.regoff * 4);
+ emit_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]) + 4,
+ s1, s2, 3, REG_ITMP3);
+ emit_mov_reg_membase(cd, REG_ITMP3, REG_SP, var->vv.regoff * 4 + 4);
break;
case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
emit_flds_memindex(cd, OFFSET(java_floatarray, data[0]), s1, s2, 2);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_fldl_memindex(cd, OFFSET(java_doublearray, data[0]), s1, s2, 3);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_fldl_memindex(cd, OFFSET(java_doublearray, data[0]), s1, s2,3);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_mov_memindex_reg(cd, OFFSET(java_objectarray, data[0]), s1, s2, 2, d);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_mov_memindex_reg(cd, OFFSET(java_objectarray, data[0]),
+ s1, s2, 2, d);
emit_store_dst(jd, iptr, d);
break;
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
- if (s3 >= EBP) { /* because EBP, ESI, EDI have no xH and xL nibbles */
+ if (s3 >= EBP) {
+ /* because EBP, ESI, EDI have no xH and xL nibbles */
M_INTMOVE(s3, REG_ITMP3);
s3 = REG_ITMP3;
}
- emit_movb_reg_memindex(cd, s3, OFFSET(java_bytearray, data[0]), s1, s2, 0);
+ emit_movb_reg_memindex(cd, s3, OFFSET(java_bytearray, data[0]),
+ s1, s2, 0);
break;
case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
- emit_movw_reg_memindex(cd, s3, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_chararray, data[0]),
+ s1, s2, 1);
break;
case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
- emit_movw_reg_memindex(cd, s3, OFFSET(java_shortarray, data[0]), s1, s2, 1);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_shortarray, data[0]),
+ s1, s2, 1);
break;
case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
- emit_mov_reg_memindex(cd, s3, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_intarray, data[0]),
+ s1, s2, 2);
break;
case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- assert(iptr->sx.s23.s3.var->flags & INMEMORY);
- emit_mov_membase_reg(cd, REG_SP, iptr->sx.s23.s3.var->regoff * 4, REG_ITMP3);
- emit_mov_reg_memindex(cd, REG_ITMP3, OFFSET(java_longarray, data[0]), s1, s2, 3);
- emit_mov_membase_reg(cd, REG_SP, iptr->sx.s23.s3.var->regoff * 4 + 4, REG_ITMP3);
- emit_mov_reg_memindex(cd, REG_ITMP3, OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
+ emit_array_checks(cd, iptr, s1, s2);
+
+ var = VAROP(iptr->sx.s23.s3);
+
+ assert(var->flags & INMEMORY);
+ emit_mov_membase_reg(cd, REG_SP, var->vv.regoff * 4, REG_ITMP3);
+ emit_mov_reg_memindex(cd, REG_ITMP3, OFFSET(java_longarray, data[0])
+ , s1, s2, 3);
+ emit_mov_membase_reg(cd, REG_SP, var->vv.regoff * 4 + 4, REG_ITMP3);
+ emit_mov_reg_memindex(cd, REG_ITMP3,
+ OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
break;
case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_FTMP1);
- emit_fstps_memindex(cd, OFFSET(java_floatarray, data[0]), s1, s2, 2);
+ emit_fstps_memindex(cd, OFFSET(java_floatarray, data[0]), s1, s2,2);
break;
case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_FTMP1);
- emit_fstpl_memindex(cd, OFFSET(java_doublearray, data[0]), s1, s2, 3);
+ emit_fstpl_memindex(cd, OFFSET(java_doublearray, data[0]),
+ s1, s2, 3);
break;
case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
+ emit_array_checks(cd, iptr, s1, s2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
M_AST(s1, REG_SP, 0 * 4);
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
s3 = emit_load_s3(jd, iptr, REG_ITMP3);
- emit_mov_reg_memindex(cd, s3, OFFSET(java_objectarray, data[0]), s1, s2, 2);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_objectarray, data[0]),
+ s1, s2, 2);
break;
case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movb_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_bytearray, data[0]), s1, s2, 0);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movb_imm_memindex(cd, iptr->sx.s23.s3.constval,
+ OFFSET(java_bytearray, data[0]), s1, s2, 0);
break;
case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval,
+ OFFSET(java_chararray, data[0]), s1, s2, 1);
break;
case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_shortarray, data[0]), s1, s2, 1);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval,
+ OFFSET(java_shortarray, data[0]), s1, s2, 1);
break;
case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_mov_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_mov_imm_memindex(cd, iptr->sx.s23.s3.constval,
+ OFFSET(java_intarray, data[0]), s1, s2, 2);
break;
case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_mov_imm_memindex(cd, (u4) (iptr->sx.s23.s3.constval & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
- emit_mov_imm_memindex(cd, ((s4)iptr->sx.s23.s3.constval) >> 31, OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_mov_imm_memindex(cd,
+ (u4) (iptr->sx.s23.s3.constval & 0x00000000ffffffff),
+ OFFSET(java_longarray, data[0]), s1, s2, 3);
+ emit_mov_imm_memindex(cd,
+ ((s4)iptr->sx.s23.s3.constval) >> 31,
+ OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
break;
case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
s2 = emit_load_s2(jd, iptr, REG_ITMP2);
- if (INSTRUCTION_MUST_CHECK(iptr)) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- emit_mov_imm_memindex(cd, 0, OFFSET(java_objectarray, data[0]), s1, s2, 2);
+ emit_array_checks(cd, iptr, s1, s2);
+ emit_mov_imm_memindex(cd, 0,
+ OFFSET(java_objectarray, data[0]), s1, s2, 2);
break;
case ICMD_GETFIELD: /* .., objectref. ==> ..., value */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- gen_nullptr_check(s1);
+ emit_nullpointer_check(cd, iptr, s1);
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field *uf = iptr->sx.s23.s3.uf;
case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- gen_nullptr_check(s1);
+ emit_nullpointer_check(cd, iptr, s1);
/* must be done here because of code patching */
/* following NOP) */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- gen_nullptr_check(s1);
+ emit_nullpointer_check(cd, iptr, s1);
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
unresolved_field *uf = iptr->sx.s23.s3.uf;
M_JMP(REG_ITMP3);
break;
- case ICMD_INLINE_GOTO:
-#if 0
- M_COPY(src, iptr->dst.var);
-#endif
- /* FALLTHROUGH! */
-
case ICMD_GOTO: /* ... ==> ... */
+ case ICMD_RET: /* ... ==> ... */
#if defined(ENABLE_SSA)
if ( ls != NULL ) {
last_cmd_was_goto = true;
/* In case of a Goto phimoves have to be inserted before the */
/* jump */
- codegen_insert_phi_moves(cd, rd, ls, bptr);
+ codegen_insert_phi_moves(jd, bptr);
}
#endif
M_JMP_IMM(0);
case ICMD_JSR: /* ... ==> ... */
- M_CALL_IMM(0);
+ M_JMP_IMM(0);
codegen_addreference(cd, iptr->sx.s23.s3.jsrtarget.block);
break;
- case ICMD_RET: /* ... ==> ... */
- /* s1.localindex = local variable */
-
- var = &(rd->locals[iptr->s1.localindex][TYPE_ADR]);
- if (var->flags & INMEMORY) {
- M_ALD(REG_ITMP1, REG_SP, var->regoff * 4);
- M_JMP(REG_ITMP1);
- }
- else
- M_JMP(var->regoff);
- break;
-
case ICMD_IFNULL: /* ..., value ==> ... */
s1 = emit_load_s1(jd, iptr, REG_ITMP1);
case ICMD_IRETURN: /* ..., retvalue ==> ... */
+ REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_RESULT);
M_INTMOVE(s1, REG_RESULT);
goto nowperformreturn;
case ICMD_LRETURN: /* ..., retvalue ==> ... */
+ REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_RESULT_PACKED);
M_LNGMOVE(s1, REG_RESULT_PACKED);
goto nowperformreturn;
case ICMD_ARETURN: /* ..., retvalue ==> ... */
+ REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_RESULT);
M_INTMOVE(s1, REG_RESULT);
case ICMD_FRETURN: /* ..., retvalue ==> ... */
case ICMD_DRETURN:
+ REPLACEMENT_POINT_RETURN(cd, iptr);
s1 = emit_load_s1(jd, iptr, REG_FRESULT);
goto nowperformreturn;
case ICMD_RETURN: /* ... ==> ... */
+ REPLACEMENT_POINT_RETURN(cd, iptr);
+
nowperformreturn:
{
s4 i, p;
p = cd->stackframesize;
#if !defined(NDEBUG)
- if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
- emit_verbosecall_exit(jd);
+ emit_verbosecall_exit(jd);
#endif
#if defined(ENABLE_THREADS)
table += i;
while (--i >= 0) {
- dseg_addtarget(cd, table->block);
+ dseg_add_target(cd, table->block);
--table;
}
case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
case ICMD_INVOKEINTERFACE:
+ REPLACEMENT_POINT_INVOKE(cd, iptr);
+
if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
md = iptr->sx.s23.s3.um->methodref->parseddesc.md;
lm = NULL;
/* copy arguments to registers or stack location */
for (s3 = s3 - 1; s3 >= 0; s3--) {
- src = iptr->sx.s23.s2.args[s3];
-
- if (src->varkind == ARGVAR)
+ var = VAR(iptr->sx.s23.s2.args[s3]);
+
+ /* Already Preallocated (ARGVAR) ? */
+ if (var->flags & PREALLOC)
continue;
- if (IS_INT_LNG_TYPE(src->type)) {
+ if (IS_INT_LNG_TYPE(var->type)) {
if (!md->params[s3].inmemory) {
log_text("No integer argument registers available!");
assert(0);
} else {
- if (IS_2_WORD_TYPE(src->type)) {
- d = emit_load(jd, iptr, src, REG_ITMP12_PACKED);
+ if (IS_2_WORD_TYPE(var->type)) {
+ d = emit_load(jd, iptr, var, REG_ITMP12_PACKED);
M_LST(d, REG_SP, md->params[s3].regoff * 4);
} else {
- d = emit_load(jd, iptr, src, REG_ITMP1);
+ d = emit_load(jd, iptr, var, REG_ITMP1);
M_IST(d, REG_SP, md->params[s3].regoff * 4);
}
}
} else {
if (!md->params[s3].inmemory) {
s1 = rd->argfltregs[md->params[s3].regoff];
- d = emit_load(jd, iptr, src, s1);
+ d = emit_load(jd, iptr, var, s1);
M_FLTMOVE(d, s1);
} else {
- d = emit_load(jd, iptr, src, REG_FTMP1);
- if (IS_2_WORD_TYPE(src->type))
+ d = emit_load(jd, iptr, var, REG_FTMP1);
+ if (IS_2_WORD_TYPE(var->type))
M_DST(d, REG_SP, md->params[s3].regoff * 4);
else
M_FST(d, REG_SP, md->params[s3].regoff * 4);
case ICMD_INVOKEVIRTUAL:
M_ALD(REG_ITMP1, REG_SP, 0 * 4);
- gen_nullptr_check(REG_ITMP1);
+ emit_nullpointer_check(cd, iptr, REG_ITMP1);
if (lm == NULL) {
unresolved_method *um = iptr->sx.s23.s3.um;
case ICMD_INVOKEINTERFACE:
M_ALD(REG_ITMP1, REG_SP, 0 * 4);
- gen_nullptr_check(REG_ITMP1);
+ emit_nullpointer_check(cd, iptr, REG_ITMP1);
if (lm == NULL) {
unresolved_method *um = iptr->sx.s23.s3.um;
break;
}
+ /* store size of call code in replacement point */
+
+ REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
+
/* d contains return type */
if (d != TYPE_VOID) {
#if defined(ENABLE_SSA)
- if ((ls == NULL) || (iptr->dst->varkind != TEMPVAR) ||
- (ls->lifetime[-iptr->dst->varnum-1].type != -1))
+ if ((ls == NULL) || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) ||
+ (ls->lifetime[-iptr->dst.varindex-1].type != -1))
/* a "living" stackslot */
#endif
{
for (s1 = iptr->s1.argcount; --s1 >= 0; ) {
/* copy SAVEDVAR sizes to stack */
- src = iptr->sx.s23.s2.args[s1];
+ var = VAR(iptr->sx.s23.s2.args[s1]);
- if (src->varkind != ARGVAR) {
- if (src->flags & INMEMORY) {
- M_ILD(REG_ITMP1, REG_SP, src->regoff * 4);
+ /* Already Preallocated? */
+ if (!(var->flags & PREALLOC)) {
+ if (var->flags & INMEMORY) {
+ M_ILD(REG_ITMP1, REG_SP, var->vv.regoff * 4);
M_IST(REG_ITMP1, REG_SP, (s1 + 3) * 4);
}
else
- M_IST(src->regoff, REG_SP, (s1 + 3) * 4);
+ M_IST(var->vv.regoff, REG_SP, (s1 + 3) * 4);
}
}
} /* for instruction */
- /* copy values to interface registers */
+ MCODECHECK(64);
- len = bptr->outdepth;
- MCODECHECK(64+len);
#if defined(ENABLE_LSRA) && !defined(ENABLE_SSA)
if (!opt_lsra)
#endif
/* by edge splitting, in Blocks with phi moves there can only */
/* be a goto as last command, no other Jump/Branch Command */
if (!last_cmd_was_goto)
- codegen_insert_phi_moves(cd, rd, ls, bptr);
- }
- #if !defined(NEW_VAR)
- else
- #endif
-#endif
-#if !defined(NEW_VAR)
- while (len) {
- len--;
- src = bptr->outvars[len];
- if ((src->varkind != STACKVAR)) {
- s2 = src->type;
- if (IS_FLT_DBL_TYPE(s2)) {
- s1 = emit_load(jd, iptr, src, REG_FTMP1);
- if (!(rd->interfaces[len][s2].flags & INMEMORY))
- M_FLTMOVE(s1, rd->interfaces[len][s2].regoff);
- else
- M_DST(s1, REG_SP, rd->interfaces[len][s2].regoff * 4);
-
- } else {
- if (IS_2_WORD_TYPE(s2))
- assert(0);
-/* s1 = emit_load(jd, iptr, src,
- PACK_REGS(REG_ITMP1, REG_ITMP2)); */
- else
- s1 = emit_load(jd, iptr, src, REG_ITMP1);
-
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- if (IS_2_WORD_TYPE(s2))
- M_LNGMOVE(s1, rd->interfaces[len][s2].regoff);
- else
- M_INTMOVE(s1, rd->interfaces[len][s2].regoff);
-
- } else {
- if (IS_2_WORD_TYPE(s2))
- M_LST(s1, REG_SP, rd->interfaces[len][s2].regoff * 4);
- else
- M_IST(s1, REG_SP, rd->interfaces[len][s2].regoff * 4);
- }
- }
- }
- src = src->prev;
+ codegen_insert_phi_moves(jd, bptr);
}
+
#endif
/* At the end of a basic block we may have to append some nops,
emit_exception_stubs(jd);
emit_patcher_stubs(jd);
-#if 0
- emit_replacement_stubs(jd);
-#endif
+ REPLACEMENT_EMIT_STUBS(jd);
codegen_finish(jd);
}
#if defined(ENABLE_SSA)
-void codegen_insert_phi_moves(codegendata *cd, registerdata *rd, lsradata *ls, basicblock *bptr) {
+void codegen_insert_phi_moves(jitdata *jd, basicblock *bptr) {
/* look for phi moves */
int t_a,s_a,i, type;
int t_lt, s_lt; /* lifetime indices of phi_moves */
- bool t_inmemory, s_inmemory;
s4 t_regoff, s_regoff, s_flags, t_flags;
+ codegendata *cd;
+ lsradata *ls;
+
MCODECHECK(512);
+
+ ls = jd->ls;
+ cd = jd->cd;
/* Moves from phi functions with highest indices have to be */
/* inserted first, since this is the order as is used for */
s_a = ls->phi_moves[bptr->nr][i][1];
#if defined(SSA_DEBUG_VERBOSE)
if (compileverbose)
- printf("BB %3i Move %3i <- %3i ",bptr->nr,t_a,s_a);
+ printf("BB %3i Move %3i <- %3i ", bptr->nr, t_a, s_a);
#endif
if (t_a >= 0) {
/* local var lifetimes */
t_lt = ls->maxlifetimes + t_a;
type = ls->lifetime[t_lt].type;
- } else {
+ }
+ else {
t_lt = -t_a-1;
type = ls->lifetime[t_lt].local_ss->s->type;
/* stackslot lifetime */
}
+
if (type == -1) {
#if defined(SSA_DEBUG_VERBOSE)
- if (compileverbose)
- printf("...returning - phi lifetimes where joined\n");
+ if (compileverbose)
+ printf("...returning - phi lifetimes where joined\n");
#endif
return;
}
+
if (s_a >= 0) {
/* local var lifetimes */
s_lt = ls->maxlifetimes + s_a;
type = ls->lifetime[s_lt].type;
- } else {
+ }
+ else {
s_lt = -s_a-1;
type = ls->lifetime[s_lt].type;
/* stackslot lifetime */
}
+
if (type == -1) {
#if defined(SSA_DEBUG_VERBOSE)
- if (compileverbose)
- printf("...returning - phi lifetimes where joined\n");
+ if (compileverbose)
+ printf("...returning - phi lifetimes where joined\n");
#endif
return;
}
- if (t_a >= 0) {
- t_inmemory = rd->locals[t_a][type].flags & INMEMORY;
- t_flags = rd->locals[t_a][type].flags;
- t_regoff = rd->locals[t_a][type].regoff;
+ if (t_a >= 0) {
+ t_flags = VAR(t_a)->flags;
+ t_regoff = VAR(t_a)->vv.regoff;
- } else {
- t_inmemory = ls->lifetime[t_lt].local_ss->s->flags & INMEMORY;
+ }
+ else {
t_flags = ls->lifetime[t_lt].local_ss->s->flags;
t_regoff = ls->lifetime[t_lt].local_ss->s->regoff;
}
if (s_a >= 0) {
/* local var move */
-
- s_inmemory = rd->locals[s_a][type].flags & INMEMORY;
- s_flags = rd->locals[s_a][type].flags;
- s_regoff = rd->locals[s_a][type].regoff;
+ s_flags = VAR(s_a)->flags;
+ s_regoff = VAR(s_a)->vv.regoff;
} else {
/* stackslot lifetime */
- s_inmemory = ls->lifetime[s_lt].local_ss->s->flags & INMEMORY;
s_flags = ls->lifetime[s_lt].local_ss->s->flags;
s_regoff = ls->lifetime[s_lt].local_ss->s->regoff;
}
+
if (type == -1) {
#if defined(SSA_DEBUG_VERBOSE)
- if (compileverbose)
- printf("...returning - phi lifetimes where joined\n");
+ if (compileverbose)
+ printf("...returning - phi lifetimes where joined\n");
#endif
return;
}
#if defined(SSA_DEBUG_VERBOSE)
if (compileverbose) {
- if ((t_inmemory) && (s_inmemory)) {
+ if (IS_INMEMORY(t_flags) && IS_INMEMORY(s_flags)) {
/* mem -> mem */
printf("M%3i <- M%3i",t_regoff,s_regoff);
- } else if (s_inmemory) {
+ }
+ else if (IS_INMEMORY(s_flags)) {
/* mem -> reg */
printf("R%3i <- M%3i",t_regoff,s_regoff);
- } else if (t_inmemory) {
+ }
+ else if (IS_INMEMORY(t_flags)) {
/* reg -> mem */
printf("M%3i <- R%3i",t_regoff,s_regoff);
- } else {
+ }
+ else {
/* reg -> reg */
printf("R%3i <- R%3i",t_regoff,s_regoff);
}
void cg_move(codegendata *cd, s4 type, s4 src_regoff, s4 src_flags,
s4 dst_regoff, s4 dst_flags) {
- if ((dst_flags & INMEMORY) && (src_flags & INMEMORY)) {
+ if ((IS_INMEMORY(dst_flags)) && (IS_INMEMORY(src_flags))) {
/* mem -> mem */
if (dst_regoff != src_regoff) {
if (!IS_2_WORD_TYPE(type)) {
log_text("cg_move: longs have to be in memory\n");
/* assert(0); */
}
- if (src_flags & INMEMORY) {
+ if (IS_INMEMORY(src_flags)) {
/* mem -> reg */
emit_mov_membase_reg(cd, REG_SP, src_regoff * 4, dst_regoff);
- } else if (dst_flags & INMEMORY) {
+ } else if (IS_INMEMORY(dst_flags)) {
/* reg -> mem */
emit_mov_reg_membase(cd, src_regoff, REG_SP, dst_regoff * 4);
} else {
{
u1 *s; /* memory to hold the stub */
ptrint *d;
- codeinfo *code;
codegendata *cd;
s4 dumpsize;
cd = DNEW(codegendata);
cd->mcodeptr = s;
- /* Store the codeinfo pointer in the same place as in the
- methodheader for compiled methods. */
-
- code = code_codeinfo_new(m);
+ /* The codeinfo pointer is actually a pointer to the
+ methodinfo. This fakes a codeinfo structure. */
d[0] = (ptrint) asm_call_jit_compiler;
d[1] = (ptrint) m;
- d[2] = (ptrint) code;
+ d[2] = (ptrint) &d[1]; /* fake code->m */
/* code for the stub */
s4 nativeparams;
s4 i, j; /* count variables */
s4 t;
- s4 s1, s2, disp;
+ s4 s1, s2;
/* get required compiler data */
/* create method header */
- (void) dseg_addaddress(cd, code); /* CodeinfoPointer */
- (void) dseg_adds4(cd, cd->stackframesize * 4); /* FrameSize */
- (void) dseg_adds4(cd, 0); /* IsSync */
- (void) dseg_adds4(cd, 0); /* IsLeaf */
- (void) dseg_adds4(cd, 0); /* IntSave */
- (void) dseg_adds4(cd, 0); /* FltSave */
+ (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
+ (void) dseg_add_unique_s4(cd, cd->stackframesize * 4); /* FrameSize */
+ (void) dseg_add_unique_s4(cd, 0); /* IsSync */
+ (void) dseg_add_unique_s4(cd, 0); /* IsLeaf */
+ (void) dseg_add_unique_s4(cd, 0); /* IntSave */
+ (void) dseg_add_unique_s4(cd, 0); /* FltSave */
(void) dseg_addlinenumbertablesize(cd);
- (void) dseg_adds4(cd, 0); /* ExTableSize */
+ (void) dseg_add_unique_s4(cd, 0); /* ExTableSize */
/* generate native method profiling code */
M_ASUB_IMM(cd->stackframesize * 4, REG_SP);
#if !defined(NDEBUG)
- if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
- emit_verbosecall_enter(jd);
+ emit_verbosecall_enter(jd);
#endif
/* get function address (this must happen before the stackframeinfo) */
}
#if !defined(NDEBUG)
- if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
- emit_verbosecall_exit(jd);
+ emit_verbosecall_exit(jd);
#endif
/* remove native stackframe info */