Changes: Christian Ullrich
Edwin Steiner
- $Id: codegen.c 4644 2006-03-16 18:44:46Z edwin $
+ $Id: codegen.c 4943 2006-05-23 08:51:33Z twisti $
*/
#include "vm/jit/x86_64/arch.h"
#include "vm/jit/x86_64/codegen.h"
-#include "vm/jit/x86_64/emitfuncs.h"
+#include "vm/jit/x86_64/md-emit.h"
+#include "mm/memory.h"
+#include "native/jni.h"
#include "native/native.h"
#include "vm/builtin.h"
#include "vm/exceptions.h"
#include "vm/jit/asmpart.h"
#include "vm/jit/codegen-common.h"
#include "vm/jit/dseg.h"
+#include "vm/jit/emit.h"
#include "vm/jit/jit.h"
#include "vm/jit/methodheader.h"
#include "vm/jit/parse.h"
#endif
-
-
/* codegen *********************************************************************
Generates machine code.
*******************************************************************************/
-bool codegen(methodinfo *m, codegendata *cd, registerdata *rd)
+bool codegen(jitdata *jd)
{
+ methodinfo *m;
+ codegendata *cd;
+ registerdata *rd;
s4 len, s1, s2, s3, d, disp;
u2 currentline;
ptrint a;
- s4 parentargs_base;
+ s4 stackframesize;
stackptr src;
varinfo *var;
basicblock *bptr;
methoddesc *md;
rplpoint *replacementpoint;
+ /* get required compiler data */
+
+ m = jd->m;
+ cd = jd->cd;
+ rd = jd->rd;
+
/* prevent compiler warnings */
d = 0;
savedregs_num += (INT_SAV_CNT - rd->savintreguse);
savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
- parentargs_base = rd->memuse + savedregs_num;
+ stackframesize = rd->memuse + savedregs_num;
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
/* space to save argument of monitor_enter */
if (checksync && (m->flags & ACC_SYNCHRONIZED))
- parentargs_base++;
+ stackframesize++;
#endif
/* Keep stack of non-leaf functions 16-byte aligned for calls into native */
/* code e.g. libc or jni (alignment problems with movaps). */
if (!m->isleafmethod || opt_verbosecall)
- parentargs_base |= 0x1;
+ stackframesize |= 0x1;
/* create method header */
(void) dseg_addaddress(cd, m); /* MethodPointer */
- (void) dseg_adds4(cd, parentargs_base * 8); /* FrameSize */
+ (void) dseg_adds4(cd, stackframesize * 8); /* FrameSize */
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
/* IsSync contains the offset relative to the stack pointer for the
argument of monitor_exit used in the exception handler. Since the
offset could be zero and give a wrong meaning of the flag it is
(void) dseg_addaddress(cd, ex->catchtype.cls);
}
- /* initialize mcode variables */
-
- cd->mcodeptr = (u1 *) cd->mcodebase;
- cd->mcodeend = (s4 *) (cd->mcodebase + cd->mcodesize);
-
- /* initialize the last patcher pointer */
-
- cd->lastmcodeptr = cd->mcodeptr;
-
/* generate method profiling code */
if (opt_prof) {
/* create stack frame (if necessary) */
- if (parentargs_base)
- M_ASUB_IMM(parentargs_base * 8, REG_SP);
+ if (stackframesize)
+ M_ASUB_IMM(stackframesize * 8, REG_SP);
/* save used callee saved registers */
- p = parentargs_base;
+ p = stackframesize;
for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
p--; M_LST(rd->savintregs[i], REG_SP, p * 8);
}
} else { /* stack arguments */
if (!(var->flags & INMEMORY)) { /* stack arg -> register */
/* + 8 for return address */
- M_LLD(var->regoff, REG_SP, (parentargs_base + s1) * 8 + 8);
+ M_LLD(var->regoff, REG_SP, (stackframesize + s1) * 8 + 8);
} else { /* stack arg -> spilled */
- var->regoff = parentargs_base + s1 + 1;
+ var->regoff = stackframesize + s1 + 1;
}
}
} else { /* stack arguments */
if (!(var->flags & INMEMORY)) { /* stack-arg -> register */
- M_DLD(var->regoff, REG_SP, (parentargs_base + s1) * 8 + 8);
+ M_DLD(var->regoff, REG_SP, (stackframesize + s1) * 8 + 8);
} else {
- var->regoff = parentargs_base + s1 + 1;
+ var->regoff = stackframesize + s1 + 1;
}
}
}
/* save monitorenter argument */
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
/* stack offset for monitor argument */
} else {
M_TEST(rd->argintregs[0]);
M_BEQ(0);
- codegen_add_nullpointerexception_ref(cd, cd->mcodeptr);
+ codegen_add_nullpointerexception_ref(cd);
M_AST(rd->argintregs[0], REG_SP, s1 * 8);
M_MOV_IMM(BUILTIN_monitorenter, REG_ITMP1);
M_CALL(REG_ITMP1);
}
#endif
+#if !defined(NDEBUG)
/* Copy argument registers to stack and call trace function with
pointer to arguments on stack. */
following integer registers. */
if (IS_FLT_DBL_TYPE(md->paramtypes[p].type)) {
- for (s1 = INT_ARG_CNT - 2; s1 >= p; s1--) {
+ for (s1 = INT_ARG_CNT - 2; s1 >= p; s1--)
M_MOV(rd->argintregs[s1], rd->argintregs[s1 + 1]);
- }
- x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
+ emit_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
l++;
}
}
M_LADD_IMM((INT_ARG_CNT + FLT_ARG_CNT + INT_TMP_CNT + FLT_TMP_CNT + 1 + 1) * 8, REG_SP);
}
+#endif /* !defined(NDEBUG) */
}
/* end of header generation */
- replacementpoint = cd->code->rplpoints;
+ replacementpoint = jd->code->rplpoints;
/* walk through all basic blocks */
len--;
if ((len == 0) && (bptr->type != BBTYPE_STD)) {
if (bptr->type == BBTYPE_SBR) {
- /* d = reg_of_var(rd, src, REG_ITMP1); */
+/* d = reg_of_var(rd, src, REG_ITMP1); */
if (!(src->flags & INMEMORY))
- d= src->regoff;
+ d = src->regoff;
else
- d=REG_ITMP1;
- x86_64_pop_reg(cd, d);
- store_reg_to_var_int(src, d);
+ d = REG_ITMP1;
+ M_POP(d);
+ emit_store(jd, NULL, src, d);
} else if (bptr->type == BBTYPE_EXH) {
- /* d = reg_of_var(rd, src, REG_ITMP1); */
+/* d = reg_of_var(rd, src, REG_ITMP1); */
if (!(src->flags & INMEMORY))
d= src->regoff;
else
d=REG_ITMP1;
M_INTMOVE(REG_ITMP1, d);
- store_reg_to_var_int(src, d);
+ emit_store(jd, NULL, src, d);
}
}
src = src->prev;
len--;
if ((len == 0) && (bptr->type != BBTYPE_STD)) {
if (bptr->type == BBTYPE_SBR) {
- d = reg_of_var(rd, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
M_POP(d);
- store_reg_to_var_int(src, d);
+ emit_store(jd, NULL, src, d);
} else if (bptr->type == BBTYPE_EXH) {
- d = reg_of_var(rd, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
M_INTMOVE(REG_ITMP1, d);
- store_reg_to_var_int(src, d);
+ emit_store(jd, NULL, src, d);
}
} else {
- d = reg_of_var(rd, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, 0, src, REG_ITMP1);
if ((src->varkind != STACKVAR)) {
s2 = src->type;
if (IS_FLT_DBL_TYPE(s2)) {
else
M_DLD(d, REG_SP, s1 * 8);
- store_reg_to_var_flt(src, d);
+ emit_store(jd, NULL, src, d);
} else {
s1 = rd->interfaces[len][s2].regoff;
else
M_LLD(d, REG_SP, s1 * 8);
- store_reg_to_var_int(src, d);
+ emit_store(jd, NULL, src, d);
}
}
}
for (iptr = bptr->iinstr; len > 0; src = iptr->dst, len--, iptr++) {
if (iptr->line != currentline) {
- dseg_addlinenumber(cd, iptr->line, cd->mcodeptr);
+ dseg_addlinenumber(cd, iptr->line);
currentline = iptr->line;
}
case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
- if (src->flags & INMEMORY)
- M_CMP_IMM_MEMBASE(0, REG_SP, src->regoff * 8);
- else
- M_TEST(src->regoff);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_TEST(s1);
M_BEQ(0);
- codegen_add_nullpointerexception_ref(cd, cd->mcodeptr);
+ codegen_add_nullpointerexception_ref(cd);
break;
/* constant operations ************************************************/
case ICMD_ICONST: /* ... ==> ..., constant */
/* op1 = 0, val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.i == 0)
- M_CLR(d);
- else
- M_IMOV_IMM(iptr->val.i, d);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ ICONST(d, iptr->val.i);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LCONST: /* ... ==> ..., constant */
/* op1 = 0, val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.l == 0)
- M_CLR(d);
- else
- M_MOV_IMM(iptr->val.l, d);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ LCONST(d, iptr->val.l);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FCONST: /* ... ==> ..., constant */
/* op1 = 0, val.f = constant */
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
disp = dseg_addfloat(cd, iptr->val.f);
- x86_64_movdl_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + ((d > 7) ? 9 : 8)) - (s8) cd->mcodebase) + disp, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movdl_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + ((d > 7) ? 9 : 8)) - (s8) cd->mcodebase) + disp, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DCONST: /* ... ==> ..., constant */
/* op1 = 0, val.d = constant */
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
disp = dseg_adddouble(cd, iptr->val.d);
- x86_64_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ACONST: /* ... ==> ..., constant */
/* op1 = 0, val.a = constant */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
- if ((iptr->target != NULL) && (iptr->val.a == NULL)) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_aconst,
- (unresolved_class *) iptr->target, 0);
+ codegen_addpatchref(cd, PATCHER_aconst,
+ ICMD_ACONST_UNRESOLVED_CLASSREF(iptr), 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* PROFILE_CYCLE_START; */
- M_MOV_IMM(iptr->val.a, d);
+ M_MOV_IMM(NULL, d);
} else {
if (iptr->val.a == 0)
else
M_MOV_IMM(iptr->val.a, d);
}
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ILOAD: /* ... ==> ..., content of local variable */
/* op1 = local variable */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
+ (iptr->dst->varnum == iptr->op1))
break;
- }
var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_int(iptr->dst, d);
-
- } else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- M_INTMOVE(var->regoff, d);
- }
- }
+ if (var->flags & INMEMORY)
+ M_ILD(d, REG_SP, var->regoff * 8);
+ else
+ M_INTMOVE(var->regoff, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LLOAD: /* ... ==> ..., content of local variable */
case ICMD_ALOAD: /* op1 = local variable */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
+ (iptr->dst->varnum == iptr->op1))
break;
- }
var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_int(iptr->dst, d);
+ if (var->flags & INMEMORY)
+ M_LLD(d, REG_SP, var->regoff * 8);
+ else
+ M_INTMOVE(var->regoff, d);
+ emit_store(jd, iptr, iptr->dst, d);
+ break;
- } else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
+ case ICMD_FLOAD: /* ... ==> ..., content of local variable */
+ /* op1 = local variable */
- } else {
- M_INTMOVE(var->regoff, d);
- }
- }
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ if ((iptr->dst->varkind == LOCALVAR) &&
+ (iptr->dst->varnum == iptr->op1))
+ break;
+ var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
+ if (var->flags & INMEMORY)
+ M_FLD(d, REG_SP, var->regoff * 8);
+ else
+ M_FLTMOVE(var->regoff, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
- case ICMD_FLOAD: /* ... ==> ..., content of local variable */
- case ICMD_DLOAD: /* op1 = local variable */
+ case ICMD_DLOAD: /* ... ==> ..., content of local variable */
+ /* op1 = local variable */
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
+ (iptr->dst->varnum == iptr->op1))
break;
- }
var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_movq_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_flt(iptr->dst, d);
+ if (var->flags & INMEMORY)
+ M_DLD(d, REG_SP, var->regoff * 8);
+ else
+ M_FLTMOVE(var->regoff, d);
+ emit_store(jd, iptr, iptr->dst, d);
+ break;
+ case ICMD_ISTORE: /* ..., value ==> ... */
+ /* op1 = local variable */
+
+ if ((src->varkind == LOCALVAR) && (src->varnum == iptr->op1))
+ break;
+ var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
+ if (var->flags & INMEMORY) {
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_IST(s1, REG_SP, var->regoff * 8);
} else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_movq_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
+ s1 = emit_load_s1(jd, iptr, src, var->regoff);
+ M_INTMOVE(s1, var->regoff);
+ }
+ break;
- } else {
- M_FLTMOVE(var->regoff, d);
- }
+ case ICMD_LSTORE: /* ..., value ==> ... */
+ case ICMD_ASTORE: /* op1 = local variable */
+
+ if ((src->varkind == LOCALVAR) && (src->varnum == iptr->op1))
+ break;
+ var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
+ if (var->flags & INMEMORY) {
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_LST(s1, REG_SP, var->regoff * 8);
+ } else {
+ s1 = emit_load_s1(jd, iptr, src, var->regoff);
+ M_INTMOVE(s1, var->regoff);
}
break;
- case ICMD_ISTORE: /* ..., value ==> ... */
- case ICMD_LSTORE: /* op1 = local variable */
- case ICMD_ASTORE:
+ case ICMD_FSTORE: /* ..., value ==> ... */
+ /* op1 = local variable */
if ((src->varkind == LOCALVAR) &&
(src->varnum == iptr->op1)) {
}
var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
if (var->flags & INMEMORY) {
- var_to_reg_int(s1, src, REG_ITMP1);
- x86_64_mov_reg_membase(cd, s1, REG_SP, var->regoff * 8);
-
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ M_FST(s1, REG_SP, var->regoff * 8);
} else {
- var_to_reg_int(s1, src, var->regoff);
- M_INTMOVE(s1, var->regoff);
+ s1 = emit_load_s1(jd, iptr, src, var->regoff);
+ M_FLTMOVE(s1, var->regoff);
}
break;
- case ICMD_FSTORE: /* ..., value ==> ... */
- case ICMD_DSTORE: /* op1 = local variable */
+ case ICMD_DSTORE: /* ..., value ==> ... */
+ /* op1 = local variable */
if ((src->varkind == LOCALVAR) &&
(src->varnum == iptr->op1)) {
}
var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
if (var->flags & INMEMORY) {
- var_to_reg_flt(s1, src, REG_FTMP1);
- x86_64_movq_reg_membase(cd, s1, REG_SP, var->regoff * 8);
-
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ M_DST(s1, REG_SP, var->regoff * 8);
} else {
- var_to_reg_flt(s1, src, var->regoff);
+ s1 = emit_load_s1(jd, iptr, src, var->regoff);
M_FLTMOVE(s1, var->regoff);
}
break;
case ICMD_INEG: /* ..., value ==> ..., - value */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_negl_membase(cd, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_negl_reg(cd, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_movl_reg_membase(cd, src->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_negl_membase(cd, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- x86_64_negl_reg(cd, d);
-
- } else {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_negl_reg(cd, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_INEG(d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LNEG: /* ..., value ==> ..., - value */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_neg_membase(cd, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_neg_reg(cd, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_mov_reg_membase(cd, src->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_neg_membase(cd, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- x86_64_neg_reg(cd, iptr->dst->regoff);
-
- } else {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_neg_reg(cd, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_LNEG(d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_I2L: /* ..., value ==> ..., value */
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movslq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movslq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
+ M_ISEXT(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_L2I: /* ..., value ==> ..., value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- M_INTMOVE(s1, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_IMOV(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_INT2BYTE: /* ..., value ==> ..., value */
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movsbq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movsbq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
+ M_BSEXT(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_INT2CHAR: /* ..., value ==> ..., value */
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movzwq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movzwq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
+ M_CZEXT(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_INT2SHORT: /* ..., value ==> ..., value */
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movswq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movswq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
+ M_SSEXT(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_ADD, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_IADD(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IADD(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_ADD, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IADD_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_ADD, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_LADD(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LADD(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_ADD, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->val.l))
+ M_LADD_IMM(iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LADD(REG_ITMP2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_membase(cd, X86_64_SUB, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_alul_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_movl_reg_membase(cd, src->prev->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_alul_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
- }
-
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d) {
+ M_INTMOVE(s1, REG_ITMP1);
+ M_ISUB(s2, REG_ITMP1);
+ M_INTMOVE(REG_ITMP1, d);
} else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
-
- } else {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
- }
+ M_INTMOVE(s1, d);
+ M_ISUB(s2, d);
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_SUB, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_ISUB_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_membase(cd, X86_64_SUB, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_alu_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_mov_reg_membase(cd, src->prev->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_alu_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
- }
-
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d) {
+ M_INTMOVE(s1, REG_ITMP1);
+ M_LSUB(s2, REG_ITMP1);
+ M_INTMOVE(REG_ITMP1, d);
} else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
-
- } else {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
- }
+ M_INTMOVE(s1, d);
+ M_LSUB(s2, d);
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_SUB, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->val.l))
+ M_LSUB_IMM(iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LSUB(REG_ITMP2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_imull_reg_reg(cd, src->prev->regoff, iptr->dst->regoff);
-
- } else {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imull_reg_reg(cd, src->regoff, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_IMUL(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IMUL(s2, d);
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- x86_64_imull_imm_membase_reg(cd, iptr->val.i, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_imull_imm_reg_reg(cd, iptr->val.i, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- x86_64_imull_imm_membase_reg(cd, iptr->val.i, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (iptr->val.i == 2) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_alul_reg_reg(cd, X86_64_ADD, iptr->dst->regoff, iptr->dst->regoff);
-
- } else {
- x86_64_imull_imm_reg_reg(cd, iptr->val.i, src->regoff, iptr->dst->regoff); /* 3 cycles */
- }
- }
- }
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ if (iptr->val.i == 2) {
+ M_INTMOVE(s1, d);
+ M_ISLL_IMM(1, d);
+ } else
+ M_IMUL_IMM(s1, iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_imul_reg_reg(cd, src->prev->regoff, iptr->dst->regoff);
-
- } else {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imul_reg_reg(cd, src->regoff, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_LMUL(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LMUL(s2, d);
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, REG_ITMP1);
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- }
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, REG_ITMP1);
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- }
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- }
-
- } else {
- /* should match in many cases */
- if (iptr->val.l == 2) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_alul_reg_reg(cd, X86_64_ADD, iptr->dst->regoff, iptr->dst->regoff);
-
- } else {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, iptr->dst->regoff); /* 4 cycles */
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imul_reg_reg(cd, REG_ITMP1, iptr->dst->regoff);
- }
- }
- }
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LMUL_IMM(s1, iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LMUL(REG_ITMP2, d);
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, RAX);
-
- } else {
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ if (src->prev->flags & INMEMORY)
+ M_ILD(RAX, REG_SP, src->prev->regoff * 8);
+ else
M_INTMOVE(src->prev->regoff, RAX);
- }
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
+ if (src->flags & INMEMORY)
+ M_ILD(REG_ITMP3, REG_SP, src->regoff * 8);
+ else
M_INTMOVE(src->regoff, REG_ITMP3);
+
+ if (checknull) {
+ M_ITEST(REG_ITMP3);
+ M_BEQ(0);
+ codegen_add_arithmeticexception_ref(cd);
}
- gen_div_check(src);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, RAX); /* check as described in jvm spec */
- x86_64_jcc(cd, X86_64_CC_NE, 4 + 6);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 1 + 3); /* 6 bytes */
+ emit_alul_imm_reg(cd, ALU_CMP, 0x80000000, RAX); /* check as described in jvm spec */
+ emit_jcc(cd, CC_NE, 4 + 6);
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP3); /* 4 bytes */
+ emit_jcc(cd, CC_E, 3 + 1 + 3); /* 6 bytes */
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cltd(cd);
- x86_64_idivl_reg(cd, REG_ITMP3);
+ emit_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
+ emit_cltd(cd);
+ emit_idivl_reg(cd, REG_ITMP3);
if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RAX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ emit_mov_reg_membase(cd, RAX, REG_SP, iptr->dst->regoff * 8);
+ emit_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
} else {
M_INTMOVE(RAX, iptr->dst->regoff);
if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ emit_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
}
}
break;
case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, RAX);
- } else {
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ if (src->prev->flags & INMEMORY)
+ M_ILD(RAX, REG_SP, src->prev->regoff * 8);
+ else
M_INTMOVE(src->prev->regoff, RAX);
- }
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
+ if (src->flags & INMEMORY)
+ M_ILD(REG_ITMP3, REG_SP, src->regoff * 8);
+ else
M_INTMOVE(src->regoff, REG_ITMP3);
+
+ if (checknull) {
+ M_ITEST(REG_ITMP3);
+ M_BEQ(0);
+ codegen_add_arithmeticexception_ref(cd);
}
- gen_div_check(src);
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
+ emit_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, RAX); /* check as described in jvm spec */
- x86_64_jcc(cd, X86_64_CC_NE, 2 + 4 + 6);
+ emit_alul_imm_reg(cd, ALU_CMP, 0x80000000, RAX); /* check as described in jvm spec */
+ emit_jcc(cd, CC_NE, 2 + 4 + 6);
- x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 1 + 3); /* 6 bytes */
+ emit_alul_reg_reg(cd, ALU_XOR, RDX, RDX); /* 2 bytes */
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP3); /* 4 bytes */
+ emit_jcc(cd, CC_E, 1 + 3); /* 6 bytes */
- x86_64_cltd(cd);
- x86_64_idivl_reg(cd, REG_ITMP3);
+ emit_cltd(cd);
+ emit_idivl_reg(cd, REG_ITMP3);
if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RDX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ emit_mov_reg_membase(cd, RDX, REG_SP, iptr->dst->regoff * 8);
+ emit_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
} else {
M_INTMOVE(RDX, iptr->dst->regoff);
if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ emit_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
}
}
break;
case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
/* val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_leal_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, REG_ITMP1);
- x86_64_shiftl_imm_reg(cd, X86_64_SAR, iptr->val.i, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_leal_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
+ emit_cmovccl_reg_reg(cd, CC_LE, REG_ITMP2, REG_ITMP1);
+ emit_shiftl_imm_reg(cd, SHIFT_SAR, iptr->val.i, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
/* val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_leal_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_G, REG_ITMP1, REG_ITMP2);
- x86_64_alul_imm_reg(cd, X86_64_AND, -1 - (iptr->val.i), REG_ITMP2);
- x86_64_alul_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_leal_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
+ emit_cmovccl_reg_reg(cd, CC_G, REG_ITMP1, REG_ITMP2);
+ emit_alul_imm_reg(cd, ALU_AND, -1 - (iptr->val.i), REG_ITMP2);
+ emit_alul_reg_reg(cd, ALU_SUB, REG_ITMP2, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
+ if (src->prev->flags & INMEMORY)
M_LLD(RAX, REG_SP, src->prev->regoff * 8);
-
- } else {
+ else
M_INTMOVE(src->prev->regoff, RAX);
- }
- if (src->flags & INMEMORY) {
+ if (src->flags & INMEMORY)
M_LLD(REG_ITMP3, REG_SP, src->regoff * 8);
-
- } else {
+ else
M_INTMOVE(src->regoff, REG_ITMP3);
+
+ if (checknull) {
+ M_TEST(REG_ITMP3);
+ M_BEQ(0);
+ codegen_add_arithmeticexception_ref(cd);
}
- gen_div_check(src);
/* check as described in jvm spec */
disp = dseg_adds8(cd, 0x8000000000000000LL);
- M_CMP_MEMBASE(RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, RAX);
+ M_LCMP_MEMBASE(RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, RAX);
M_BNE(4 + 6);
- M_CMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_LCMP_IMM(-1, REG_ITMP3); /* 4 bytes */
M_BEQ(3 + 2 + 3); /* 6 bytes */
M_MOV(RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cqto(cd);
- x86_64_idiv_reg(cd, REG_ITMP3);
+ emit_cqto(cd);
+ emit_idiv_reg(cd, REG_ITMP3);
if (iptr->dst->flags & INMEMORY) {
M_LST(RAX, REG_SP, iptr->dst->regoff * 8);
case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ if (src->prev->flags & INMEMORY)
M_LLD(REG_ITMP1, REG_SP, src->prev->regoff * 8);
-
- } else {
+ else
M_INTMOVE(src->prev->regoff, REG_ITMP1);
- }
- if (src->flags & INMEMORY) {
+ if (src->flags & INMEMORY)
M_LLD(REG_ITMP3, REG_SP, src->regoff * 8);
-
- } else {
+ else
M_INTMOVE(src->regoff, REG_ITMP3);
+
+ if (checknull) {
+ M_TEST(REG_ITMP3);
+ M_BEQ(0);
+ codegen_add_arithmeticexception_ref(cd);
}
- gen_div_check(src);
M_MOV(RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
/* check as described in jvm spec */
disp = dseg_adds8(cd, 0x8000000000000000LL);
- M_CMP_MEMBASE(RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, REG_ITMP1);
+ M_LCMP_MEMBASE(RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, REG_ITMP1);
M_BNE(3 + 4 + 6);
#if 0
- x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
+ emit_alul_reg_reg(cd, ALU_XOR, RDX, RDX); /* 2 bytes */
#endif
- M_XOR(RDX, RDX); /* 3 bytes */
- M_CMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_LXOR(RDX, RDX); /* 3 bytes */
+ M_LCMP_IMM(-1, REG_ITMP3); /* 4 bytes */
M_BEQ(2 + 3); /* 6 bytes */
- x86_64_cqto(cd);
- x86_64_idiv_reg(cd, REG_ITMP3);
+ emit_cqto(cd);
+ emit_idiv_reg(cd, REG_ITMP3);
if (iptr->dst->flags & INMEMORY) {
M_LST(RDX, REG_SP, iptr->dst->regoff * 8);
case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
/* val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_lea_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, REG_ITMP1);
- x86_64_shift_imm_reg(cd, X86_64_SAR, iptr->val.i, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alu_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_lea_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
+ emit_cmovcc_reg_reg(cd, CC_LE, REG_ITMP2, REG_ITMP1);
+ emit_shift_imm_reg(cd, SHIFT_SAR, iptr->val.i, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
/* val.l = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_lea_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_G, REG_ITMP1, REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_AND, -1 - (iptr->val.i), REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alu_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_lea_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
+ emit_cmovcc_reg_reg(cd, CC_G, REG_ITMP1, REG_ITMP2);
+ emit_alu_imm_reg(cd, ALU_AND, -1 - (iptr->val.i), REG_ITMP2);
+ emit_alu_reg_reg(cd, ALU_SUB, REG_ITMP2, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SHL, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_ishift(cd, SHIFT_SHL, src, iptr);
break;
case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SHL, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_ISLL_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SAR, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_ishift(cd, SHIFT_SAR, src, iptr);
break;
case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SAR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_ISRA_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SHR, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_ishift(cd, SHIFT_SHR, src, iptr);
break;
case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SHR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_ISRL_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SHL, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_lshift(cd, SHIFT_SHL, src, iptr);
break;
case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SHL, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_LSLL_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SAR, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_lshift(cd, SHIFT_SAR, src, iptr);
break;
case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SAR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LSRA_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SHR, src, iptr);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_NULL);
+ emit_lshift(cd, SHIFT_SHR, src, iptr);
break;
case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SHR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LSRL_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_IAND(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IAND(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IAND_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_LAND(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LAND(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->val.l))
+ M_LAND_IMM(iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LAND(REG_ITMP2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_IOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IOR(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IOR_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_LOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LOR(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->val.l))
+ M_LOR_IMM(iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LOR(REG_ITMP2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_IXOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IXOR(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
/* val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IXOR_IMM(iptr->val.i, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ if (s2 == d)
+ M_LXOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LXOR(s2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
/* val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->val.l))
+ M_LXOR_IMM(iptr->val.l, d);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LXOR(REG_ITMP2, d);
+ }
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IINC: /* ..., value ==> ..., value + constant */
/* op1 = variable, val.i = constant */
- /* using inc and dec is definitely faster than add -- tested */
- /* with sieve */
-
var = &(rd->locals[iptr->op1][TYPE_INT]);
- d = var->regoff;
if (var->flags & INMEMORY) {
- if (iptr->val.i == 1) {
- x86_64_incl_membase(cd, REG_SP, d * 8);
-
- } else if (iptr->val.i == -1) {
- x86_64_decl_membase(cd, REG_SP, d * 8);
+ s1 = REG_ITMP1;
+ M_ILD(s1, REG_SP, var->regoff * 8);
+ } else
+ s1 = var->regoff;
- } else {
- x86_64_alul_imm_membase(cd, X86_64_ADD, iptr->val.i, REG_SP, d * 8);
- }
+ /* Using inc and dec is not faster than add (tested with
+ sieve). */
- } else {
- if (iptr->val.i == 1) {
- x86_64_incl_reg(cd, d);
-
- } else if (iptr->val.i == -1) {
- x86_64_decl_reg(cd, d);
+ M_IADD_IMM(iptr->val.i, s1);
- } else {
- x86_64_alul_imm_reg(cd, X86_64_ADD, iptr->val.i, d);
- }
- }
+ if (var->flags & INMEMORY)
+ M_IST(s1, REG_SP, var->regoff * 8);
break;
case ICMD_FNEG: /* ..., value ==> ..., - value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
disp = dseg_adds4(cd, 0x80000000);
M_FLTMOVE(s1, d);
- x86_64_movss_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, REG_FTMP2);
- x86_64_xorps_reg_reg(cd, REG_FTMP2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movss_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, REG_FTMP2);
+ emit_xorps_reg_reg(cd, REG_FTMP2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DNEG: /* ..., value ==> ..., - value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
disp = dseg_adds8(cd, 0x8000000000000000);
M_FLTMOVE(s1, d);
- x86_64_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, REG_FTMP2);
- x86_64_xorpd_reg_reg(cd, REG_FTMP2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + disp, REG_FTMP2);
+ emit_xorpd_reg_reg(cd, REG_FTMP2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_addss_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_addss_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ if (s2 == d)
+ M_FADD(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_addss_reg_reg(cd, s2, d);
+ M_FADD(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_addsd_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_addsd_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ if (s2 == d)
+ M_DADD(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_addsd_reg_reg(cd, s2, d);
+ M_DADD(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_subss_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_FSUB(s2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_subsd_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_DSUB(s2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_mulss_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_mulss_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ if (s2 == d)
+ M_FMUL(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_mulss_reg_reg(cd, s2, d);
+ M_FMUL(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_mulsd_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_mulsd_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ if (s2 == d)
+ M_DMUL(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_mulsd_reg_reg(cd, s2, d);
+ M_DMUL(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_divss_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_FDIV(s2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_divsd_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_DDIV(s2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_I2F: /* ..., value ==> ..., (float) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2ss_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_CVTIF(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_I2D: /* ..., value ==> ..., (double) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2sd_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_CVTID(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_L2F: /* ..., value ==> ..., (float) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2ssq_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_CVTLF(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_L2D: /* ..., value ==> ..., (double) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2sdq_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_CVTLD(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_F2I: /* ..., value ==> ..., (int) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttss2si_reg_reg(cd, s1, d);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_CVTFI(s1, d);
+ M_ICMP_IMM(0x80000000, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (ptrint) asm_builtin_f2i, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_f2i, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_D2I: /* ..., value ==> ..., (int) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttsd2si_reg_reg(cd, s1, d);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_CVTDI(s1, d);
+ M_ICMP_IMM(0x80000000, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (ptrint) asm_builtin_d2i, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_d2i, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_F2L: /* ..., value ==> ..., (long) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttss2siq_reg_reg(cd, s1, d);
- x86_64_mov_imm_reg(cd, 0x8000000000000000, REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_CVTFL(s1, d);
+ M_MOV_IMM(0x8000000000000000, REG_ITMP2);
+ M_LCMP(REG_ITMP2, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (ptrint) asm_builtin_f2l, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_f2l, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_D2L: /* ..., value ==> ..., (long) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttsd2siq_reg_reg(cd, s1, d);
- x86_64_mov_imm_reg(cd, 0x8000000000000000, REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_CVTDL(s1, d);
+ M_MOV_IMM(0x8000000000000000, REG_ITMP2);
+ M_LCMP(REG_ITMP2, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (ptrint) asm_builtin_d2l, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_d2l, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_F2D: /* ..., value ==> ..., (double) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- x86_64_cvtss2sd_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ M_CVTFD(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_D2F: /* ..., value ==> ..., (float) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- x86_64_cvtsd2ss_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
+ M_CVTDF(s1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_CLR(d);
M_MOV_IMM(1, REG_ITMP1);
M_MOV_IMM(-1, REG_ITMP2);
- x86_64_ucomiss_reg_reg(cd, s1, s2);
+ emit_ucomiss_reg_reg(cd, s1, s2);
M_CMOVB(REG_ITMP1, d);
M_CMOVA(REG_ITMP2, d);
M_CMOVP(REG_ITMP2, d); /* treat unordered as GT */
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_CLR(d);
M_MOV_IMM(1, REG_ITMP1);
M_MOV_IMM(-1, REG_ITMP2);
- x86_64_ucomiss_reg_reg(cd, s1, s2);
+ emit_ucomiss_reg_reg(cd, s1, s2);
M_CMOVB(REG_ITMP1, d);
M_CMOVA(REG_ITMP2, d);
M_CMOVP(REG_ITMP1, d); /* treat unordered as LT */
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_CLR(d);
M_MOV_IMM(1, REG_ITMP1);
M_MOV_IMM(-1, REG_ITMP2);
- x86_64_ucomisd_reg_reg(cd, s1, s2);
+ emit_ucomisd_reg_reg(cd, s1, s2);
M_CMOVB(REG_ITMP1, d);
M_CMOVA(REG_ITMP2, d);
M_CMOVP(REG_ITMP2, d); /* treat unordered as GT */
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
M_CLR(d);
M_MOV_IMM(1, REG_ITMP1);
M_MOV_IMM(-1, REG_ITMP2);
- x86_64_ucomisd_reg_reg(cd, s1, s2);
+ emit_ucomisd_reg_reg(cd, s1, s2);
M_CMOVB(REG_ITMP1, d);
M_CMOVA(REG_ITMP2, d);
M_CMOVP(REG_ITMP1, d); /* treat unordered as LT */
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_ARRAYLENGTH: /* ..., arrayref ==> ..., (int) length */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
gen_nullptr_check(s1);
M_ILD(d, s1, OFFSET(java_arrayheader, size));
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movsbq_memindex_reg(cd, OFFSET(java_bytearray, data[0]), s1, s2, 0, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_movsbq_memindex_reg(cd, OFFSET(java_bytearray, data[0]), s1, s2, 0, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movzwq_memindex_reg(cd, OFFSET(java_chararray, data[0]), s1, s2, 1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_movzwq_memindex_reg(cd, OFFSET(java_chararray, data[0]), s1, s2, 1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movswq_memindex_reg(cd, OFFSET(java_shortarray, data[0]), s1, s2, 1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_movswq_memindex_reg(cd, OFFSET(java_shortarray, data[0]), s1, s2, 1, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movl_memindex_reg(cd, OFFSET(java_intarray, data[0]), s1, s2, 2, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_movl_memindex_reg(cd, OFFSET(java_intarray, data[0]), s1, s2, 2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]), s1, s2, 3, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]), s1, s2, 3, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movss_memindex_reg(cd, OFFSET(java_floatarray, data[0]), s1, s2, 2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movss_memindex_reg(cd, OFFSET(java_floatarray, data[0]), s1, s2, 2, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movsd_memindex_reg(cd, OFFSET(java_doublearray, data[0]), s1, s2, 3, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movsd_memindex_reg(cd, OFFSET(java_doublearray, data[0]), s1, s2, 3, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_mov_memindex_reg(cd, OFFSET(java_objectarray, data[0]), s1, s2, 3, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_mov_memindex_reg(cd, OFFSET(java_objectarray, data[0]), s1, s2, 3, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movb_reg_memindex(cd, s3, OFFSET(java_bytearray, data[0]), s1, s2, 0);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_movb_reg_memindex(cd, s3, OFFSET(java_bytearray, data[0]), s1, s2, 0);
break;
case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movw_reg_memindex(cd, s3, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_chararray, data[0]), s1, s2, 1);
break;
case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movw_reg_memindex(cd, s3, OFFSET(java_shortarray, data[0]), s1, s2, 1);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_shortarray, data[0]), s1, s2, 1);
break;
case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movl_reg_memindex(cd, s3, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_movl_reg_memindex(cd, s3, OFFSET(java_intarray, data[0]), s1, s2, 2);
break;
case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_mov_reg_memindex(cd, s3, OFFSET(java_longarray, data[0]), s1, s2, 3);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_longarray, data[0]), s1, s2, 3);
break;
case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_flt(s3, src, REG_FTMP3);
- x86_64_movss_reg_memindex(cd, s3, OFFSET(java_floatarray, data[0]), s1, s2, 2);
+ s3 = emit_load_s3(jd, iptr, src, REG_FTMP3);
+ emit_movss_reg_memindex(cd, s3, OFFSET(java_floatarray, data[0]), s1, s2, 2);
break;
case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_flt(s3, src, REG_FTMP3);
- x86_64_movsd_reg_memindex(cd, s3, OFFSET(java_doublearray, data[0]), s1, s2, 3);
+ s3 = emit_load_s3(jd, iptr, src, REG_FTMP3);
+ emit_movsd_reg_memindex(cd, s3, OFFSET(java_doublearray, data[0]), s1, s2, 3);
break;
case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- var_to_reg_int(s3, src, REG_ITMP3);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
M_MOV(s1, rd->argintregs[0]);
M_MOV(s3, rd->argintregs[1]);
M_CALL(REG_ITMP1);
M_TEST(REG_RESULT);
M_BEQ(0);
- codegen_add_arraystoreexception_ref(cd, cd->mcodeptr);
+ codegen_add_arraystoreexception_ref(cd);
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_mov_reg_memindex(cd, s3, OFFSET(java_objectarray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, src->prev->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src->prev, REG_ITMP2);
+ s3 = emit_load_s3(jd, iptr, src, REG_ITMP3);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_objectarray, data[0]), s1, s2, 3);
break;
case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movb_imm_memindex(cd, iptr->val.i, OFFSET(java_bytearray, data[0]), s1, s2, 0);
+ emit_movb_imm_memindex(cd, iptr->val.i, OFFSET(java_bytearray, data[0]), s1, s2, 0);
break;
case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ emit_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_chararray, data[0]), s1, s2, 1);
break;
case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_shortarray, data[0]), s1, s2, 1);
+ emit_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_shortarray, data[0]), s1, s2, 1);
break;
case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_movl_imm_memindex(cd, iptr->val.i, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ emit_movl_imm_memindex(cd, iptr->val.i, OFFSET(java_intarray, data[0]), s1, s2, 2);
break;
case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
if (IS_IMM32(iptr->val.l)) {
- x86_64_mov_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
+ emit_mov_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
} else {
- x86_64_movl_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
- x86_64_movl_imm_memindex(cd, (u4) (iptr->val.l >> 32), OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
+ emit_movl_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
+ emit_movl_imm_memindex(cd, (u4) (iptr->val.l >> 32), OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
}
break;
case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
if (iptr->op1 == 0) {
gen_nullptr_check(s1);
gen_bound_check;
}
- x86_64_mov_imm_memindex(cd, 0, OFFSET(java_objectarray, data[0]), s1, s2, 3);
+ emit_mov_imm_memindex(cd, 0, OFFSET(java_objectarray, data[0]), s1, s2, 3);
break;
case ICMD_GETSTATIC: /* ... ==> ..., value */
/* op1 = type, val.a = field address */
- if (iptr->val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
disp = dseg_addaddress(cd, NULL);
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_get_putstatic,
- (unresolved_field *) iptr->target, disp);
+ codegen_addpatchref(cd, PATCHER_get_putstatic,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr), disp);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* PROFILE_CYCLE_START; */
} else {
- fieldinfo *fi = iptr->val.a;
+ fieldinfo *fi = INSTRUCTION_RESOLVED_FIELDINFO(iptr);
disp = dseg_addaddress(cd, &(fi->value));
if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
PROFILE_CYCLE_STOP;
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_clinit, fi->class, 0);
+ codegen_addpatchref(cd, PATCHER_clinit, fi->class, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* This approach is much faster than moving the field
address inline into a register. */
- M_ALD(REG_ITMP2, RIP, -(((ptrint) cd->mcodeptr + 7) -
+ M_ALD(REG_ITMP1, RIP, -(((ptrint) cd->mcodeptr + 7) -
(ptrint) cd->mcodebase) + disp);
switch (iptr->op1) {
case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- M_ILD(d, REG_ITMP2, 0);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_ILD(d, REG_ITMP1, 0);
break;
case TYPE_LNG:
case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- M_LLD(d, REG_ITMP2, 0);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
+ M_LLD(d, REG_ITMP1, 0);
break;
case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movss_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_FLD(d, REG_ITMP1, 0);
break;
case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movsd_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_DLD(d, REG_ITMP1, 0);
break;
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_PUTSTATIC: /* ..., value ==> ... */
/* op1 = type, val.a = field address */
- if (iptr->val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
disp = dseg_addaddress(cd, NULL);
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_get_putstatic,
- (unresolved_field *) iptr->target, disp);
+ codegen_addpatchref(cd, PATCHER_get_putstatic,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr), disp);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* PROFILE_CYCLE_START; */
} else {
- fieldinfo *fi = iptr->val.a;
+ fieldinfo *fi = INSTRUCTION_RESOLVED_FIELDINFO(iptr);
disp = dseg_addaddress(cd, &(fi->value));
if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
PROFILE_CYCLE_STOP;
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_clinit, fi->class, 0);
+ codegen_addpatchref(cd, PATCHER_clinit, fi->class, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* This approach is much faster than moving the field
address inline into a register. */
- M_ALD(REG_ITMP2, RIP, -(((ptrint) cd->mcodeptr + 7) -
+ M_ALD(REG_ITMP1, RIP, -(((ptrint) cd->mcodeptr + 7) -
(ptrint) cd->mcodebase) + disp);
switch (iptr->op1) {
case TYPE_INT:
- var_to_reg_int(s2, src, REG_ITMP1);
- M_IST(s2, REG_ITMP2, 0);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_IST(s2, REG_ITMP1, 0);
break;
case TYPE_LNG:
case TYPE_ADR:
- var_to_reg_int(s2, src, REG_ITMP1);
- M_LST(s2, REG_ITMP2, 0);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LST(s2, REG_ITMP1, 0);
break;
case TYPE_FLT:
- var_to_reg_flt(s2, src, REG_FTMP1);
- x86_64_movss_reg_membase(cd, s2, REG_ITMP2, 0);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP1);
+ M_FST(s2, REG_ITMP1, 0);
break;
case TYPE_DBL:
- var_to_reg_flt(s2, src, REG_FTMP1);
- x86_64_movsd_reg_membase(cd, s2, REG_ITMP2, 0);
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP1);
+ M_DST(s2, REG_ITMP1, 0);
break;
}
break;
/* op1 = type, val.a = field address (in */
/* following NOP) */
- if (iptr[1].val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr + 1)) {
disp = dseg_addaddress(cd, NULL);
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_get_putstatic,
- (unresolved_field *) iptr[1].target, disp);
+ codegen_addpatchref(cd, PATCHER_get_putstatic,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr + 1), disp);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* PROFILE_CYCLE_START; */
} else {
- fieldinfo *fi = iptr[1].val.a;
+ fieldinfo *fi = INSTRUCTION_RESOLVED_FIELDINFO(iptr + 1);
disp = dseg_addaddress(cd, &(fi->value));
if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
PROFILE_CYCLE_STOP;
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_clinit, fi->class, 0);
+ codegen_addpatchref(cd, PATCHER_clinit, fi->class, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
case TYPE_LNG:
case TYPE_ADR:
case TYPE_DBL:
- if (IS_IMM32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l))
M_LST_IMM32(iptr->val.l, REG_ITMP1, 0);
- } else {
+ else {
M_IST_IMM(iptr->val.l, REG_ITMP1, 0);
M_IST_IMM(iptr->val.l >> 32, REG_ITMP1, 4);
}
case ICMD_GETFIELD: /* ... ==> ..., value */
/* op1 = type, val.i = field offset */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
gen_nullptr_check(s1);
- if (iptr->val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_get_putfield,
- (unresolved_field *) iptr->target, 0);
+ codegen_addpatchref(cd, PATCHER_get_putfield,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr), 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
disp = 0;
- } else {
- disp = ((fieldinfo *) (iptr->val.a))->offset;
- }
+ } else
+ disp = INSTRUCTION_RESOLVED_FIELDINFO(iptr)->offset;
switch (iptr->op1) {
case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.a == NULL)
- M_ILD32(d, s1, disp);
- else
- M_ILD(d, s1, disp);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_ILD32(d, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.a == NULL)
- M_LLD32(d, s1, disp);
- else
- M_LLD(d, s1, disp);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
+ M_LLD32(d, s1, disp);
break;
case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movss_membase32_reg(cd, s1, disp, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_FLD32(d, s1, disp);
break;
case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movsd_membase32_reg(cd, s1, disp, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FTMP1);
+ M_DLD32(d, s1, disp);
break;
}
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
/* op1 = type, val.i = field offset */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
gen_nullptr_check(s1);
- if (IS_INT_LNG_TYPE(iptr->op1)) {
- var_to_reg_int(s2, src, REG_ITMP2);
- } else {
- var_to_reg_flt(s2, src, REG_FTMP2);
- }
+ if (IS_INT_LNG_TYPE(iptr->op1))
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ else
+ s2 = emit_load_s2(jd, iptr, src, REG_FTMP2);
- if (iptr->val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_get_putfield,
- (unresolved_field *) iptr->target, 0);
+ codegen_addpatchref(cd, PATCHER_get_putfield,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr), 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
disp = 0;
- } else {
- disp = ((fieldinfo *) (iptr->val.a))->offset;
- }
+ } else
+ disp = INSTRUCTION_RESOLVED_FIELDINFO(iptr)->offset;
switch (iptr->op1) {
case TYPE_INT:
- if (iptr->val.a == NULL)
- M_IST32(s2, s1, disp);
- else
- M_IST(s2, s1, disp);
+ M_IST32(s2, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
- if (iptr->val.a == NULL)
- M_LST32(s2, s1, disp);
- else
- M_LST(s2, s1, disp);
+ M_LST32(s2, s1, disp);
break;
case TYPE_FLT:
- x86_64_movss_reg_membase32(cd, s2, s1, disp);
+ M_FST32(s2, s1, disp);
break;
case TYPE_DBL:
- x86_64_movsd_reg_membase32(cd, s2, s1, disp);
+ M_DST32(s2, s1, disp);
break;
}
break;
/* op1 = type, val.a = field address (in */
/* following NOP) */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
gen_nullptr_check(s1);
- if (iptr[1].val.a == NULL) {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr + 1)) {
/* PROFILE_CYCLE_STOP; */
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_putfieldconst,
- (unresolved_field *) iptr[1].target, 0);
+ codegen_addpatchref(cd, PATCHER_putfieldconst,
+ INSTRUCTION_UNRESOLVED_FIELD(iptr + 1), 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
disp = 0;
- } else {
- disp = ((fieldinfo *) (iptr[1].val.a))->offset;
- }
+ } else
+ disp = INSTRUCTION_RESOLVED_FIELDINFO(iptr + 1)->offset;
switch (iptr->op1) {
case TYPE_INT:
case TYPE_FLT:
- if (iptr[1].val.a == NULL)
- M_IST32_IMM(iptr->val.i, s1, disp);
- else
- M_IST_IMM(iptr->val.i, s1, disp);
+ M_IST32_IMM(iptr->val.i, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
case TYPE_DBL:
- /* We can only optimize the move, if the class is
- resolved. Otherwise we don't know what to patch. */
- if (iptr[1].val.a == NULL) {
- M_IST32_IMM(iptr->val.l, s1, disp);
- M_IST32_IMM(iptr->val.l >> 32, s1, disp + 4);
- } else {
- if (IS_IMM32(iptr->val.l)) {
- M_LST_IMM32(iptr->val.l, s1, disp);
- } else {
- M_IST_IMM(iptr->val.l, s1, disp);
- M_IST_IMM(iptr->val.l >> 32, s1, disp + 4);
- }
- }
+ M_IST32_IMM(iptr->val.l, s1, disp);
+ M_IST32_IMM(iptr->val.l >> 32, s1, disp + 4);
break;
}
break;
case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
M_INTMOVE(s1, REG_ITMP1_XPTR);
PROFILE_CYCLE_STOP;
#ifdef ENABLE_VERIFIER
if (iptr->val.a) {
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_athrow_areturn,
+ codegen_addpatchref(cd, PATCHER_athrow_areturn,
(unresolved_class *) iptr->val.a, 0);
if (opt_showdisassemble) {
/* op1 = target JavaVM pc */
M_JMP_IMM(0);
- codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_JSR: /* ... ==> ... */
/* op1 = target JavaVM pc */
M_CALL_IMM(0);
- codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_RET: /* ... ==> ... */
/* op1 = local variable */
var = &(rd->locals[iptr->op1][TYPE_ADR]);
- var_to_reg_int(s1, var, REG_ITMP1);
- M_JMP(s1);
+ if (var->flags & INMEMORY) {
+ M_ALD(REG_ITMP1, REG_SP, var->regoff * 8);
+ M_JMP(REG_ITMP1);
+ } else
+ M_JMP(var->regoff);
break;
case ICMD_IFNULL: /* ..., value ==> ... */
/* op1 = target JavaVM pc */
- if (src->flags & INMEMORY)
- M_CMP_IMM_MEMBASE(0, REG_SP, src->regoff * 8);
- else
- M_TEST(src->regoff);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_TEST(s1);
M_BEQ(0);
- codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFNONNULL: /* ..., value ==> ... */
/* op1 = target JavaVM pc */
- if (src->flags & INMEMORY)
- M_CMP_IMM_MEMBASE(0, REG_SP, src->regoff * 8);
- else
- M_TEST(src->regoff);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_TEST(s1);
M_BNE(0);
- codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFEQ: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_E, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFLT: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_L, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BLT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFLE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_LE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BLE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFNE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_NE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BNE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFGT: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BGT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IFGE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.i = constant */
- x86_64_emit_ifcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ M_ICMP_IMM(iptr->val.i, s1);
+ M_BGE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LEQ: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_E, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LLT: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_L, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BLT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LLE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_LE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BLE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LNE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_NE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BNE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LGT: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BGT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LGE: /* ..., value ==> ... */
/* op1 = target JavaVM pc, val.l = constant */
- x86_64_emit_if_lcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ if (IS_IMM32(iptr->val.l))
+ M_LCMP_IMM(iptr->val.l, s1);
+ else {
+ M_MOV_IMM(iptr->val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ M_BGE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_E, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
case ICMD_IF_ACMPEQ: /* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_E, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPNE: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_NE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BNE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPNE: /* ..., value, value ==> ... */
case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_NE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BNE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_L, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BLT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPLT: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_L, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BLT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPGT: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BGT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPGT: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BGT(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPLE: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_LE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BLE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPLE: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_LE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BLE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_ICMPGE: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_icmpcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_ICMP(s2, s1);
+ M_BGE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
case ICMD_IF_LCMPGE: /* ..., value, value ==> ... */
/* op1 = target JavaVM pc */
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, src->prev, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP2);
+ M_LCMP(s2, s1);
+ M_BGE(0);
+ codegen_addreference(cd, (basicblock *) iptr->target);
break;
/* (value xx 0) ? IFxx_ICONST : ELSE_ICONST */
case ICMD_IFGT_ICONST:
case ICMD_IFLE_ICONST:
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
break;
}
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_IRETURN: /* ..., retvalue ==> ... */
case ICMD_LRETURN:
- var_to_reg_int(s1, src, REG_RESULT);
+ s1 = emit_load_s1(jd, iptr, src, REG_RESULT);
M_INTMOVE(s1, REG_RESULT);
goto nowperformreturn;
case ICMD_ARETURN: /* ..., retvalue ==> ... */
- var_to_reg_int(s1, src, REG_RESULT);
+ s1 = emit_load_s1(jd, iptr, src, REG_RESULT);
M_INTMOVE(s1, REG_RESULT);
#ifdef ENABLE_VERIFIER
if (iptr->val.a) {
PROFILE_CYCLE_STOP;
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_athrow_areturn,
+ codegen_addpatchref(cd, PATCHER_athrow_areturn,
(unresolved_class *) iptr->val.a, 0);
if (opt_showdisassemble) {
case ICMD_FRETURN: /* ..., retvalue ==> ... */
case ICMD_DRETURN:
- var_to_reg_flt(s1, src, REG_FRESULT);
+ s1 = emit_load_s1(jd, iptr, src, REG_FRESULT);
M_FLTMOVE(s1, REG_FRESULT);
goto nowperformreturn;
nowperformreturn:
{
s4 i, p;
-
- p = parentargs_base;
-
- /* call trace function */
+
+ p = stackframesize;
+
+#if !defined(NDEBUG)
+ /* generate call trace */
+
if (opt_verbosecall) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
+ emit_alu_imm_reg(cd, ALU_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, 0 * 8);
- x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, 1 * 8);
+ emit_mov_reg_membase(cd, REG_RESULT, REG_SP, 0 * 8);
+ emit_movq_reg_membase(cd, REG_FRESULT, REG_SP, 1 * 8);
- x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
- x86_64_mov_reg_reg(cd, REG_RESULT, rd->argintregs[1]);
+ emit_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
+ emit_mov_reg_reg(cd, REG_RESULT, rd->argintregs[1]);
M_FLTMOVE(REG_FRESULT, rd->argfltregs[0]);
M_FLTMOVE(REG_FRESULT, rd->argfltregs[1]);
- x86_64_mov_imm_reg(cd, (u8) builtin_displaymethodstop, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ M_MOV_IMM(builtin_displaymethodstop, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_RESULT);
- x86_64_movq_membase_reg(cd, REG_SP, 1 * 8, REG_FRESULT);
+ emit_mov_membase_reg(cd, REG_SP, 0 * 8, REG_RESULT);
+ emit_movq_membase_reg(cd, REG_SP, 1 * 8, REG_FRESULT);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
+ emit_alu_imm_reg(cd, ALU_ADD, 2 * 8, REG_SP);
}
+#endif /* !defined(NDEBUG) */
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
M_ALD(rd->argintregs[0], REG_SP, rd->memuse * 8);
/* deallocate stack */
- if (parentargs_base)
- M_AADD_IMM(parentargs_base * 8, REG_SP);
+ if (stackframesize)
+ M_AADD_IMM(stackframesize * 8, REG_SP);
/* generate method profiling code */
l = s4ptr[1]; /* low */
i = s4ptr[2]; /* high */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
M_INTMOVE(s1, REG_ITMP1);
- if (l != 0) {
- x86_64_alul_imm_reg(cd, X86_64_SUB, l, REG_ITMP1);
- }
+
+ if (l != 0)
+ M_ISUB_IMM(l, REG_ITMP1);
+
i = i - l + 1;
/* range check */
- x86_64_alul_imm_reg(cd, X86_64_CMP, i - 1, REG_ITMP1);
- x86_64_jcc(cd, X86_64_CC_A, 0);
+ M_ICMP_IMM(i - 1, REG_ITMP1);
+ M_BA(0);
- /* codegen_addreference(cd, BlockPtrOfPC(s4ptr[0]), cd->mcodeptr); */
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) tptr[0]);
/* build jump table top down and use address of lowest entry */
--tptr;
}
- /* length of dataseg after last dseg_addtarget is used by load */
+ /* length of dataseg after last dseg_addtarget is used
+ by load */
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2);
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_memindex_reg(cd, -(cd->dseglen), REG_ITMP2, REG_ITMP1, 3, REG_ITMP1);
- x86_64_jmp_reg(cd, REG_ITMP1);
+ M_MOV_IMM(0, REG_ITMP2);
+ dseg_adddata(cd);
+ emit_mov_memindex_reg(cd, -(cd->dseglen), REG_ITMP2, REG_ITMP1, 3, REG_ITMP1);
+ M_JMP(REG_ITMP1);
}
break;
i = s4ptr[1]; /* count */
MCODECHECK(8 + ((7 + 6) * i) + 5);
- var_to_reg_int(s1, src, REG_ITMP1); /* reg compare should always be faster */
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+
while (--i >= 0) {
s4ptr += 2;
++tptr;
val = s4ptr[0];
- x86_64_alul_imm_reg(cd, X86_64_CMP, val, s1);
- x86_64_jcc(cd, X86_64_CC_E, 0);
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ M_ICMP_IMM(val, s1);
+ M_BEQ(0);
+ codegen_addreference(cd, (basicblock *) tptr[0]);
}
- x86_64_jmp_imm(cd, 0);
+ M_JMP_IMM(0);
tptr = (void **) iptr->target;
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) tptr[0]);
}
break;
case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
case ICMD_INVOKEINTERFACE:
- lm = iptr->val.a;
-
- if (lm == NULL) {
- unresolved_method *um = iptr->target;
- md = um->methodref->parseddesc.md;
- } else {
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ md = INSTRUCTION_UNRESOLVED_METHOD(iptr)->methodref->parseddesc.md;
+ lm = NULL;
+ }
+ else {
+ lm = INSTRUCTION_RESOLVED_METHODINFO(iptr);
md = lm->parseddesc;
}
if (IS_INT_LNG_TYPE(src->type)) {
if (!md->params[s3].inmemory) {
s1 = rd->argintregs[md->params[s3].regoff];
- var_to_reg_int(d, src, s1);
+ d = emit_load_s1(jd, iptr, src, s1);
M_INTMOVE(d, s1);
} else {
- var_to_reg_int(d, src, REG_ITMP1);
+ d = emit_load_s1(jd, iptr, src, REG_ITMP1);
M_LST(d, REG_SP, md->params[s3].regoff * 8);
}
} else {
if (!md->params[s3].inmemory) {
s1 = rd->argfltregs[md->params[s3].regoff];
- var_to_reg_flt(d, src, s1);
+ d = emit_load_s1(jd, iptr, src, s1);
M_FLTMOVE(d, s1);
} else {
- var_to_reg_flt(d, src, REG_FTMP1);
- M_DST(d, REG_SP, md->params[s3].regoff * 8);
+ d = emit_load_s1(jd, iptr, src, REG_FTMP1);
+
+ if (IS_2_WORD_TYPE(src->type))
+ M_DST(d, REG_SP, md->params[s3].regoff * 8);
+ else
+ M_FST(d, REG_SP, md->params[s3].regoff * 8);
}
}
}
if (iptr->op1 == true) {
M_TEST(REG_RESULT);
M_BEQ(0);
- codegen_add_fillinstacktrace_ref(cd, cd->mcodeptr);
+ codegen_add_fillinstacktrace_ref(cd);
}
break;
case ICMD_INVOKESPECIAL:
M_TEST(rd->argintregs[0]);
M_BEQ(0);
- codegen_add_nullpointerexception_ref(cd, cd->mcodeptr);
+ codegen_add_nullpointerexception_ref(cd);
/* first argument contains pointer */
/* gen_nullptr_check(rd->argintregs[0]); */
/* access memory for hardware nullptr */
-/* x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); */
+/* emit_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); */
/* fall through */
case ICMD_INVOKESTATIC:
if (lm == NULL) {
- unresolved_method *um = iptr->target;
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_invokestatic_special, um, 0);
+ codegen_addpatchref(cd, PATCHER_invokestatic_special,
+ um, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
gen_nullptr_check(rd->argintregs[0]);
if (lm == NULL) {
- unresolved_method *um = iptr->target;
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_invokevirtual, um, 0);
+ codegen_addpatchref(cd, PATCHER_invokevirtual, um, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
d = lm->parseddesc->returntype.type;
}
- x86_64_mov_membase_reg(cd, rd->argintregs[0],
- OFFSET(java_objectheader, vftbl),
- REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, s1, REG_ITMP1);
- M_CALL(REG_ITMP1);
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
+ OFFSET(java_objectheader, vftbl));
+ M_ALD32(REG_ITMP3, REG_METHODPTR, s1);
+ M_CALL(REG_ITMP3);
break;
case ICMD_INVOKEINTERFACE:
gen_nullptr_check(rd->argintregs[0]);
if (lm == NULL) {
- unresolved_method *um = iptr->target;
+ unresolved_method *um = INSTRUCTION_UNRESOLVED_METHOD(iptr);
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_invokeinterface, um, 0);
+ codegen_addpatchref(cd, PATCHER_invokeinterface, um, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
d = lm->parseddesc->returntype.type;
}
- M_ALD(REG_ITMP2, rd->argintregs[0],
+ M_ALD(REG_METHODPTR, rd->argintregs[0],
OFFSET(java_objectheader, vftbl));
- x86_64_mov_membase32_reg(cd, REG_ITMP2, s1, REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, s2, REG_ITMP1);
- M_CALL(REG_ITMP1);
+ M_ALD32(REG_METHODPTR, REG_METHODPTR, s1);
+ M_ALD32(REG_ITMP3, REG_METHODPTR, s2);
+ M_CALL(REG_ITMP3);
break;
}
if (d != TYPE_VOID) {
if (IS_INT_LNG_TYPE(iptr->dst->type)) {
- s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
+ s1 = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_RESULT);
M_INTMOVE(REG_RESULT, s1);
- store_reg_to_var_int(iptr->dst, s1);
+ emit_store(jd, iptr, iptr->dst, s1);
} else {
- s1 = reg_of_var(rd, iptr->dst, REG_FRESULT);
+ s1 = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_FRESULT);
M_FLTMOVE(REG_FRESULT, s1);
- store_reg_to_var_flt(iptr->dst, s1);
+ emit_store(jd, iptr, iptr->dst, s1);
}
}
break;
supervftbl = super->vftbl;
}
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
/* calculate interface checkcast code size */
M_TEST(s1);
M_BEQ(6 + (opt_showdisassemble ? 5 : 0) + 7 + 6 + s2 + 5 + s3);
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_checkcast_instanceof_flags,
+ codegen_addpatchref(cd, PATCHER_checkcast_instanceof_flags,
(constant_classref *) iptr->target, 0);
if (opt_showdisassemble) {
M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
if (!super) {
- codegen_addpatchref(cd, cd->mcodeptr,
+ codegen_addpatchref(cd,
PATCHER_checkcast_instanceof_interface,
- (constant_classref *) iptr->target, 0);
+ (constant_classref *) iptr->target,
+ 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
}
}
- x86_64_movl_membase32_reg(cd, REG_ITMP2,
+ emit_movl_membase32_reg(cd, REG_ITMP2,
OFFSET(vftbl_t, interfacetablelength),
REG_ITMP3);
/* XXX TWISTI: should this be int arithmetic? */
M_LSUB_IMM32(superindex, REG_ITMP3);
M_TEST(REG_ITMP3);
M_BLE(0);
- codegen_add_classcastexception_ref(cd, cd->mcodeptr);
- x86_64_mov_membase32_reg(cd, REG_ITMP2,
+ codegen_add_classcastexception_ref(cd);
+ emit_mov_membase32_reg(cd, REG_ITMP2,
OFFSET(vftbl_t, interfacetable[0]) -
superindex * sizeof(methodptr*),
REG_ITMP3);
M_TEST(REG_ITMP3);
M_BEQ(0);
- codegen_add_classcastexception_ref(cd, cd->mcodeptr);
+ codegen_add_classcastexception_ref(cd);
if (!super)
M_JMP_IMM(s3);
M_ALD(REG_ITMP2, s1, OFFSET(java_objectheader, vftbl));
if (!super) {
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_checkcast_class,
+ codegen_addpatchref(cd, PATCHER_checkcast_class,
(constant_classref *) iptr->target,
0);
}
M_MOV_IMM(supervftbl, REG_ITMP3);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_movl_membase32_reg(cd, REG_ITMP2,
+ emit_movl_membase32_reg(cd, REG_ITMP2,
OFFSET(vftbl_t, baseval),
REG_ITMP2);
/* if (s1 != REG_ITMP1) { */
- /* x86_64_movl_membase_reg(cd, REG_ITMP3, */
+ /* emit_movl_membase_reg(cd, REG_ITMP3, */
/* OFFSET(vftbl_t, baseval), */
/* REG_ITMP1); */
- /* x86_64_movl_membase_reg(cd, REG_ITMP3, */
+ /* emit_movl_membase_reg(cd, REG_ITMP3, */
/* OFFSET(vftbl_t, diffval), */
/* REG_ITMP3); */
- /* #if defined(USE_THREADS) && defined(NATIVE_THREADS) */
+ /* #if defined(ENABLE_THREADS) */
/* codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase); */
/* #endif */
- /* x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP1, REG_ITMP2); */
+ /* emit_alu_reg_reg(cd, ALU_SUB, REG_ITMP1, REG_ITMP2); */
/* } else { */
- x86_64_movl_membase32_reg(cd, REG_ITMP3,
+ emit_movl_membase32_reg(cd, REG_ITMP3,
OFFSET(vftbl_t, baseval),
REG_ITMP3);
M_LSUB(REG_ITMP3, REG_ITMP2);
M_MOV_IMM(supervftbl, REG_ITMP3);
M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
/* } */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
#endif
- M_CMP(REG_ITMP3, REG_ITMP2);
+ M_LCMP(REG_ITMP3, REG_ITMP2);
M_BA(0); /* (u) REG_ITMP1 > (u) REG_ITMP2 -> jump */
- codegen_add_classcastexception_ref(cd, cd->mcodeptr);
+ codegen_add_classcastexception_ref(cd);
}
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP3);
} else {
/* array type cast-check */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
M_INTMOVE(s1, rd->argintregs[0]);
if (iptr->val.a == NULL) {
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_builtin_arraycheckcast,
+ codegen_addpatchref(cd, PATCHER_builtin_arraycheckcast,
(constant_classref *) iptr->target, 0);
if (opt_showdisassemble) {
M_CALL(REG_ITMP1);
M_TEST(REG_RESULT);
M_BEQ(0);
- codegen_add_classcastexception_ref(cd, cd->mcodeptr);
+ codegen_add_classcastexception_ref(cd);
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP1);
}
M_INTMOVE(s1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
break;
case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
supervftbl = super->vftbl;
}
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP2);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
+ d = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_ITMP2);
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
s1 = REG_ITMP1;
if (!super)
s3 += (opt_showdisassemble ? 5 : 0);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
+ emit_alu_reg_reg(cd, ALU_XOR, d, d);
/* if class is not resolved, check which code to call */
if (!super) {
- x86_64_test_reg_reg(cd, s1, s1);
- x86_64_jcc(cd, X86_64_CC_Z, (6 + (opt_showdisassemble ? 5 : 0) +
+ emit_test_reg_reg(cd, s1, s1);
+ emit_jcc(cd, CC_Z, (6 + (opt_showdisassemble ? 5 : 0) +
7 + 6 + s2 + 5 + s3));
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_checkcast_instanceof_flags,
+ codegen_addpatchref(cd, PATCHER_checkcast_instanceof_flags,
(constant_classref *) iptr->target, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
}
- x86_64_movl_imm_reg(cd, 0, REG_ITMP3); /* super->flags */
- x86_64_alul_imm_reg(cd, X86_64_AND, ACC_INTERFACE, REG_ITMP3);
- x86_64_jcc(cd, X86_64_CC_Z, s2 + 5);
+ emit_movl_imm_reg(cd, 0, REG_ITMP3); /* super->flags */
+ emit_alul_imm_reg(cd, ALU_AND, ACC_INTERFACE, REG_ITMP3);
+ emit_jcc(cd, CC_Z, s2 + 5);
}
/* interface instanceof code */
if (!super || (super->flags & ACC_INTERFACE)) {
if (super) {
- x86_64_test_reg_reg(cd, s1, s1);
- x86_64_jcc(cd, X86_64_CC_Z, s2);
+ emit_test_reg_reg(cd, s1, s1);
+ emit_jcc(cd, CC_Z, s2);
}
- x86_64_mov_membase_reg(cd, s1,
+ emit_mov_membase_reg(cd, s1,
OFFSET(java_objectheader, vftbl),
REG_ITMP1);
if (!super) {
- codegen_addpatchref(cd, cd->mcodeptr,
+ codegen_addpatchref(cd,
PATCHER_checkcast_instanceof_interface,
(constant_classref *) iptr->target, 0);
}
}
- x86_64_movl_membase32_reg(cd, REG_ITMP1,
+ emit_movl_membase32_reg(cd, REG_ITMP1,
OFFSET(vftbl_t, interfacetablelength),
REG_ITMP3);
- x86_64_alu_imm32_reg(cd, X86_64_SUB, superindex, REG_ITMP3);
- x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
+ emit_alu_imm32_reg(cd, ALU_SUB, superindex, REG_ITMP3);
+ emit_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
a = 3 + 4 /* mov_membase32_reg */ + 3 /* test */ + 4 /* setcc */;
- x86_64_jcc(cd, X86_64_CC_LE, a);
- x86_64_mov_membase32_reg(cd, REG_ITMP1,
+ emit_jcc(cd, CC_LE, a);
+ emit_mov_membase32_reg(cd, REG_ITMP1,
OFFSET(vftbl_t, interfacetable[0]) -
superindex * sizeof(methodptr*),
REG_ITMP1);
- x86_64_test_reg_reg(cd, REG_ITMP1, REG_ITMP1);
- x86_64_setcc_reg(cd, X86_64_CC_NE, d);
+ emit_test_reg_reg(cd, REG_ITMP1, REG_ITMP1);
+ emit_setcc_reg(cd, CC_NE, d);
if (!super)
- x86_64_jmp_imm(cd, s3);
+ emit_jmp_imm(cd, s3);
}
/* class instanceof code */
if (!super || !(super->flags & ACC_INTERFACE)) {
if (super) {
- x86_64_test_reg_reg(cd, s1, s1);
- x86_64_jcc(cd, X86_64_CC_E, s3);
+ emit_test_reg_reg(cd, s1, s1);
+ emit_jcc(cd, CC_E, s3);
}
- x86_64_mov_membase_reg(cd, s1,
+ emit_mov_membase_reg(cd, s1,
OFFSET(java_objectheader, vftbl),
REG_ITMP1);
if (!super) {
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_instanceof_class,
+ codegen_addpatchref(cd, PATCHER_instanceof_class,
(constant_classref *) iptr->target, 0);
if (opt_showdisassemble) {
}
}
- x86_64_mov_imm_reg(cd, (ptrint) supervftbl, REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+ emit_mov_imm_reg(cd, (ptrint) supervftbl, REG_ITMP2);
+#if defined(ENABLE_THREADS)
codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_movl_membase_reg(cd, REG_ITMP1,
+ emit_movl_membase_reg(cd, REG_ITMP1,
OFFSET(vftbl_t, baseval),
REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP2,
+ emit_movl_membase_reg(cd, REG_ITMP2,
OFFSET(vftbl_t, diffval),
REG_ITMP3);
- x86_64_movl_membase_reg(cd, REG_ITMP2,
+ emit_movl_membase_reg(cd, REG_ITMP2,
OFFSET(vftbl_t, baseval),
REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d); /* may be REG_ITMP2 */
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP3, REG_ITMP1);
- x86_64_setcc_reg(cd, X86_64_CC_BE, d);
+ emit_alu_reg_reg(cd, ALU_SUB, REG_ITMP2, REG_ITMP1);
+ emit_alu_reg_reg(cd, ALU_XOR, d, d); /* may be REG_ITMP2 */
+ emit_alu_reg_reg(cd, ALU_CMP, REG_ITMP3, REG_ITMP1);
+ emit_setcc_reg(cd, CC_BE, d);
}
- store_reg_to_var_int(iptr->dst, d);
+ emit_store(jd, iptr, iptr->dst, d);
}
break;
/* copy SAVEDVAR sizes to stack */
if (src->varkind != ARGVAR) {
- var_to_reg_int(s2, src, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, src, REG_ITMP1);
M_LST(s2, REG_SP, s1 * 8);
}
}
/* is a patcher function set? */
if (iptr->val.a == NULL) {
- codegen_addpatchref(cd, cd->mcodeptr,
- PATCHER_builtin_multianewarray,
+ codegen_addpatchref(cd, PATCHER_builtin_multianewarray,
(constant_classref *) iptr->target, 0);
if (opt_showdisassemble) {
M_TEST(REG_RESULT);
M_BEQ(0);
- codegen_add_fillinstacktrace_ref(cd, cd->mcodeptr);
+ codegen_add_fillinstacktrace_ref(cd);
- s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
+ s1 = codegen_reg_of_var(rd, iptr->opc, iptr->dst, REG_RESULT);
M_INTMOVE(REG_RESULT, s1);
- store_reg_to_var_int(iptr->dst, s1);
+ emit_store(jd, iptr, iptr->dst, s1);
break;
default:
if ((src->varkind != STACKVAR)) {
s2 = src->type;
if (IS_FLT_DBL_TYPE(s2)) {
- var_to_reg_flt(s1, src, REG_FTMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_FTMP1);
if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
M_FLTMOVE(s1, rd->interfaces[len][s2].regoff);
} else {
- x86_64_movq_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
+ emit_movq_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
}
} else {
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, src, REG_ITMP1);
if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
M_INTMOVE(s1, rd->interfaces[len][s2].regoff);
} else {
- x86_64_mov_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
+ emit_mov_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
}
}
}
/* Check if the exception is an
ArrayIndexOutOfBoundsException. If so, move index register
- into REG_ITMP1. */
+ into a4. */
if (eref->reg != -1)
- M_MOV(eref->reg, REG_ITMP1);
+ M_MOV(eref->reg, rd->argintregs[4]);
/* calcuate exception address */
- M_MOV_IMM(0, REG_ITMP2_XPC);
- dseg_adddata(cd, cd->mcodeptr);
- M_AADD_IMM32(eref->branchpos - 6, REG_ITMP2_XPC);
+ M_MOV_IMM(0, rd->argintregs[3]);
+ dseg_adddata(cd);
+ M_AADD_IMM32(eref->branchpos - 6, rd->argintregs[3]);
/* move function to call into REG_ITMP3 */
} else {
savedmcodeptr = cd->mcodeptr;
- x86_64_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase), rd->argintregs[0]);
+ emit_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase), rd->argintregs[0]);
M_MOV(REG_SP, rd->argintregs[1]);
- M_ALD(rd->argintregs[2], REG_SP, parentargs_base * 8);
- M_MOV(REG_ITMP2_XPC, rd->argintregs[3]);
- M_MOV(REG_ITMP1, rd->argintregs[4]); /* for AIOOBE */
+ M_ALD(rd->argintregs[2], REG_SP, stackframesize * 8);
M_ASUB_IMM(2 * 8, REG_SP);
- M_AST(REG_ITMP2_XPC, REG_SP, 0 * 8);
+ M_AST(rd->argintregs[3], REG_SP, 0 * 8); /* store XPC */
M_CALL(REG_ITMP3);
/* move pointer to java_objectheader onto stack */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
/* create a virtual java_objectheader */
- (void) dseg_addaddress(cd, get_dummyLR()); /* monitorPtr */
- a = dseg_addaddress(cd, NULL); /* vftbl */
+ (void) dseg_addaddress(cd, NULL); /* flcword */
+ (void) dseg_addaddress(cd, lock_get_initial_lock_word()); /* monitorPtr */
+ a = dseg_addaddress(cd, NULL); /* vftbl */
- x86_64_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP3);
+ emit_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP3);
M_PUSH(REG_ITMP3);
#else
M_PUSH_IMM(0);
{
int i;
- replacementpoint = cd->code->rplpoints;
- for (i=0; i<cd->code->rplpointcount; ++i, ++replacementpoint) {
+ replacementpoint = jd->code->rplpoints;
+
+ for (i = 0; i < jd->code->rplpointcount; ++i, ++replacementpoint) {
/* check code segment size */
MCODECHECK(512);
}
}
- codegen_finish(m, cd, (s4) ((u1 *) cd->mcodeptr - cd->mcodebase));
+ codegen_finish(jd);
/* everything's ok */
*******************************************************************************/
-u1 *createnativestub(functionptr f, methodinfo *m, codegendata *cd,
- registerdata *rd, methoddesc *nmd)
+u1 *createnativestub(functionptr f, jitdata *jd, methoddesc *nmd)
{
- methoddesc *md;
- s4 stackframesize; /* size of stackframe if needed */
- s4 nativeparams;
- s4 i, j; /* count variables */
- s4 t;
- s4 s1, s2;
+ methodinfo *m;
+ codegendata *cd;
+ registerdata *rd;
+ methoddesc *md;
+ s4 stackframesize; /* size of stackframe if needed */
+ s4 nativeparams;
+ s4 i, j; /* count variables */
+ s4 t;
+ s4 s1, s2;
+
+ /* get required compiler data */
+
+ m = jd->m;
+ cd = jd->cd;
+ rd = jd->rd;
/* initialize variables */
(void) dseg_addlinenumbertablesize(cd);
(void) dseg_adds4(cd, 0); /* ExTableSize */
- /* initialize mcode variables */
-
- cd->mcodeptr = (u1 *) cd->mcodebase;
- cd->mcodeend = (s4 *) (cd->mcodebase + cd->mcodesize);
-
/* generate native method profiling code */
if (opt_prof) {
M_ASUB_IMM(stackframesize * 8, REG_SP);
+#if !defined(NDEBUG)
+ /* generate call trace */
+
if (opt_verbosecall) {
/* save integer and float argument registers */
- for (i = 0, j = 0; i < md->paramcount && j < INT_ARG_CNT; i++)
- if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
- M_LST(rd->argintregs[j++], REG_SP, (1 + i) * 8);
+ for (i = 0, j = 1; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- for (i = 0, j = 0; i < md->paramcount && j < FLT_ARG_CNT; i++)
- if (IS_FLT_DBL_TYPE(md->paramtypes[i].type))
- M_DST(rd->argfltregs[j++], REG_SP, (1 + INT_ARG_CNT + i) * 8);
+ if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
+ M_LST(rd->argintregs[s1], REG_SP, j * 8);
+ else
+ M_DST(rd->argfltregs[s1], REG_SP, j * 8);
+
+ j++;
+ }
+ }
/* show integer hex code for float arguments */
for (s1 = INT_ARG_CNT - 2; s1 >= i; s1--)
M_MOV(rd->argintregs[s1], rd->argintregs[s1 + 1]);
- x86_64_movd_freg_reg(cd, rd->argfltregs[j], rd->argintregs[i]);
+ emit_movd_freg_reg(cd, rd->argfltregs[j], rd->argintregs[i]);
j++;
}
}
/* restore integer and float argument registers */
- for (i = 0, j = 0; i < md->paramcount && j < INT_ARG_CNT; i++)
- if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
- M_LLD(rd->argintregs[j++], REG_SP, (1 + i) * 8);
+ for (i = 0, j = 1; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- for (i = 0, j = 0; i < md->paramcount && j < FLT_ARG_CNT; i++)
- if (IS_FLT_DBL_TYPE(md->paramtypes[i].type))
- M_DLD(rd->argfltregs[j++], REG_SP, (1 + INT_ARG_CNT + i) * 8);
- }
+ if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
+ M_LLD(rd->argintregs[s1], REG_SP, j * 8);
+ else
+ M_DLD(rd->argfltregs[s1], REG_SP, j * 8);
+ j++;
+ }
+ }
+ }
+#endif /* !defined(NDEBUG) */
/* get function address (this must happen before the stackframeinfo) */
#if !defined(WITH_STATIC_CLASSPATH)
if (f == NULL) {
- codegen_addpatchref(cd, cd->mcodeptr, PATCHER_resolve_native, m, 0);
+ codegen_addpatchref(cd, PATCHER_resolve_native, m, 0);
if (opt_showdisassemble) {
M_NOP; M_NOP; M_NOP; M_NOP; M_NOP;
/* save integer and float argument registers */
- for (i = 0, j = 0; i < md->paramcount && j < INT_ARG_CNT; i++)
- if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
- M_LST(rd->argintregs[j++], REG_SP, i * 8);
+ for (i = 0, j = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- for (i = 0, j = 0; i < md->paramcount && j < FLT_ARG_CNT; i++)
- if (IS_FLT_DBL_TYPE(md->paramtypes[i].type))
- M_DST(rd->argfltregs[j++], REG_SP, (INT_ARG_CNT + i) * 8);
+ if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
+ M_LST(rd->argintregs[s1], REG_SP, j * 8);
+ else
+ M_DST(rd->argfltregs[s1], REG_SP, j * 8);
+
+ j++;
+ }
+ }
M_AST(REG_ITMP3, REG_SP, (INT_ARG_CNT + FLT_ARG_CNT) * 8);
/* create dynamic stack info */
M_ALEA(REG_SP, stackframesize * 8, rd->argintregs[0]);
- x86_64_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase), rd->argintregs[1]);
+ emit_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase), rd->argintregs[1]);
M_ALEA(REG_SP, stackframesize * 8 + SIZEOF_VOID_P, rd->argintregs[2]);
M_ALD(rd->argintregs[3], REG_SP, stackframesize * 8);
M_MOV_IMM(codegen_start_native_call, REG_ITMP1);
/* restore integer and float argument registers */
- for (i = 0, j = 0; i < md->paramcount && j < INT_ARG_CNT; i++)
- if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
- M_LLD(rd->argintregs[j++], REG_SP, i * 8);
+ for (i = 0, j = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- for (i = 0, j = 0; i < md->paramcount && j < FLT_ARG_CNT; i++)
- if (IS_FLT_DBL_TYPE(md->paramtypes[i].type))
- M_DLD(rd->argfltregs[j++], REG_SP, (INT_ARG_CNT + i) * 8);
+ if (IS_INT_LNG_TYPE(md->paramtypes[i].type))
+ M_LLD(rd->argintregs[s1], REG_SP, j * 8);
+ else
+ M_DLD(rd->argfltregs[s1], REG_SP, j * 8);
+
+ j++;
+ }
+ }
M_ALD(REG_ITMP3, REG_SP, (INT_ARG_CNT + FLT_ARG_CNT) * 8);
if (md->params[i].inmemory) {
s1 = md->params[i].regoff + stackframesize + 1; /* + 1 (RA) */
s2 = nmd->params[j].regoff;
- M_DLD(REG_FTMP1, REG_SP, s1 * 8);
- M_DST(REG_FTMP1, REG_SP, s2 * 8);
+
+ if (IS_2_WORD_TYPE(t)) {
+ M_DLD(REG_FTMP1, REG_SP, s1 * 8);
+ M_DST(REG_FTMP1, REG_SP, s2 * 8);
+ } else {
+ M_FLD(REG_FTMP1, REG_SP, s1 * 8);
+ M_FST(REG_FTMP1, REG_SP, s2 * 8);
+ }
}
}
}
M_MOV_IMM(codegen_finish_native_call, REG_ITMP1);
M_CALL(REG_ITMP1);
+#if !defined(NDEBUG)
/* generate call trace */
if (opt_verbosecall) {
M_MOV_IMM(builtin_displaymethodstop, REG_ITMP1);
M_CALL(REG_ITMP1);
}
+#endif /* !defined(NDEBUG) */
/* check for exception */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
M_MOV_IMM(builtin_get_exceptionptrptr, REG_ITMP3);
M_CALL(REG_ITMP3);
#else
/* handle exception */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
M_LST(REG_ITMP2, REG_SP, 0 * 8);
M_MOV_IMM(builtin_get_exceptionptrptr, REG_ITMP3);
M_CALL(REG_ITMP3);
ptrint mcode;
u1 *savedmcodeptr;
u1 *tmpmcodeptr;
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
s4 disp;
#endif
/* move pointer to java_objectheader onto stack */
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
+#if defined(ENABLE_THREADS)
/* create a virtual java_objectheader */
- (void) dseg_addaddress(cd, get_dummyLR()); /* monitorPtr */
- disp = dseg_addaddress(cd, NULL); /* vftbl */
+ (void) dseg_addaddress(cd, NULL); /* flcword */
+ (void) dseg_addaddress(cd, lock_get_initial_lock_word()); /* monitorPtr */
+ disp = dseg_addaddress(cd, NULL); /* vftbl */
- x86_64_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, REG_ITMP3);
+ emit_lea_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + disp, REG_ITMP3);
M_PUSH(REG_ITMP3);
#else
M_PUSH_IMM(0);
}
}
- codegen_finish(m, cd, (s4) ((u1 *) cd->mcodeptr - cd->mcodebase));
+ codegen_finish(jd);
- return cd->code->entrypoint;
+ return jd->code->entrypoint;
}