-/* vm/jit/x86_64/emitfuncs.c - x86_64 code emitter functions
+/* src/vm/jit/x86_64/emitfuncs.c - x86_64 code emitter functions
- Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
- R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
- C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
- Institut f. Computersprachen - TU Wien
+ Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
+ E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
+ J. Wenninger, Institut f. Computersprachen - TU Wien
This file is part of CACAO.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA.
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
- Contact: cacao@complang.tuwien.ac.at
+ Contact: cacao@cacaojvm.org
Authors: Christian Thalinger
- $Id: emitfuncs.c 2071 2005-03-24 17:56:17Z christian $
+ Changes:
+
+ $Id: emitfuncs.c 4789 2006-04-18 20:34:52Z twisti $
*/
+#include "vm/types.h"
+
+#include "md-abi.h"
+
+#include "vm/jit/codegen-common.h"
#include "vm/jit/jit.h"
#include "vm/jit/x86_64/codegen.h"
#include "vm/jit/x86_64/emitfuncs.h"
-#include "vm/jit/x86_64/types.h"
+
+
+/* code generation functions **************************************************/
+
+/* emit_load_s1 ****************************************************************
+
+ Emits a possible load of the first source operand.
+
+*******************************************************************************/
+
+s4 emit_load_s1(jitdata *jd, instruction *iptr, stackptr src, s4 tempreg)
+{
+	codegendata *cd;
+	s4           disp;
+	s4           reg;
+
+	/* get required compiler data */
+
+	cd = jd->cd;
+
+	/* If the operand was spilled to the stack, reload it into
+	   tempreg and return that register; otherwise the value already
+	   lives in a register and we simply return its number. */
+
+	if (src->flags & INMEMORY) {
+		COUNT_SPILLS;
+
+		/* stack slots are 8 bytes wide */
+
+		disp = src->regoff * 8;
+
+		if (IS_FLT_DBL_TYPE(src->type)) {
+			M_DLD(tempreg, REG_SP, disp);
+
+		} else {
+			/* 32-bit load for int types, 64-bit load otherwise */
+
+			if (IS_INT_TYPE(src->type))
+				M_ILD(tempreg, REG_SP, disp);
+			else
+				M_LLD(tempreg, REG_SP, disp);
+		}
+
+		reg = tempreg;
+	} else
+		reg = src->regoff;
+
+	return reg;
+}
+
+
+/* emit_load_s2 ****************************************************************
+
+ Emits a possible load of the second source operand.
+
+*******************************************************************************/
+
+s4 emit_load_s2(jitdata *jd, instruction *iptr, stackptr src, s4 tempreg)
+{
+	codegendata *cd;
+	s4           disp;
+	s4           reg;
+
+	/* get required compiler data */
+
+	cd = jd->cd;
+
+	/* Same logic as emit_load_s1, applied to the second operand:
+	   reload a spilled value into tempreg, or return the register
+	   the value is already in. */
+
+	if (src->flags & INMEMORY) {
+		COUNT_SPILLS;
+
+		/* stack slots are 8 bytes wide */
+
+		disp = src->regoff * 8;
+
+		if (IS_FLT_DBL_TYPE(src->type)) {
+			M_DLD(tempreg, REG_SP, disp);
+
+		} else {
+			/* 32-bit load for int types, 64-bit load otherwise */
+
+			if (IS_INT_TYPE(src->type))
+				M_ILD(tempreg, REG_SP, disp);
+			else
+				M_LLD(tempreg, REG_SP, disp);
+		}
+
+		reg = tempreg;
+	} else
+		reg = src->regoff;
+
+	return reg;
+}
+
+
+/* emit_load_s3 ****************************************************************
+
+ Emits a possible load of the third source operand.
+
+*******************************************************************************/
+
+s4 emit_load_s3(jitdata *jd, instruction *iptr, stackptr src, s4 tempreg)
+{
+	codegendata *cd;
+	s4           disp;
+	s4           reg;
+
+	/* get required compiler data */
+
+	cd = jd->cd;
+
+	/* Same logic as emit_load_s1, applied to the third operand:
+	   reload a spilled value into tempreg, or return the register
+	   the value is already in. */
+
+	if (src->flags & INMEMORY) {
+		COUNT_SPILLS;
+
+		/* stack slots are 8 bytes wide */
+
+		disp = src->regoff * 8;
+
+		if (IS_FLT_DBL_TYPE(src->type)) {
+			M_DLD(tempreg, REG_SP, disp);
+
+		} else {
+			/* 32-bit load for int types, 64-bit load otherwise */
+
+			if (IS_INT_TYPE(src->type))
+				M_ILD(tempreg, REG_SP, disp);
+			else
+				M_LLD(tempreg, REG_SP, disp);
+		}
+
+		reg = tempreg;
+	} else
+		reg = src->regoff;
+
+	return reg;
+}
+
+
+/* emit_store ******************************************************************
+
+ This function generates the code to store the result of an
+ operation back into a spilled pseudo-variable. If the
+ pseudo-variable has not been spilled in the first place, this
+ function will generate nothing.
+
+*******************************************************************************/
+
+void emit_store(jitdata *jd, instruction *iptr, stackptr dst, s4 d)
+{
+	codegendata  *cd;
+	registerdata *rd;
+	s4            disp;
+	s4            s;
+	u2            opcode;
+
+	/* get required compiler data */
+
+	cd = jd->cd;
+	rd = jd->rd;
+
+	/* do we have to generate a conditional move? */
+
+	if ((iptr != NULL) && (iptr->opc & ICMD_CONDITION_MASK)) {
+		/* the passed register d is actually the source register */
+
+		s = d;
+
+		/* Only pass the opcode to codegen_reg_of_var to get the real
+		   destination register. */
+
+		opcode = iptr->opc & ICMD_OPCODE_MASK;
+
+		/* get the real destination register */
+
+		d = codegen_reg_of_var(rd, opcode, dst, REG_ITMP1);
+
+		/* and emit the conditional move */
+
+		emit_cmovxx(cd, iptr, s, d);
+	}
+
+	/* spill the result if the destination lives on the stack;
+	   otherwise nothing needs to be emitted */
+
+	if (dst->flags & INMEMORY) {
+		COUNT_SPILLS;
+
+		/* stack slots are 8 bytes wide */
+
+		disp = dst->regoff * 8;
+
+		/* NOTE(review): integer results are stored with a 64-bit
+		   store as well -- presumably fine since each slot is 8
+		   bytes; confirm against the stack frame layout. */
+
+		if (IS_FLT_DBL_TYPE(dst->type))
+			M_DST(d, REG_SP, disp);
+		else
+			M_LST(d, REG_SP, disp);
+	}
+}
+
+
+/* emit_copy *******************************************************************
+
+   Generates a copy of a stack slot or register from source to
+   destination, loading and spilling as required.
+
+*******************************************************************************/
+
+void emit_copy(jitdata *jd, instruction *iptr, stackptr src, stackptr dst)
+{
+	codegendata  *cd;
+	registerdata *rd;
+	s4            s1, d;
+
+	/* get required compiler data */
+
+	cd = jd->cd;
+	rd = jd->rd;
+
+	/* Nothing to do when source and destination are the same
+	   register or the same stack slot. */
+
+	if ((src->regoff != dst->regoff) ||
+		((src->flags ^ dst->flags) & INMEMORY)) {
+		d = codegen_reg_of_var(rd, iptr->opc, dst, REG_IFTMP);
+		s1 = emit_load_s1(jd, iptr, src, d);
+
+		/* register-to-register move only if the load above did not
+		   already place the value in d */
+
+		if (s1 != d) {
+			if (IS_FLT_DBL_TYPE(src->type))
+				M_FMOV(s1, d);
+			else
+				M_MOV(s1, d);
+		}
+
+		/* spill the value if the destination is in memory */
+
+		emit_store(jd, iptr, dst, d);
+	}
+}
+
+
+void emit_cmovxx(codegendata *cd, instruction *iptr, s4 s, s4 d)
+{
+	/* The branch condition is encoded in the upper byte of the
+	   opcode (ICMD_CONDITION_MASK); dispatch on it to emit the
+	   matching conditional move.  Opcodes without a recognized
+	   condition emit nothing. */
+
+	switch ((iptr->opc & ICMD_CONDITION_MASK) >> 8) {
+	case ICMD_IFEQ:
+		M_CMOVEQ(s, d);
+		break;
+	case ICMD_IFNE:
+		M_CMOVNE(s, d);
+		break;
+	case ICMD_IFLT:
+		M_CMOVLT(s, d);
+		break;
+	case ICMD_IFGE:
+		M_CMOVGE(s, d);
+		break;
+	case ICMD_IFGT:
+		M_CMOVGT(s, d);
+		break;
+	case ICMD_IFLE:
+		M_CMOVLE(s, d);
+		break;
+	}
+}
/* code generation functions */
x86_64_alul_imm_reg(cd, alu_op, iptr->val.i, d);
} else {
+#if 0
M_INTMOVE(s1, d);
x86_64_alul_imm_reg(cd, alu_op, iptr->val.i, d);
+#else
+ /* lea addition optimization */
+
+ if ((alu_op == X86_64_ADD) && (s1 != d)) {
+ M_ILEA(s1, iptr->val.i, d);
+
+ } else {
+ M_INTMOVE(s1, d);
+ x86_64_alul_imm_reg(cd, alu_op, iptr->val.i, d);
+ }
+#endif
}
}
}
}
} else {
+#if 0
if (src->flags & INMEMORY) {
x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, d);
x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
x86_64_alu_reg_reg(cd, alu_op, REG_ITMP1, d);
}
+#else
+ if (src->flags & INMEMORY) {
+ x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, d);
+
+ if (IS_IMM32(iptr->val.l)) {
+ x86_64_alu_imm_reg(cd, alu_op, iptr->val.l, d);
+
+ } else {
+ x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
+ x86_64_alu_reg_reg(cd, alu_op, REG_ITMP1, d);
+ }
+
+ } else {
+ if (IS_IMM32(iptr->val.l)) {
+ /* lea addition optimization */
+
+ if ((alu_op == X86_64_ADD) && (s1 != d)) {
+ M_LLEA(s1, iptr->val.l, d);
+
+ } else {
+ M_INTMOVE(s1, d);
+ x86_64_alu_imm_reg(cd, alu_op, iptr->val.l, d);
+ }
+
+ } else {
+ M_INTMOVE(s1, d);
+ x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
+ x86_64_alu_reg_reg(cd, alu_op, REG_ITMP1, d);
+ }
+ }
+#endif
}
}
s4 d = iptr->dst->regoff;
s4 d_old;
- M_INTMOVE(RCX, REG_ITMP1); /* save RCX */
+ M_INTMOVE(RCX, REG_ITMP1); /* save RCX */
+
if (iptr->dst->flags & INMEMORY) {
if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
if (s1 == d) {
- x86_64_movl_membase_reg(cd, REG_SP, s2 * 8, RCX);
+ M_ILD(RCX, REG_SP, s2 * 8);
x86_64_shiftl_membase(cd, shift_op, REG_SP, d * 8);
} else {
- x86_64_movl_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_movl_membase_reg(cd, REG_SP, s1 * 8, REG_ITMP2);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_ILD(REG_ITMP2, REG_SP, s1 * 8);
x86_64_shiftl_reg(cd, shift_op, REG_ITMP2);
- x86_64_movl_reg_membase(cd, REG_ITMP2, REG_SP, d * 8);
+ M_IST(REG_ITMP2, REG_SP, d * 8);
}
} else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_movl_reg_membase(cd, s1, REG_SP, d * 8);
+ /* s1 may be equal to RCX */
+ if (s1 == RCX) {
+ if (s2 == d) {
+ M_ILD(REG_ITMP1, REG_SP, s2 * 8);
+ M_IST(s1, REG_SP, d * 8);
+ M_INTMOVE(REG_ITMP1, RCX);
+
+ } else {
+ M_IST(s1, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ }
+
+ } else {
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_IST(s1, REG_SP, d * 8);
+ }
+
x86_64_shiftl_membase(cd, shift_op, REG_SP, d * 8);
} else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
} else {
M_INTMOVE(s2, RCX);
- x86_64_movl_membase_reg(cd, REG_SP, s1 * 8, REG_ITMP2);
+ M_ILD(REG_ITMP2, REG_SP, s1 * 8);
x86_64_shiftl_reg(cd, shift_op, REG_ITMP2);
- x86_64_movl_reg_membase(cd, REG_ITMP2, REG_SP, d * 8);
+ M_IST(REG_ITMP2, REG_SP, d * 8);
}
} else {
+ /* s1 may be equal to RCX */
+ M_IST(s1, REG_SP, d * 8);
M_INTMOVE(s2, RCX);
- x86_64_movl_reg_membase(cd, s1, REG_SP, d * 8);
x86_64_shiftl_membase(cd, shift_op, REG_SP, d * 8);
}
- M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
+
+ M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
} else {
+ d_old = d;
if (d == RCX) {
- d_old = d;
d = REG_ITMP3;
}
if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_movl_membase_reg(cd, REG_SP, s1 * 8, d);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_ILD(d, REG_SP, s1 * 8);
x86_64_shiftl_reg(cd, shift_op, d);
} else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(s1, d); /* maybe src is RCX */
- x86_64_movl_membase_reg(cd, REG_SP, s2 * 8, RCX);
+ /* s1 may be equal to RCX */
+ M_INTMOVE(s1, d);
+ M_ILD(RCX, REG_SP, s2 * 8);
x86_64_shiftl_reg(cd, shift_op, d);
} else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
M_INTMOVE(s2, RCX);
- x86_64_movl_membase_reg(cd, REG_SP, s1 * 8, d);
+ M_ILD(d, REG_SP, s1 * 8);
x86_64_shiftl_reg(cd, shift_op, d);
} else {
+ /* s1 may be equal to RCX */
if (s1 == RCX) {
- M_INTMOVE(s1, d);
- M_INTMOVE(s2, RCX);
+ if (s2 == d) {
+ /* d cannot be used to backup s1 since this would
+ overwrite s2. */
+ M_INTMOVE(s1, REG_ITMP3);
+ M_INTMOVE(s2, RCX);
+ M_INTMOVE(REG_ITMP3, d);
+
+ } else {
+ M_INTMOVE(s1, d);
+ M_INTMOVE(s2, RCX);
+ }
} else {
+ /* d may be equal to s2 */
M_INTMOVE(s2, RCX);
M_INTMOVE(s1, d);
}
x86_64_shiftl_reg(cd, shift_op, d);
}
- if (d_old == RCX) {
+ if (d_old == RCX)
M_INTMOVE(REG_ITMP3, RCX);
-
- } else {
- M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
- }
+ else
+ M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
}
}
s4 s2 = src->regoff;
s4 d = iptr->dst->regoff;
s4 d_old;
+
+ M_INTMOVE(RCX, REG_ITMP1); /* save RCX */
- M_INTMOVE(RCX, REG_ITMP1); /* save RCX */
if (iptr->dst->flags & INMEMORY) {
if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
if (s1 == d) {
- x86_64_mov_membase_reg(cd, REG_SP, s2 * 8, RCX);
+ M_ILD(RCX, REG_SP, s2 * 8);
x86_64_shift_membase(cd, shift_op, REG_SP, d * 8);
} else {
- x86_64_mov_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, REG_ITMP2);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_LLD(REG_ITMP2, REG_SP, s1 * 8);
x86_64_shift_reg(cd, shift_op, REG_ITMP2);
- x86_64_mov_reg_membase(cd, REG_ITMP2, REG_SP, d * 8);
+ M_LST(REG_ITMP2, REG_SP, d * 8);
}
} else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_mov_reg_membase(cd, s1, REG_SP, d * 8);
+ /* s1 may be equal to RCX */
+ if (s1 == RCX) {
+ if (s2 == d) {
+ M_ILD(REG_ITMP1, REG_SP, s2 * 8);
+ M_LST(s1, REG_SP, d * 8);
+ M_INTMOVE(REG_ITMP1, RCX);
+
+ } else {
+ M_LST(s1, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ }
+
+ } else {
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_LST(s1, REG_SP, d * 8);
+ }
+
x86_64_shift_membase(cd, shift_op, REG_SP, d * 8);
} else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
} else {
M_INTMOVE(s2, RCX);
- x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, REG_ITMP2);
+ M_LLD(REG_ITMP2, REG_SP, s1 * 8);
x86_64_shift_reg(cd, shift_op, REG_ITMP2);
- x86_64_mov_reg_membase(cd, REG_ITMP2, REG_SP, d * 8);
+ M_LST(REG_ITMP2, REG_SP, d * 8);
}
} else {
+ /* s1 may be equal to RCX */
+ M_LST(s1, REG_SP, d * 8);
M_INTMOVE(s2, RCX);
- x86_64_mov_reg_membase(cd, s1, REG_SP, d * 8);
x86_64_shift_membase(cd, shift_op, REG_SP, d * 8);
}
- M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
+
+ M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
} else {
+ d_old = d;
if (d == RCX) {
- d_old = d;
d = REG_ITMP3;
}
if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, s2 * 8, RCX);
- x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, d);
+ M_ILD(RCX, REG_SP, s2 * 8);
+ M_LLD(d, REG_SP, s1 * 8);
x86_64_shift_reg(cd, shift_op, d);
} else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(s1, d); /* maybe src is RCX */
- x86_64_mov_membase_reg(cd, REG_SP, s2 * 8, RCX);
+ /* s1 may be equal to RCX */
+ M_INTMOVE(s1, d);
+ M_ILD(RCX, REG_SP, s2 * 8);
x86_64_shift_reg(cd, shift_op, d);
} else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
M_INTMOVE(s2, RCX);
- x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, d);
+ M_LLD(d, REG_SP, s1 * 8);
x86_64_shift_reg(cd, shift_op, d);
} else {
+ /* s1 may be equal to RCX */
if (s1 == RCX) {
- M_INTMOVE(s1, d);
- M_INTMOVE(s2, RCX);
+ if (s2 == d) {
+ /* d cannot be used to backup s1 since this would
+ overwrite s2. */
+ M_INTMOVE(s1, REG_ITMP3);
+ M_INTMOVE(s2, RCX);
+ M_INTMOVE(REG_ITMP3, d);
+
+ } else {
+ M_INTMOVE(s1, d);
+ M_INTMOVE(s2, RCX);
+ }
+
} else {
+ /* d may be equal to s2 */
M_INTMOVE(s2, RCX);
M_INTMOVE(s1, d);
}
x86_64_shift_reg(cd, shift_op, d);
}
- if (d_old == RCX) {
+ if (d_old == RCX)
M_INTMOVE(REG_ITMP3, RCX);
-
- } else {
- M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
- }
+ else
+ M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
}
}
void x86_64_emit_ifcc(codegendata *cd, s4 if_op, stackptr src, instruction *iptr)
{
- if (src->flags & INMEMORY) {
- x86_64_alul_imm_membase(cd, X86_64_CMP, iptr->val.i, REG_SP, src->regoff * 8);
+ if (src->flags & INMEMORY)
+ M_ICMP_IMM_MEMBASE(iptr->val.i, REG_SP, src->regoff * 8);
+ else {
+ if (iptr->val.i == 0)
+ M_ITEST(src->regoff);
+ else
+ M_ICMP_IMM(iptr->val.i, src->regoff);
+ }
- } else {
- if (iptr->val.i == 0) {
- x86_64_testl_reg_reg(cd, src->regoff, src->regoff);
+ /* If the conditional branch is part of an if-converted block,
+ don't generate the actual branch. */
- } else {
- x86_64_alul_imm_reg(cd, X86_64_CMP, iptr->val.i, src->regoff);
- }
+ if ((iptr->opc & ICMD_CONDITION_MASK) == 0) {
+ x86_64_jcc(cd, if_op, 0);
+ codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
}
- x86_64_jcc(cd, if_op, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
}
}
}
x86_64_jcc(cd, if_op, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
}
-void x86_64_emit_if_icmpcc(codegendata *cd, s4 if_op, stackptr src, instruction *iptr)
+/* emit_if_icmpcc **************************************************************
+
+ Generate ICMD_IF_ICMPxx instructions.
+
+*******************************************************************************/
+
+void x86_64_emit_if_icmpcc(codegendata *cd, s4 if_op, stackptr src,
+ instruction *iptr)
{
s4 s1 = src->prev->regoff;
s4 s2 = src->regoff;
} else {
x86_64_alul_reg_reg(cd, X86_64_CMP, s2, s1);
}
- x86_64_jcc(cd, if_op, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+
+
+ /* If the conditional branch is part of an if-converted block,
+ don't generate the actual branch. */
+
+ if ((iptr->opc & ICMD_CONDITION_MASK) == 0) {
+ x86_64_jcc(cd, if_op, 0);
+ codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
+ }
}
x86_64_alu_reg_reg(cd, X86_64_CMP, s2, s1);
}
x86_64_jcc(cd, if_op, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+ codegen_addreference(cd, (basicblock *) iptr->target, cd->mcodeptr);
}
-/*
- * mov ops
- */
+/* low-level code emitter functions *******************************************/
+
void x86_64_mov_reg_reg(codegendata *cd, s8 reg, s8 dreg) {
x86_64_emit_rex(1,(reg),0,(dreg));
*(cd->mcodeptr++) = 0x89;
}
-void x86_64_movl_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
- x86_64_emit_rex(0,(reg),0,(basereg));
- *(cd->mcodeptr++) = 0x8b;
- x86_64_emit_membase((basereg),(disp),(reg));
-}
-
-
/*
* this one is for INVOKEVIRTUAL/INVOKEINTERFACE to have a
* constant membase immediate length of 32bit
 void x86_64_mov_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg) {
 	x86_64_emit_rex(1,(reg),0,(basereg));
 	*(cd->mcodeptr++) = 0x8b;
-	x86_64_address_byte(2, (reg), (basereg));
-	x86_64_emit_imm32((disp));
+	/* use the membase32 helper so the displacement is always 32-bit */
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
+void x86_64_movl_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg)
+{
+	/* 8b /r: 32-bit mov mem -> reg (REX.W = 0) */
+	x86_64_emit_rex(0,(reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x8b;
+	x86_64_emit_membase((basereg),(disp),(reg));
+}
+
+
+/* ATTENTION: Always emit a REX byte, because the instruction size can
+ be smaller when all register indexes are smaller than 7. */
+void x86_64_movl_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 reg)
+{
+	/* force a REX byte and a 32-bit displacement so the emitted
+	   instruction size stays constant regardless of registers used */
+	x86_64_emit_byte_rex((reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x8b;
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
}
+void x86_64_mov_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+	/* 89 /r: 64-bit mov reg -> mem, with a forced 32-bit displacement */
+	x86_64_emit_rex(1,(reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x89;
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movl_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
x86_64_emit_rex(0,(reg),0,(basereg));
*(cd->mcodeptr++) = 0x89;
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movl_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+	/* always emits a REX byte so the instruction size stays constant */
+	x86_64_emit_byte_rex((reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x89;
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_mov_memindex_reg(codegendata *cd, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg) {
x86_64_emit_rex(1,(reg),(indexreg),(basereg));
*(cd->mcodeptr++) = 0x8b;
}
+void x86_64_mov_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
+	/* c7 /0: mov imm32 -> mem (REX.W = 1) with 32-bit displacement */
+	x86_64_emit_rex(1,0,0,(basereg));
+	*(cd->mcodeptr++) = 0xc7;
+	x86_64_emit_membase32((basereg),(disp),0);
+	x86_64_emit_imm32((imm));
+}
+
+
void x86_64_movl_imm_membase(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
x86_64_emit_rex(0,0,0,(basereg));
*(cd->mcodeptr++) = 0xc7;
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movl_imm_membase32(codegendata *cd, s8 imm, s8 basereg, s8 disp) {
+	/* always emits a REX byte so the instruction size stays constant */
+	x86_64_emit_byte_rex(0,0,(basereg));
+	*(cd->mcodeptr++) = 0xc7;
+	x86_64_emit_membase32((basereg),(disp),0);
+	x86_64_emit_imm32((imm));
+}
+
+
void x86_64_movsbq_reg_reg(codegendata *cd, s8 reg, s8 dreg) {
x86_64_emit_rex(1,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
}
+void x86_64_alu_imm32_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
+	/* like x86_64_alu_imm_reg, but always uses the imm32 encoding
+	   (0x81 /opc) instead of the shorter imm8 form */
+	x86_64_emit_rex(1,0,0,(dreg));
+	*(cd->mcodeptr++) = 0x81;
+	x86_64_emit_reg((opc),(dreg));
+	x86_64_emit_imm32((imm));
+}
+
+
void x86_64_alul_imm_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
if (IS_IMM8(imm)) {
x86_64_emit_rex(0,0,0,(dreg));
}
-void x86_64_inc_membase(codegendata *cd, s8 basereg, s8 disp) {
-	x86_64_emit_rex(1,(basereg),0,0);
+void x86_64_inc_membase(codegendata *cd, s8 basereg, s8 disp)
+{
+	/* REX fix: basereg belongs in the B field (last argument); the
+	   old code wrongly passed it in the R field */
+	x86_64_emit_rex(1,0,0,(basereg));
 	*(cd->mcodeptr++) = 0xff;
 	x86_64_emit_membase((basereg),(disp),0);
 }
-void x86_64_incl_membase(codegendata *cd, s8 basereg, s8 disp) {
-	x86_64_emit_rex(0,(basereg),0,0);
+void x86_64_incl_membase(codegendata *cd, s8 basereg, s8 disp)
+{
+	/* REX fix: basereg belongs in the B field (last argument); the
+	   old code wrongly passed it in the R field */
+	x86_64_emit_rex(0,0,0,(basereg));
 	*(cd->mcodeptr++) = 0xff;
 	x86_64_emit_membase((basereg),(disp),0);
 }
}
-void x86_64_cmovcc_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg) {
+void x86_64_cmovcc_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg)
+{
x86_64_emit_rex(1,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
*(cd->mcodeptr++) = (0x40 + (opc));
}
-void x86_64_cmovccl_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg) {
+void x86_64_cmovccl_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg)
+{
x86_64_emit_rex(0,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
*(cd->mcodeptr++) = (0x40 + (opc));
}
-void x86_64_call_mem(codegendata *cd, s8 mem) {
+void x86_64_call_mem(codegendata *cd, ptrint mem)
+{
+	/* ff /2: indirect call through an absolute memory operand */
 	*(cd->mcodeptr++) = 0xff;
 	x86_64_emit_mem(2,(mem));
 }
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movss_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+	/* f3 0f 11: movss xmm -> mem; forced REX keeps the size constant */
+	*(cd->mcodeptr++) = 0xf3;
+	x86_64_emit_byte_rex((reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0x11;
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movsd_reg_membase(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
*(cd->mcodeptr++) = 0xf2;
x86_64_emit_rex(0,(reg),0,(basereg));
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movsd_reg_membase32(codegendata *cd, s8 reg, s8 basereg, s8 disp) {
+	/* f2 0f 11: movsd xmm -> mem; forced REX keeps the size constant */
+	*(cd->mcodeptr++) = 0xf2;
+	x86_64_emit_byte_rex((reg),0,(basereg));
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0x11;
+	x86_64_emit_membase32((basereg),(disp),(reg));
+}
+
+
void x86_64_movss_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
*(cd->mcodeptr++) = 0xf3;
x86_64_emit_rex(0,(dreg),0,(basereg));
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movss_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
+	/* f3 0f 10: movss mem -> xmm; forced REX keeps the size constant */
+	*(cd->mcodeptr++) = 0xf3;
+	x86_64_emit_byte_rex((dreg),0,(basereg));
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0x10;
+	x86_64_emit_membase32((basereg),(disp),(dreg));
+}
+
+
void x86_64_movlps_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
x86_64_emit_rex(0,(dreg),0,(basereg));
*(cd->mcodeptr++) = 0x0f;
}
+/* Always emit a REX byte, because the instruction size can be smaller when */
+/* all register indexes are smaller than 7. */
+void x86_64_movsd_membase32_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
+	/* f2 0f 10: movsd mem -> xmm; forced REX keeps the size constant */
+	*(cd->mcodeptr++) = 0xf2;
+	x86_64_emit_byte_rex((dreg),0,(basereg));
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0x10;
+	x86_64_emit_membase32((basereg),(disp),(dreg));
+}
+
+
void x86_64_movlpd_membase_reg(codegendata *cd, s8 basereg, s8 disp, s8 dreg) {
*(cd->mcodeptr++) = 0x66;
x86_64_emit_rex(0,(dreg),0,(basereg));
}
+/* system instructions ********************************************************/
+
+void emit_rdtsc(codegendata *cd)
+{
+	/* 0f 31: RDTSC -- read the time-stamp counter into EDX:EAX */
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0x31;
+}
+
+
/*
* These are local overrides for various environment variables in Emacs.
* Please do not remove this and leave it at the end of the file, where