/* src/vm/jit/x86_64/emit.c - x86_64 code emitter functions
- Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
- C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
- E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
- J. Wenninger, Institut f. Computersprachen - TU Wien
+ Copyright (C) 1996-2005, 2006, 2007, 2008, 2009
+ CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
This file is part of CACAO.
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- $Id: emit.c 7486 2007-03-08 13:50:07Z twisti $
-
*/
#include "config.h"
#include <assert.h>
#include "vm/types.h"
+#include "vm/os.hpp"
#include "md-abi.h"
#include "vm/jit/x86_64/codegen.h"
#include "vm/jit/x86_64/emit.h"
-#include "mm/memory.h"
+#include "mm/memory.hpp"
-#if defined(ENABLE_THREADS)
-# include "threads/native/lock.h"
-#endif
+#include "threads/lock.hpp"
-#include "vm/builtin.h"
+#include "vm/options.h"
+#include "vm/jit/abi.h"
#include "vm/jit/abi-asm.h"
#include "vm/jit/asmpart.h"
-#include "vm/jit/codegen-common.h"
-#include "vm/jit/emit-common.h"
-#include "vm/jit/jit.h"
-#include "vm/jit/replace.h"
-
-#include "vmcore/options.h"
+#include "vm/jit/codegen-common.hpp"
+#include "vm/jit/emit-common.hpp"
+#include "vm/jit/jit.hpp"
+#include "vm/jit/patcher-common.hpp"
+#include "vm/jit/replace.hpp"
+#include "vm/jit/trace.hpp"
+#include "vm/jit/trap.hpp"
/* emit_load *******************************************************************
if (IS_INMEMORY(src->flags)) {
COUNT_SPILLS;
- disp = src->vv.regoff * 8;
+ disp = src->vv.regoff;
switch (src->type) {
case TYPE_INT:
*******************************************************************************/
-inline void emit_store(jitdata *jd, instruction *iptr, varinfo *dst, s4 d)
+void emit_store(jitdata *jd, instruction *iptr, varinfo *dst, s4 d)
{
codegendata *cd;
s4 disp;
-#if 0
- s4 s;
- u2 opcode;
-#endif
/* get required compiler data */
cd = jd->cd;
-#if 0
- /* do we have to generate a conditional move? */
-
- if ((iptr != NULL) && (iptr->opc & ICMD_CONDITION_MASK)) {
- /* the passed register d is actually the source register */
-
- s = d;
-
- /* Only pass the opcode to codegen_reg_of_var to get the real
- destination register. */
-
- opcode = iptr->opc & ICMD_OPCODE_MASK;
-
- /* get the real destination register */
-
- d = codegen_reg_of_var(rd, opcode, dst, REG_ITMP1);
-
- /* and emit the conditional move */
-
- emit_cmovxx(cd, iptr, s, d);
- }
-#endif
-
if (IS_INMEMORY(dst->flags)) {
COUNT_SPILLS;
- disp = dst->vv.regoff * 8;
+ disp = dst->vv.regoff;
switch (dst->type) {
case TYPE_INT:
*******************************************************************************/
-void emit_copy(jitdata *jd, instruction *iptr, varinfo *src, varinfo *dst)
+void emit_copy(jitdata *jd, instruction *iptr)
{
- codegendata *cd;
- s4 s1, d;
+ codegendata *cd;
+ varinfo *src;
+ varinfo *dst;
+ s4 s1, d;
/* get required compiler data */
cd = jd->cd;
+ /* get source and destination variables */
+
+ src = VAROP(iptr->s1);
+ dst = VAROP(iptr->dst);
+
if ((src->vv.regoff != dst->vv.regoff) ||
((src->flags ^ dst->flags) & INMEMORY)) {
+ if ((src->type == TYPE_RET) || (dst->type == TYPE_RET)) {
+ /* emit nothing, as the value won't be used anyway */
+ return;
+ }
+
/* If one of the variables resides in memory, we can eliminate
the register move from/to the temporary register with the
order of getting the destination register and the load. */
if (IS_INMEMORY(src->flags)) {
- d = codegen_reg_of_var(iptr->opc, dst, REG_IFTMP);
+ d = codegen_reg_of_var(iptr->opc, dst, REG_IFTMP);
s1 = emit_load(jd, iptr, src, d);
}
else {
s1 = emit_load(jd, iptr, src, REG_IFTMP);
- d = codegen_reg_of_var(iptr->opc, dst, s1);
+ d = codegen_reg_of_var(iptr->opc, dst, s1);
}
if (s1 != d) {
}
+/* emit_branch *****************************************************************
+
+   Emits the code for conditional and unconditional branches.
+
+*******************************************************************************/
+
+void emit_branch(codegendata *cd, s4 disp, s4 condition, s4 reg, u4 options)
+{
+	s4 branchdisp;
+
+	/* NOTE: A displacement overflow cannot happen. */
+
+	/* reg and options are unused on x86_64; they are part of the
+	   common emit_branch interface */
+
+	/* check which branch to generate */
+
+	if (condition == BRANCH_UNCONDITIONAL) {
+
+		/* The passed disp is relative to the start of the branch
+		   instruction; subtract the instruction size to obtain the
+		   rel32 operand, which is relative to the instruction end. */
+
+		branchdisp = disp - BRANCH_UNCONDITIONAL_SIZE;
+
+		M_JMP_IMM(branchdisp);
+	}
+	else {
+		/* calculate the different displacements */
+
+		branchdisp = disp - BRANCH_CONDITIONAL_SIZE;
+
+		switch (condition) {
+		case BRANCH_EQ:
+			M_BEQ(branchdisp);
+			break;
+		case BRANCH_NE:
+			M_BNE(branchdisp);
+			break;
+		case BRANCH_LT:
+			M_BLT(branchdisp);
+			break;
+		case BRANCH_GE:
+			M_BGE(branchdisp);
+			break;
+		case BRANCH_GT:
+			M_BGT(branchdisp);
+			break;
+		case BRANCH_LE:
+			M_BLE(branchdisp);
+			break;
+		case BRANCH_ULT:
+			M_BULT(branchdisp);
+			break;
+		case BRANCH_ULE:
+			M_BULE(branchdisp);
+			break;
+		case BRANCH_UGE:
+			M_BUGE(branchdisp);
+			break;
+		case BRANCH_UGT:
+			M_BUGT(branchdisp);
+			break;
+		default:
+			vm_abort("emit_branch: unknown condition %d", condition);
+		}
+	}
+}
+
+
/* emit_arithmetic_check *******************************************************
Emit an ArithmeticException check.
{
	if (INSTRUCTION_MUST_CHECK(iptr)) {
		M_TEST(reg);
-		M_BEQ(0);
-		codegen_add_arithmeticexception_ref(cd);
+		/* Divisor non-zero: branch over the 8-byte trap load below.
+		   The faulting load from the hardwired TRAP_ address replaces
+		   the old exception-reference list mechanism and is mapped to
+		   an ArithmeticException by the VM's trap handling. */
+		M_BNE(8);
+		M_ALD_MEM(reg, TRAP_ArithmeticException);
	}
}
void emit_arrayindexoutofbounds_check(codegendata *cd, instruction *iptr, s4 s1, s4 s2)
{
	if (INSTRUCTION_MUST_CHECK(iptr)) {
-		M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size));
+		/* struct was renamed: java_arrayheader -> java_array_t */
+		M_ILD(REG_ITMP3, s1, OFFSET(java_array_t, size));
		M_ICMP(REG_ITMP3, s2);
-		M_BAE(0);
-		codegen_add_arrayindexoutofboundsexception_ref(cd, s2);
+		/* Unsigned compare also rejects negative indices; skip the
+		   8-byte trap load while index < length.  s2 (the index
+		   register) is used as the load's destination — NOTE(review):
+		   presumably so the trap handler can recover the offending
+		   index; confirm against the trap decoding code. */
+		M_BULT(8);
+		M_ALD_MEM(s2, TRAP_ArrayIndexOutOfBoundsException);
+	}
+}
+
+
+/* emit_arraystore_check *******************************************************
+
+   Emit an ArrayStoreException check.
+
+*******************************************************************************/
+
+void emit_arraystore_check(codegendata *cd, instruction *iptr)
+{
+	if (INSTRUCTION_MUST_CHECK(iptr)) {
+		/* Branch over the 8-byte trap load when REG_RESULT is
+		   non-zero, i.e. the preceding store-compatibility check
+		   succeeded — NOTE(review): the canstore origin of REG_RESULT
+		   is inferred from the call site; confirm in codegen.c. */
+		M_TEST(REG_RESULT);
+		M_BNE(8);
+		M_ALD_MEM(REG_RESULT, TRAP_ArrayStoreException);
	}
}
void emit_classcast_check(codegendata *cd, instruction *iptr, s4 condition, s4 reg, s4 s1)
{
-	vm_abort("IMPLEMENT ME!");
+	if (INSTRUCTION_MUST_CHECK(iptr)) {
+		/* Branch over the 8-byte trap load with the INVERSE of the
+		   passed condition: the trap is only reached when the type
+		   check failed. */
+		switch (condition) {
+			case BRANCH_LE:
+				M_BGT(8);
+				break;
+			case BRANCH_GE:
+				M_BLT(8);
+				break;
+			case BRANCH_EQ:
+				M_BNE(8);
+				break;
+			case BRANCH_NE:
+				M_BEQ(8);
+				break;
+			case BRANCH_UGT:
+				M_BULE(8);
+				break;
+			default:
+				vm_abort("emit_classcast_check: unknown condition %d", condition);
+		}
+		/* Faulting load from the hardwired trap address; s1 is the
+		   load's destination register — NOTE(review): presumably it
+		   lets the trap handler recover the offending object. */
+		M_ALD_MEM(s1, TRAP_ClassCastException);
+	}
}
{
	if (INSTRUCTION_MUST_CHECK(iptr)) {
		M_TEST(reg);
-		M_BEQ(0);
-		codegen_add_nullpointerexception_ref(cd);
+		/* Reference non-null: skip the 8-byte trap load that raises
+		   the NullPointerException via the hardwired trap address
+		   (replaces the old exception-reference list mechanism). */
+		M_BNE(8);
+		M_ALD_MEM(reg, TRAP_NullPointerException);
	}
}
-/* emit_exception_stubs ********************************************************
+/* emit_exception_check ********************************************************
- Generates the code for the exception stubs.
+ Emit an Exception check.
*******************************************************************************/
-void emit_exception_stubs(jitdata *jd)
+void emit_exception_check(codegendata *cd, instruction *iptr)
{
- codegendata *cd;
- registerdata *rd;
- exceptionref *er;
- s4 branchmpc;
- s4 targetmpc;
- s4 targetdisp;
-
- /* get required compiler data */
-
- cd = jd->cd;
- rd = jd->rd;
-
- /* generate exception stubs */
-
- targetdisp = 0;
-
- for (er = cd->exceptionrefs; er != NULL; er = er->next) {
- /* back-patch the branch to this exception code */
-
- branchmpc = er->branchpos;
- targetmpc = cd->mcodeptr - cd->mcodebase;
-
- md_codegen_patch_branch(cd, branchmpc, targetmpc);
-
- MCODECHECK(512);
-
- /* Check if the exception is an
- ArrayIndexOutOfBoundsException. If so, move index register
- into a4. */
-
- if (er->reg != -1)
- M_MOV(er->reg, rd->argintregs[4]);
-
- /* calcuate exception address */
-
- M_MOV_IMM(0, rd->argintregs[3]);
- dseg_adddata(cd);
- M_AADD_IMM32(er->branchpos - 6, rd->argintregs[3]);
-
- /* move function to call into REG_ITMP3 */
-
- M_MOV_IMM(er->function, REG_ITMP3);
-
- if (targetdisp == 0) {
- targetdisp = cd->mcodeptr - cd->mcodebase;
-
- emit_lea_membase_reg(cd, RIP, -((cd->mcodeptr + 7) - cd->mcodebase), rd->argintregs[0]);
- M_MOV(REG_SP, rd->argintregs[1]);
- M_ALD(rd->argintregs[2], REG_SP, cd->stackframesize * 8);
-
- M_ASUB_IMM(2 * 8, REG_SP);
- M_AST(rd->argintregs[3], REG_SP, 0 * 8); /* store XPC */
-
- M_CALL(REG_ITMP3);
-
- M_ALD(REG_ITMP2_XPC, REG_SP, 0 * 8);
- M_AADD_IMM(2 * 8, REG_SP);
-
- M_MOV_IMM(asm_handle_exception, REG_ITMP3);
- M_JMP(REG_ITMP3);
- }
- else {
- M_JMP_IMM((cd->mcodebase + targetdisp) -
- (cd->mcodeptr + PATCHER_CALL_SIZE));
- }
+	if (INSTRUCTION_MUST_CHECK(iptr)) {
+		/* The trap load is only reached when REG_RESULT is zero,
+		   i.e. the preceding call returned NULL — NOTE(review): the
+		   NULL-means-pending-exception convention is inferred;
+		   confirm against the builtin/native call sites. */
+		M_TEST(REG_RESULT);
+		M_BNE(8);
+		M_ALD_MEM(REG_RESULT, TRAP_CHECK_EXCEPTION);
	}
}
-/* emit_patcher_stubs **********************************************************
+/* emit_trap_compiler **********************************************************
- Generates the code for the patcher stubs.
+ Emit a trap instruction which calls the JIT compiler.
*******************************************************************************/
-void emit_patcher_stubs(jitdata *jd)
+void emit_trap_compiler(codegendata *cd)
{
- codegendata *cd;
- patchref *pref;
- u8 mcode;
- u1 *savedmcodeptr;
- u1 *tmpmcodeptr;
- s4 targetdisp;
- s4 disp;
-
- /* get required compiler data */
-
- cd = jd->cd;
-
- /* generate code patching stub call code */
-
- targetdisp = 0;
-
- for (pref = cd->patchrefs; pref != NULL; pref = pref->next) {
- /* check size of code segment */
-
- MCODECHECK(512);
-
- /* Get machine code which is patched back in later. A
- `call rel32' is 5 bytes long (but read 8 bytes). */
-
- savedmcodeptr = cd->mcodebase + pref->branchpos;
- mcode = *((u8 *) savedmcodeptr);
-
- /* patch in `call rel32' to call the following code */
-
- tmpmcodeptr = cd->mcodeptr; /* save current mcodeptr */
- cd->mcodeptr = savedmcodeptr; /* set mcodeptr to patch position */
-
- M_CALL_IMM(tmpmcodeptr - (savedmcodeptr + PATCHER_CALL_SIZE));
-
- cd->mcodeptr = tmpmcodeptr; /* restore the current mcodeptr */
-
- /* move pointer to java_objectheader onto stack */
-
-#if defined(ENABLE_THREADS)
- /* create a virtual java_objectheader */
-
- (void) dseg_add_unique_address(cd, NULL); /* flcword */
- (void) dseg_add_unique_address(cd, lock_get_initial_lock_word());
- disp = dseg_add_unique_address(cd, NULL); /* vftbl */
-
- emit_lea_membase_reg(cd, RIP, -((cd->mcodeptr + 7) - cd->mcodebase) + disp, REG_ITMP3);
- M_PUSH(REG_ITMP3);
-#else
- M_PUSH_IMM(0);
-#endif
-
- /* move machine code bytes and classinfo pointer into registers */
-
- M_MOV_IMM(mcode, REG_ITMP3);
- M_PUSH(REG_ITMP3);
-
- M_MOV_IMM(pref->ref, REG_ITMP3);
- M_PUSH(REG_ITMP3);
-
- M_MOV_IMM(pref->disp, REG_ITMP3);
- M_PUSH(REG_ITMP3);
-
- M_MOV_IMM(pref->patcher, REG_ITMP3);
- M_PUSH(REG_ITMP3);
-
- if (targetdisp == 0) {
- targetdisp = cd->mcodeptr - cd->mcodebase;
-
- M_MOV_IMM(asm_patcher_wrapper, REG_ITMP3);
- M_JMP(REG_ITMP3);
- }
- else {
- M_JMP_IMM((cd->mcodebase + targetdisp) -
- (cd->mcodeptr + PATCHER_CALL_SIZE));
- }
- }
+	/* Faulting load from the hardwired TRAP_COMPILER address invokes
+	   the JIT compiler for this method; REG_METHODPTR is merely the
+	   load's destination register. */
+	M_ALD_MEM(REG_METHODPTR, TRAP_COMPILER);
}
-/* emit_replacement_stubs ******************************************************
+/* emit_patcher_alignment ******************************************************
- Generates the code for the replacement stubs.
+ Emit NOP to ensure placement at an even address.
*******************************************************************************/
-#if defined(ENABLE_REPLACEMENT)
-void emit_replacement_stubs(jitdata *jd)
+void emit_patcher_alignment(codegendata *cd)
{
- codegendata *cd;
- codeinfo *code;
- rplpoint *rplp;
- s4 disp;
- s4 i;
-#if !defined(NDEBUG)
- u1 *savedmcodeptr;
-#endif
-
- /* get required compiler data */
-
- cd = jd->cd;
- code = jd->code;
-
- rplp = code->rplpoints;
-
- /* store beginning of replacement stubs */
-
- code->replacementstubs = (u1*) (cd->mcodeptr - cd->mcodebase);
+	/* Pad with a one-byte NOP so the 2-byte patcher trap emitted next
+	   starts at an even address — NOTE(review): presumably so both
+	   trap bytes can be patched back atomically; confirm. */
+	if ((uintptr_t) cd->mcodeptr & 1)
+		M_NOP;
+}
- for (i = 0; i < code->rplpointcount; ++i, ++rplp) {
- /* do not generate stubs for non-trappable points */
- if (rplp->flags & RPLPOINT_FLAG_NOTRAP)
- continue;
+/* emit_trap *******************************************************************
- /* check code segment size */
+ Emit a trap instruction and return the original machine code.
- MCODECHECK(512);
+*******************************************************************************/
- /* note start of stub code */
+uint32_t emit_trap(codegendata *cd)
+{
+	uint16_t mcode;
-#if !defined(NDEBUG)
-	savedmcodeptr = cd->mcodeptr;
-#endif
+	/* Get machine code which is patched back in later.  The trap is 2
+	   bytes long; only the low 16 bits of the uint32_t return value
+	   are meaningful. */
-	/* push address of `rplpoint` struct */
-
-	M_MOV_IMM(rplp, REG_ITMP3);
-	M_PUSH(REG_ITMP3);
+	mcode = *((uint16_t *) cd->mcodeptr);
-	/* jump to replacement function */
+	/* XXX This needs to be changed to INT3 when the debugging problems
+	   with gdb are resolved. */
-	M_MOV_IMM(asm_replacement_out, REG_ITMP3);
-	M_PUSH(REG_ITMP3);
-	M_RET;
+	M_UD2;
-	assert((cd->mcodeptr - savedmcodeptr) == REPLACEMENT_STUB_SIZE);
-	}
+	return mcode;
}
-#endif /* defined(ENABLE_REPLACEMENT) */
/* emit_verbosecall_enter ******************************************************
void emit_verbosecall_enter(jitdata *jd)
{
methodinfo *m;
+ codeinfo *code;
codegendata *cd;
registerdata *rd;
methoddesc *md;
- s4 i, j, k;
+ s4 stackframesize;
+ s4 i, s;
/* get required compiler data */
- m = jd->m;
- cd = jd->cd;
- rd = jd->rd;
+ m = jd->m;
+ code = jd->code;
+ cd = jd->cd;
+ rd = jd->rd;
md = m->parseddesc;
M_NOP;
- /* additional +1 is for 16-byte stack alignment */
+ /* keep 16-byte stack alignment */
+
+ stackframesize = md->paramcount + ARG_CNT + TMP_CNT;
+ ALIGN_2(stackframesize);
- M_LSUB_IMM((ARG_CNT + TMP_CNT + 1 + 1) * 8, REG_SP);
+ M_LSUB_IMM(stackframesize * 8, REG_SP);
/* save argument registers */
- for (i = 0; i < INT_ARG_CNT; i++)
- M_LST(rd->argintregs[i], REG_SP, (1 + i) * 8);
+ for (i = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s = md->params[i].regoff;
+
+ switch (md->paramtypes[i].type) {
+ case TYPE_ADR:
+ case TYPE_INT:
+ case TYPE_LNG:
+ M_LST(s, REG_SP, i * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DST(s, REG_SP, i * 8);
+ break;
+ }
+ }
+ }
+
+ /* save all argument and temporary registers for leaf methods */
- for (i = 0; i < FLT_ARG_CNT; i++)
- M_DST(rd->argfltregs[i], REG_SP, (1 + INT_ARG_CNT + i) * 8);
+ if (code_is_leafmethod(code)) {
+ for (i = 0; i < INT_ARG_CNT; i++)
+ M_LST(abi_registers_integer_argument[i], REG_SP, (md->paramcount + i) * 8);
- /* save temporary registers for leaf methods */
+ for (i = 0; i < FLT_ARG_CNT; i++)
+ M_DST(abi_registers_float_argument[i], REG_SP, (md->paramcount + INT_ARG_CNT + i) * 8);
- if (jd->isleafmethod) {
for (i = 0; i < INT_TMP_CNT; i++)
- M_LST(rd->tmpintregs[i], REG_SP, (1 + ARG_CNT + i) * 8);
+ M_LST(rd->tmpintregs[i], REG_SP, (md->paramcount + ARG_CNT + i) * 8);
for (i = 0; i < FLT_TMP_CNT; i++)
- M_DST(rd->tmpfltregs[i], REG_SP, (1 + ARG_CNT + INT_TMP_CNT + i) * 8);
+ M_DST(rd->tmpfltregs[i], REG_SP, (md->paramcount + ARG_CNT + INT_TMP_CNT + i) * 8);
}
- /* show integer hex code for float arguments */
+ M_MOV_IMM(m, REG_A0);
+ M_MOV(REG_SP, REG_A1);
+ M_MOV(REG_SP, REG_A2);
+ M_AADD_IMM((stackframesize + cd->stackframesize + 1) * 8, REG_A2);
+ M_MOV_IMM(trace_java_call_enter, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- for (i = 0, j = 0; i < md->paramcount && i < INT_ARG_CNT; i++) {
- /* If the paramtype is a float, we have to right shift all
- following integer registers. */
-
- if (IS_FLT_DBL_TYPE(md->paramtypes[i].type)) {
- for (k = INT_ARG_CNT - 2; k >= i; k--)
- M_MOV(rd->argintregs[k], rd->argintregs[k + 1]);
+ /* restore argument registers */
- emit_movd_freg_reg(cd, rd->argfltregs[j], rd->argintregs[i]);
- j++;
+ for (i = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s = md->params[i].regoff;
+
+ switch (md->paramtypes[i].type) {
+ case TYPE_ADR:
+ case TYPE_INT:
+ case TYPE_LNG:
+ M_LLD(s, REG_SP, i * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DLD(s, REG_SP, i * 8);
+ break;
+ }
}
}
- M_MOV_IMM(m, REG_ITMP2);
- M_AST(REG_ITMP2, REG_SP, 0 * 8);
- M_MOV_IMM(builtin_verbosecall_enter, REG_ITMP1);
- M_CALL(REG_ITMP1);
-
- /* restore argument registers */
- for (i = 0; i < INT_ARG_CNT; i++)
- M_LLD(rd->argintregs[i], REG_SP, (1 + i) * 8);
+ /* restore all argument and temporary registers for leaf methods */
- for (i = 0; i < FLT_ARG_CNT; i++)
- M_DLD(rd->argfltregs[i], REG_SP, (1 + INT_ARG_CNT + i) * 8);
+ if (code_is_leafmethod(code)) {
+ for (i = 0; i < INT_ARG_CNT; i++)
+ M_LLD(abi_registers_integer_argument[i], REG_SP, (md->paramcount + i) * 8);
- /* restore temporary registers for leaf methods */
+ for (i = 0; i < FLT_ARG_CNT; i++)
+ M_DLD(abi_registers_float_argument[i], REG_SP, (md->paramcount + INT_ARG_CNT + i) * 8);
- if (jd->isleafmethod) {
for (i = 0; i < INT_TMP_CNT; i++)
- M_LLD(rd->tmpintregs[i], REG_SP, (1 + ARG_CNT + i) * 8);
+ M_LLD(rd->tmpintregs[i], REG_SP, (md->paramcount + ARG_CNT + i) * 8);
for (i = 0; i < FLT_TMP_CNT; i++)
- M_DLD(rd->tmpfltregs[i], REG_SP, (1 + ARG_CNT + INT_TMP_CNT + i) * 8);
+ M_DLD(rd->tmpfltregs[i], REG_SP, (md->paramcount + ARG_CNT + INT_TMP_CNT + i) * 8);
}
- M_LADD_IMM((ARG_CNT + TMP_CNT + 1 + 1) * 8, REG_SP);
+ M_LADD_IMM(stackframesize * 8, REG_SP);
/* mark trace code */
methodinfo *m;
codegendata *cd;
registerdata *rd;
+ methoddesc *md;
/* get required compiler data */
cd = jd->cd;
rd = jd->rd;
+ md = m->parseddesc;
+
/* mark trace code */
M_NOP;
+ /* keep 16-byte stack alignment */
+
M_ASUB_IMM(2 * 8, REG_SP);
- M_LST(REG_RESULT, REG_SP, 0 * 8);
- M_DST(REG_FRESULT, REG_SP, 1 * 8);
+ /* save return value */
+
+ switch (md->returntype.type) {
+ case TYPE_ADR:
+ case TYPE_INT:
+ case TYPE_LNG:
+ M_LST(REG_RESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DST(REG_FRESULT, REG_SP, 0 * 8);
+ break;
+ }
- M_INTMOVE(REG_RESULT, REG_A0);
- M_FLTMOVE(REG_FRESULT, REG_FA0);
- M_FLTMOVE(REG_FRESULT, REG_FA1);
- M_MOV_IMM(m, REG_A1);
+ M_MOV_IMM(m, REG_A0);
+ M_MOV(REG_SP, REG_A1);
- M_MOV_IMM(builtin_verbosecall_exit, REG_ITMP1);
+ M_MOV_IMM(trace_java_call_exit, REG_ITMP1);
M_CALL(REG_ITMP1);
- M_LLD(REG_RESULT, REG_SP, 0 * 8);
- M_DLD(REG_FRESULT, REG_SP, 1 * 8);
+ /* restore return value */
+
+ switch (md->returntype.type) {
+ case TYPE_ADR:
+ case TYPE_INT:
+ case TYPE_LNG:
+ M_LLD(REG_RESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DLD(REG_FRESULT, REG_SP, 0 * 8);
+ break;
+ }
M_AADD_IMM(2 * 8, REG_SP);
if (IS_INMEMORY(v_dst->flags)) {
if (IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
if (s1 == d) {
- M_ILD(RCX, REG_SP, s2 * 8);
- emit_shiftl_membase(cd, shift_op, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2);
+ emit_shiftl_membase(cd, shift_op, REG_SP, d);
} else {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_ILD(REG_ITMP2, REG_SP, s1 * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_ILD(REG_ITMP2, REG_SP, s1);
emit_shiftl_reg(cd, shift_op, REG_ITMP2);
- M_IST(REG_ITMP2, REG_SP, d * 8);
+ M_IST(REG_ITMP2, REG_SP, d);
}
} else if (IS_INMEMORY(v_s2->flags) && !IS_INMEMORY(v_s1->flags)) {
/* s1 may be equal to RCX */
if (s1 == RCX) {
if (s2 == d) {
- M_ILD(REG_ITMP1, REG_SP, s2 * 8);
- M_IST(s1, REG_SP, d * 8);
+ M_ILD(REG_ITMP1, REG_SP, s2);
+ M_IST(s1, REG_SP, d);
M_INTMOVE(REG_ITMP1, RCX);
} else {
- M_IST(s1, REG_SP, d * 8);
- M_ILD(RCX, REG_SP, s2 * 8);
+ M_IST(s1, REG_SP, d);
+ M_ILD(RCX, REG_SP, s2);
}
} else {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_IST(s1, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_IST(s1, REG_SP, d);
}
- emit_shiftl_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shiftl_membase(cd, shift_op, REG_SP, d);
} else if (!IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
if (s1 == d) {
M_INTMOVE(s2, RCX);
- emit_shiftl_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shiftl_membase(cd, shift_op, REG_SP, d);
} else {
M_INTMOVE(s2, RCX);
- M_ILD(REG_ITMP2, REG_SP, s1 * 8);
+ M_ILD(REG_ITMP2, REG_SP, s1);
emit_shiftl_reg(cd, shift_op, REG_ITMP2);
- M_IST(REG_ITMP2, REG_SP, d * 8);
+ M_IST(REG_ITMP2, REG_SP, d);
}
} else {
/* s1 may be equal to RCX */
- M_IST(s1, REG_SP, d * 8);
+ M_IST(s1, REG_SP, d);
M_INTMOVE(s2, RCX);
- emit_shiftl_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shiftl_membase(cd, shift_op, REG_SP, d);
}
M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
}
if (IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_ILD(d, REG_SP, s1 * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_ILD(d, REG_SP, s1);
emit_shiftl_reg(cd, shift_op, d);
} else if (IS_INMEMORY(v_s2->flags) && !IS_INMEMORY(v_s1->flags)) {
/* s1 may be equal to RCX */
M_INTMOVE(s1, d);
- M_ILD(RCX, REG_SP, s2 * 8);
+ M_ILD(RCX, REG_SP, s2);
emit_shiftl_reg(cd, shift_op, d);
} else if (!IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
M_INTMOVE(s2, RCX);
- M_ILD(d, REG_SP, s1 * 8);
+ M_ILD(d, REG_SP, s1);
emit_shiftl_reg(cd, shift_op, d);
} else {
if (IS_INMEMORY(v_dst->flags)) {
if (IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
if (s1 == d) {
- M_ILD(RCX, REG_SP, s2 * 8);
- emit_shift_membase(cd, shift_op, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2);
+ emit_shift_membase(cd, shift_op, REG_SP, d);
} else {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_LLD(REG_ITMP2, REG_SP, s1 * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_LLD(REG_ITMP2, REG_SP, s1);
emit_shift_reg(cd, shift_op, REG_ITMP2);
- M_LST(REG_ITMP2, REG_SP, d * 8);
+ M_LST(REG_ITMP2, REG_SP, d);
}
} else if (IS_INMEMORY(v_s2->flags) && !IS_INMEMORY(v_s1->flags)) {
/* s1 may be equal to RCX */
if (s1 == RCX) {
if (s2 == d) {
- M_ILD(REG_ITMP1, REG_SP, s2 * 8);
- M_LST(s1, REG_SP, d * 8);
+ M_ILD(REG_ITMP1, REG_SP, s2);
+ M_LST(s1, REG_SP, d);
M_INTMOVE(REG_ITMP1, RCX);
} else {
- M_LST(s1, REG_SP, d * 8);
- M_ILD(RCX, REG_SP, s2 * 8);
+ M_LST(s1, REG_SP, d);
+ M_ILD(RCX, REG_SP, s2);
}
} else {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_LST(s1, REG_SP, d * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_LST(s1, REG_SP, d);
}
- emit_shift_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shift_membase(cd, shift_op, REG_SP, d);
} else if (!IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
if (s1 == d) {
M_INTMOVE(s2, RCX);
- emit_shift_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shift_membase(cd, shift_op, REG_SP, d);
} else {
M_INTMOVE(s2, RCX);
- M_LLD(REG_ITMP2, REG_SP, s1 * 8);
+ M_LLD(REG_ITMP2, REG_SP, s1);
emit_shift_reg(cd, shift_op, REG_ITMP2);
- M_LST(REG_ITMP2, REG_SP, d * 8);
+ M_LST(REG_ITMP2, REG_SP, d);
}
} else {
/* s1 may be equal to RCX */
- M_LST(s1, REG_SP, d * 8);
+ M_LST(s1, REG_SP, d);
M_INTMOVE(s2, RCX);
- emit_shift_membase(cd, shift_op, REG_SP, d * 8);
+ emit_shift_membase(cd, shift_op, REG_SP, d);
}
M_INTMOVE(REG_ITMP1, RCX); /* restore RCX */
}
if (IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
- M_ILD(RCX, REG_SP, s2 * 8);
- M_LLD(d, REG_SP, s1 * 8);
+ M_ILD(RCX, REG_SP, s2);
+ M_LLD(d, REG_SP, s1);
emit_shift_reg(cd, shift_op, d);
} else if (IS_INMEMORY(v_s2->flags) && !IS_INMEMORY(v_s1->flags)) {
/* s1 may be equal to RCX */
M_INTMOVE(s1, d);
- M_ILD(RCX, REG_SP, s2 * 8);
+ M_ILD(RCX, REG_SP, s2);
emit_shift_reg(cd, shift_op, d);
} else if (!IS_INMEMORY(v_s2->flags) && IS_INMEMORY(v_s1->flags)) {
M_INTMOVE(s2, RCX);
- M_LLD(d, REG_SP, s1 * 8);
+ M_LLD(d, REG_SP, s1);
emit_shift_reg(cd, shift_op, d);
} else {
}
+/* movzbq: zero-extend the byte register reg into the 64-bit dreg
+   (REX.W + 0F B6 /r). */
+void emit_movzbq_reg_reg(codegendata *cd, s8 reg, s8 dreg)
+{
+	emit_rex(1,(dreg),0,(reg));
+	*(cd->mcodeptr++) = 0x0f;
+	*(cd->mcodeptr++) = 0xb6;
+	/* MOVZX encodes the DESTINATION in the ModRM reg field and the
+	   source in r/m, so dreg/reg appear swapped compared to the
+	   mov_reg_reg emitters. */
+	emit_reg((dreg),(reg));
+}
+
+
void emit_movzwq_reg_reg(codegendata *cd, s8 reg, s8 dreg)
{
emit_rex(1,(dreg),0,(reg));
}
+/* 64-bit load from an absolute 32-bit address: 8B /r with a SIB byte
+   and no base/index register (mod=00, r/m=4) — NOTE(review): this is
+   presumably the primitive behind the M_ALD_MEM trap loads; confirm
+   that emit_mem(4, disp) emits the no-base SIB form. */
+void emit_mov_mem_reg(codegendata *cd, s4 disp, s4 dreg)
+{
+	emit_rex(1, dreg, 0, 0);
+	*(cd->mcodeptr++) = 0x8b;
+	emit_address_byte(0, dreg, 4);
+	emit_mem(4, disp);
+}
+
+
/*
* alu operations
*/
}
-void emit_alu_imm32_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
+void emit_alu_imm32_reg(codegendata *cd, s4 opc, s4 imm, s4 dreg)
+{
emit_rex(1,0,0,(dreg));
*(cd->mcodeptr++) = 0x81;
emit_reg((opc),(dreg));
}
+/* 32-bit ALU op with a forced 32-bit immediate (81 /opc id); unlike
+   emit_alul_imm_reg below, this never shrinks to the sign-extended
+   imm8 form, so the encoded length is independent of the immediate. */
+void emit_alul_imm32_reg(codegendata *cd, s4 opc, s4 imm, s4 dreg)
+{
+	emit_rex(0,0,0,(dreg));
+	*(cd->mcodeptr++) = 0x81;
+	emit_reg((opc),(dreg));
+	emit_imm32((imm));
+}
+
+
void emit_alul_imm_reg(codegendata *cd, s8 opc, s8 imm, s8 dreg) {
if (IS_IMM8(imm)) {
emit_rex(0,0,0,(dreg));
void emit_alu_imm_membase(codegendata *cd, s8 opc, s8 imm, s8 basereg, s8 disp) {
	if (IS_IMM8(imm)) {
-		emit_rex(1,(basereg),0,0);
+		/* REX fix: basereg is the ModRM r/m operand, so its register
+		   extension bit belongs in REX.B (emit_rex's 4th argument),
+		   not in REX.R (2nd argument). */
+		emit_rex(1,0,0,(basereg));
		*(cd->mcodeptr++) = 0x83;
		emit_membase(cd, (basereg),(disp),(opc));
		emit_imm8((imm));
	} else {
-		emit_rex(1,(basereg),0,0);
+		emit_rex(1,0,0,(basereg));
		*(cd->mcodeptr++) = 0x81;
		emit_membase(cd, (basereg),(disp),(opc));
		emit_imm32((imm));
void emit_alul_imm_membase(codegendata *cd, s8 opc, s8 imm, s8 basereg, s8 disp) {
	if (IS_IMM8(imm)) {
-		emit_rex(0,(basereg),0,0);
+		/* REX fix: basereg is the ModRM r/m operand, so its register
+		   extension bit belongs in REX.B (emit_rex's 4th argument),
+		   not in REX.R (2nd argument). */
+		emit_rex(0,0,0,(basereg));
		*(cd->mcodeptr++) = 0x83;
		emit_membase(cd, (basereg),(disp),(opc));
		emit_imm8((imm));
	} else {
-		emit_rex(0,(basereg),0,0);
+		emit_rex(0,0,0,(basereg));
		*(cd->mcodeptr++) = 0x81;
		emit_membase(cd, (basereg),(disp),(opc));
		emit_imm32((imm));
}
}
+/* 64-bit ALU op, memory (disp(basereg,indexreg,scale)) -> register.
+   The opcode byte (opc << 3) + 3 selects the r/m-to-reg direction of
+   the add/or/adc/sbb/and/sub/xor/cmp family. */
+void emit_alu_memindex_reg(codegendata *cd, s8 opc, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg)
+{
+	emit_rex(1,(reg),(indexreg),(basereg));
+	*(cd->mcodeptr++) = (((opc)) << 3) + 3;
+	emit_memindex(cd, (reg),(disp),(basereg),(indexreg),(scale));
+}
+
+/* 32-bit variant of emit_alu_memindex_reg (no REX.W). */
+void emit_alul_memindex_reg(codegendata *cd, s8 opc, s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg)
+{
+	emit_rex(0,(reg),(indexreg),(basereg));
+	*(cd->mcodeptr++) = (((opc)) << 3) + 3;
+	emit_memindex(cd, (reg),(disp),(basereg),(indexreg),(scale));
+}
void emit_test_reg_reg(codegendata *cd, s8 reg, s8 dreg) {
emit_rex(1,(reg),0,(dreg));
}
+/* 32-bit inc: FF /0.  NOTE(review): no REX prefix is emitted, so only
+   the low eight registers (eax..edi) can be encoded — confirm callers
+   never pass r8d..r15d. */
+void emit_incl_reg(codegendata *cd, s8 reg)
+{
+	*(cd->mcodeptr++) = 0xff;
+	emit_reg(0,(reg));
+}
+
+/* 64-bit inc of a register: REX.W + FF /0. */
+void emit_incq_reg(codegendata *cd, s8 reg)
+{
+	emit_rex(1,0,0,(reg));
+	*(cd->mcodeptr++) = 0xff;
+	emit_reg(0,(reg));
+}
void emit_incl_membase(codegendata *cd, s8 basereg, s8 disp)
{
emit_membase(cd, (basereg),(disp),0);
}
+/* 64-bit inc of the memory operand at disp(basereg): REX.W + FF /0. */
+void emit_incq_membase(codegendata *cd, s8 basereg, s8 disp)
+{
+	emit_rex(1,0,0,(basereg));
+	*(cd->mcodeptr++) = 0xff;
+	emit_membase(cd, (basereg),(disp),0);
+}
+
void emit_cltd(codegendata *cd) {
-void emit_ret(codegendata *cd) {
- *(cd->mcodeptr++) = 0xc3;
-}
-
-
-
/*
* shift ops
*/
emit_imm32((imm));
}
+/* Like emit_jmp_imm, but emits the short 2-byte form (EB rel8) when
+   the displacement fits into a signed byte, and the 5-byte E9 rel32
+   form otherwise.  Only usable where a variable instruction length is
+   acceptable to the caller. */
+void emit_jmp_imm2(codegendata *cd, s8 imm) {
+	if (IS_IMM8(imm)) {
+		*(cd->mcodeptr++) = 0xeb;
+		emit_imm8((imm));
+	}
+	else {
+		*(cd->mcodeptr++) = 0xe9;
+		emit_imm32((imm));
+	}
+}
+
void emit_jmp_reg(codegendata *cd, s8 reg) {
emit_rex(0,0,0,(reg));
*/
/* we need the rex byte to get all low bytes */
-void emit_setcc_reg(codegendata *cd, s8 opc, s8 reg) {
+void emit_setcc_reg(codegendata *cd, s4 opc, s4 reg)
+{
*(cd->mcodeptr++) = (0x40 | (((reg) >> 3) & 0x01));
*(cd->mcodeptr++) = 0x0f;
*(cd->mcodeptr++) = (0x90 + (opc));
/* we need the rex byte to get all low bytes */
-void emit_setcc_membase(codegendata *cd, s8 opc, s8 basereg, s8 disp) {
+void emit_setcc_membase(codegendata *cd, s4 opc, s4 basereg, s4 disp)
+{
*(cd->mcodeptr++) = (0x40 | (((basereg) >> 3) & 0x01));
*(cd->mcodeptr++) = 0x0f;
*(cd->mcodeptr++) = (0x90 + (opc));
}
-void emit_cmovcc_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg)
+void emit_cmovcc_reg_reg(codegendata *cd, s4 opc, s4 reg, s4 dreg)
{
emit_rex(1,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
}
-void emit_cmovccl_reg_reg(codegendata *cd, s8 opc, s8 reg, s8 dreg)
+void emit_cmovccl_reg_reg(codegendata *cd, s4 opc, s4 reg, s4 dreg)
{
emit_rex(0,(dreg),0,(reg));
*(cd->mcodeptr++) = 0x0f;
}
-
void emit_neg_reg(codegendata *cd, s8 reg)
{
emit_rex(1,0,0,(reg));
}
-void emit_nop(codegendata *cd) {
- *(cd->mcodeptr++) = 0x90;
-}
-
-
/*
* call instructions