-/* vm/jit/x86_64/codegen.c - machine code generator for x86_64
+/* src/vm/jit/x86_64/codegen.c - machine code generator for x86_64
- Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
- R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
- C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
- Institut f. Computersprachen - TU Wien
+ Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
+ C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
+ E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
+ J. Wenninger, Institut f. Computersprachen - TU Wien
This file is part of CACAO.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA.
-
- Contact: cacao@complang.tuwien.ac.at
-
- Authors: Andreas Krall
- Christian Thalinger
-
- $Id: codegen.c 2048 2005-03-20 16:24:02Z twisti $
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
*/
-#define _GNU_SOURCE
+#include "config.h"
+#include <assert.h>
#include <stdio.h>
-#include <ucontext.h>
-
-#include "cacao/cacao.h"
-#include "native/native.h"
-#include "vm/global.h"
-#include "vm/builtin.h"
-#include "vm/loader.h"
-#include "vm/tables.h"
-#include "vm/jit/asmpart.h"
-#include "vm/jit/jit.h"
-#include "vm/jit/reg.h"
-#include "vm/jit/parse.h"
-#include "vm/jit/x86_64/arch.h"
-#include "vm/jit/x86_64/codegen.h"
-#include "vm/jit/x86_64/emitfuncs.h"
-#include "vm/jit/x86_64/types.h"
-#include "vm/jit/x86_64/asmoffsets.h"
-
-
-/* register descripton - array ************************************************/
-
-/* #define REG_RES 0 reserved register for OS or code generator */
-/* #define REG_RET 1 return value register */
-/* #define REG_EXC 2 exception value register (only old jit) */
-/* #define REG_SAV 3 (callee) saved register */
-/* #define REG_TMP 4 scratch temporary register (caller saved) */
-/* #define REG_ARG 5 argument register (caller saved) */
-
-/* #define REG_END -1 last entry in tables */
-
-static int nregdescint[] = {
- REG_RET, REG_ARG, REG_ARG, REG_TMP, REG_RES, REG_SAV, REG_ARG, REG_ARG,
- REG_ARG, REG_ARG, REG_RES, REG_RES, REG_SAV, REG_SAV, REG_SAV, REG_SAV,
- REG_END
-};
-
-
-static int nregdescfloat[] = {
- REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG,
- REG_RES, REG_RES, REG_RES, REG_TMP, REG_TMP, REG_TMP, REG_TMP, REG_TMP,
- REG_END
-};
-
-
-/* Include independent code generation stuff -- include after register */
-/* descriptions to avoid extern definitions. */
-
-#include "vm/jit/codegen.inc"
-#include "vm/jit/reg.inc"
-#ifdef LSRA
-#include "vm/jit/lsra.inc"
-#endif
-
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
-void thread_restartcriticalsection(ucontext_t *uc)
-{
- void *critical;
-
- critical = thread_checkcritical((void *) uc->uc_mcontext.gregs[REG_RIP]);
-
- if (critical)
- uc->uc_mcontext.gregs[REG_RIP] = (u8) critical;
-}
-#endif
-
-/* NullPointerException signal handler for hardware null pointer check */
+#include "vm/types.h"
-void catch_NullPointerException(int sig, siginfo_t *siginfo, void *_p)
-{
- sigset_t nsig;
-
- struct ucontext *_uc = (struct ucontext *) _p;
- struct sigcontext *sigctx = (struct sigcontext *) &_uc->uc_mcontext;
- struct sigaction act;
- java_objectheader *xptr;
-
- /* Reset signal handler - necessary for SysV, does no harm for BSD */
-
- act.sa_sigaction = catch_NullPointerException; /* reinstall handler */
- act.sa_flags = SA_SIGINFO;
- sigaction(sig, &act, NULL);
-
- sigemptyset(&nsig);
- sigaddset(&nsig, sig);
- sigprocmask(SIG_UNBLOCK, &nsig, NULL); /* unblock signal */
-
- xptr = new_nullpointerexception();
-
- sigctx->rax = (u8) xptr; /* REG_ITMP1_XPTR */
- sigctx->r10 = sigctx->rip; /* REG_ITMP2_XPC */
- sigctx->rip = (u8) asm_handle_exception;
-
- return;
-}
-
-
-/* ArithmeticException signal handler for hardware divide by zero check */
-
-void catch_ArithmeticException(int sig, siginfo_t *siginfo, void *_p)
-{
- sigset_t nsig;
-
- struct ucontext *_uc = (struct ucontext *) _p;
- struct sigcontext *sigctx = (struct sigcontext *) &_uc->uc_mcontext;
- struct sigaction act;
- java_objectheader *xptr;
-
- /* Reset signal handler - necessary for SysV, does no harm for BSD */
-
- act.sa_sigaction = catch_ArithmeticException; /* reinstall handler */
- act.sa_flags = SA_SIGINFO;
- sigaction(sig, &act, NULL);
+#include "md-abi.h"
- sigemptyset(&nsig);
- sigaddset(&nsig, sig);
- sigprocmask(SIG_UNBLOCK, &nsig, NULL); /* unblock signal */
-
- xptr = new_arithmeticexception();
-
- sigctx->rax = (u8) xptr; /* REG_ITMP1_XPTR */
- sigctx->r10 = sigctx->rip; /* REG_ITMP2_XPC */
- sigctx->rip = (u8) asm_handle_exception;
+#include "vm/jit/x86_64/arch.h"
+#include "vm/jit/x86_64/codegen.h"
+#include "vm/jit/x86_64/emit.h"
- return;
-}
+#include "mm/memory.h"
+#include "native/jni.h"
+#include "native/localref.h"
+#include "native/native.h"
-void init_exceptions(void)
-{
- struct sigaction act;
+#include "threads/lock-common.h"
- /* install signal handlers we need to convert to exceptions */
- sigemptyset(&act.sa_mask);
+#include "vm/builtin.h"
+#include "vm/exceptions.h"
+#include "vm/global.h"
+#include "vm/stringlocal.h"
+#include "vm/vm.h"
- if (!checknull) {
-#if defined(SIGSEGV)
- act.sa_sigaction = catch_NullPointerException;
- act.sa_flags = SA_SIGINFO;
- sigaction(SIGSEGV, &act, NULL);
-#endif
+#include "vm/jit/abi.h"
+#include "vm/jit/asmpart.h"
+#include "vm/jit/codegen-common.h"
+#include "vm/jit/dseg.h"
+#include "vm/jit/emit-common.h"
+#include "vm/jit/jit.h"
+#include "vm/jit/methodheader.h"
+#include "vm/jit/parse.h"
+#include "vm/jit/patcher.h"
+#include "vm/jit/reg.h"
+#include "vm/jit/replace.h"
+#include "vm/jit/stacktrace.h"
-#if defined(SIGBUS)
- act.sa_sigaction = catch_NullPointerException;
- act.sa_flags = SA_SIGINFO;
- sigaction(SIGBUS, &act, NULL);
+#if defined(ENABLE_LSRA)
+# include "vm/jit/allocator/lsra.h"
#endif
- }
- act.sa_sigaction = catch_ArithmeticException;
- act.sa_flags = SA_SIGINFO;
- sigaction(SIGFPE, &act, NULL);
-}
+#include "vmcore/loader.h"
+#include "vmcore/options.h"
+#include "vmcore/statistics.h"
-/* function gen_mcode **********************************************************
+/* codegen_emit ****************************************************************
- generates machine code
+ Generates machine code.
*******************************************************************************/
-void codegen(methodinfo *m, codegendata *cd, registerdata *rd)
+bool codegen_emit(jitdata *jd)
{
- s4 len, s1, s2, s3, d;
- s8 a;
- s4 parentargs_base;
- stackptr src;
- varinfo *var;
- basicblock *bptr;
- instruction *iptr;
- exceptiontable *ex;
+ methodinfo *m;
+ codeinfo *code;
+ codegendata *cd;
+ registerdata *rd;
+ s4 len, s1, s2, s3, d, disp;
+ u2 currentline;
+ ptrint a;
+ varinfo *var, *dst;
+ basicblock *bptr;
+ instruction *iptr;
+ exception_entry *ex;
+ constant_classref *cr;
+ unresolved_class *uc;
+ methodinfo *lm; /* local methodinfo for ICMD_INVOKE* */
+ unresolved_method *um;
+ builtintable_entry *bte;
+ methoddesc *md;
+ fieldinfo *fi;
+ unresolved_field *uf;
+ s4 fieldtype;
+ s4 varindex;
+
+ /* get required compiler data */
+
+ m = jd->m;
+ code = jd->code;
+ cd = jd->cd;
+ rd = jd->rd;
+
+ /* prevent compiler warnings */
+
+ d = 0;
+ lm = NULL;
+ um = NULL;
+ bte = NULL;
{
- s4 i, p, pa, t, l;
+ s4 i, p, t, l;
s4 savedregs_num;
savedregs_num = 0;
/* space to save used callee saved registers */
- savedregs_num += (rd->savintregcnt - rd->maxsavintreguse);
- savedregs_num += (rd->savfltregcnt - rd->maxsavfltreguse);
+ savedregs_num += (INT_SAV_CNT - rd->savintreguse);
+ savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
- parentargs_base = rd->maxmemuse + savedregs_num;
+ cd->stackframesize = rd->memuse + savedregs_num;
-#if defined(USE_THREADS) /* space to save argument of monitor_enter */
+#if defined(ENABLE_THREADS)
+ /* space to save argument of monitor_enter */
if (checksync && (m->flags & ACC_SYNCHRONIZED))
- parentargs_base++;
-
+ cd->stackframesize++;
#endif
- /* keep stack 16-byte aligned for calls into native code e.g. libc or jni */
- /* (alignment problems with movaps) */
+ /* Keep stack of non-leaf functions 16-byte aligned for calls into
+ native code e.g. libc or jni (alignment problems with
+ movaps). */
- if (!(parentargs_base & 0x1)) {
- parentargs_base++;
- }
+ if (!jd->isleafmethod || opt_verbosecall)
+ cd->stackframesize |= 0x1;
/* create method header */
- (void) dseg_addaddress(cd, m); /* MethodPointer */
- (void) dseg_adds4(cd, parentargs_base * 8); /* FrameSize */
-
-#if defined(USE_THREADS)
+ (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
+ (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
+#if defined(ENABLE_THREADS)
/* IsSync contains the offset relative to the stack pointer for the
argument of monitor_exit used in the exception handler. Since the
offset could be zero and give a wrong meaning of the flag it is
*/
if (checksync && (m->flags & ACC_SYNCHRONIZED))
- (void) dseg_adds4(cd, (rd->maxmemuse + 1) * 8); /* IsSync */
+ (void) dseg_add_unique_s4(cd, (rd->memuse + 1) * 8); /* IsSync */
else
-
#endif
-
- (void) dseg_adds4(cd, 0); /* IsSync */
+ (void) dseg_add_unique_s4(cd, 0); /* IsSync */
- (void) dseg_adds4(cd, m->isleafmethod); /* IsLeaf */
- (void) dseg_adds4(cd, rd->savintregcnt - rd->maxsavintreguse);/* IntSave */
- (void) dseg_adds4(cd, rd->savfltregcnt - rd->maxsavfltreguse);/* FltSave */
- (void) dseg_adds4(cd, cd->exceptiontablelength); /* ExTableSize */
+ (void) dseg_add_unique_s4(cd, jd->isleafmethod); /* IsLeaf */
+ (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
+ (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
+
+ (void) dseg_addlinenumbertablesize(cd);
+
+ (void) dseg_add_unique_s4(cd, jd->exceptiontablelength); /* ExTableSize */
/* create exception table */
- for (ex = cd->exceptiontable; ex != NULL; ex = ex->down) {
- dseg_addtarget(cd, ex->start);
- dseg_addtarget(cd, ex->end);
- dseg_addtarget(cd, ex->handler);
- (void) dseg_addaddress(cd, ex->catchtype);
+ for (ex = jd->exceptiontable; ex != NULL; ex = ex->down) {
+ dseg_add_target(cd, ex->start);
+ dseg_add_target(cd, ex->end);
+ dseg_add_target(cd, ex->handler);
+ (void) dseg_add_unique_address(cd, ex->catchtype.any);
}
-
- /* initialize mcode variables */
-
- cd->mcodeptr = (u1 *) cd->mcodebase;
- cd->mcodeend = (s4 *) (cd->mcodebase + cd->mcodesize);
- MCODECHECK(128 + m->paramcount);
- /* create stack frame (if necessary) */
+#if defined(ENABLE_PROFILING)
+ /* generate method profiling code */
- if (parentargs_base) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, parentargs_base * 8, REG_SP);
+ if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
+ /* count frequency */
+
+ M_MOV_IMM(code, REG_ITMP3);
+ M_IINC_MEMBASE(REG_ITMP3, OFFSET(codeinfo, frequency));
+
+ PROFILE_CYCLE_START;
}
+#endif
+
+ /* create stack frame (if necessary) */
+
+ if (cd->stackframesize)
+ M_ASUB_IMM(cd->stackframesize * 8, REG_SP);
/* save used callee saved registers */
- p = parentargs_base;
- for (i = rd->savintregcnt - 1; i >= rd->maxsavintreguse; i--) {
- p--; x86_64_mov_reg_membase(cd, rd->savintregs[i], REG_SP, p * 8);
+ p = cd->stackframesize;
+ for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
+ p--; M_LST(rd->savintregs[i], REG_SP, p * 8);
}
- for (i = rd->savfltregcnt - 1; i >= rd->maxsavfltreguse; i--) {
- p--; x86_64_movq_reg_membase(cd, rd->savfltregs[i], REG_SP, p * 8);
+ for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
+ p--; M_DST(rd->savfltregs[i], REG_SP, p * 8);
}
/* take arguments out of register or stack frame */
- for (p = 0, l = 0, s1 = 0, s2 = 0; p < m->paramcount; p++) {
- t = m->paramtypes[p];
- var = &(rd->locals[l][t]);
+ md = m->parseddesc;
+
+ for (p = 0, l = 0; p < md->paramcount; p++) {
+ t = md->paramtypes[p].type;
+
+ varindex = jd->local_map[l * 5 + t];
+
l++;
if (IS_2_WORD_TYPE(t)) /* increment local counter for 2 word types */
l++;
- if (var->type < 0) {
- if (IS_INT_LNG_TYPE(t)) {
- s1++;
- } else {
- s2++;
- }
- continue;
- }
- if (IS_INT_LNG_TYPE(t)) { /* integer args */
- if (s1 < INT_ARG_CNT) { /* register arguments */
- if (!(var->flags & INMEMORY)) { /* reg arg -> register */
- M_INTMOVE(rd->argintregs[s1], var->regoff);
-
- } else { /* reg arg -> spilled */
- x86_64_mov_reg_membase(cd, rd->argintregs[s1], REG_SP, var->regoff * 8);
- }
-
- } else { /* stack arguments */
- pa = s1 - INT_ARG_CNT;
- if (s2 >= FLT_ARG_CNT) {
- pa += s2 - FLT_ARG_CNT;
- }
- if (!(var->flags & INMEMORY)) { /* stack arg -> register */
- x86_64_mov_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, var->regoff); /* + 8 for return address */
- } else { /* stack arg -> spilled */
- x86_64_mov_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, REG_ITMP1); /* + 8 for return address */
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, var->regoff * 8);
- }
- }
- s1++;
- } else { /* floating args */
- if (s2 < FLT_ARG_CNT) { /* register arguments */
- if (!(var->flags & INMEMORY)) { /* reg arg -> register */
- M_FLTMOVE(rd->argfltregs[s2], var->regoff);
-
- } else { /* reg arg -> spilled */
- x86_64_movq_reg_membase(cd, rd->argfltregs[s2], REG_SP, var->regoff * 8);
- }
+ if (varindex == UNUSED)
+ continue;
- } else { /* stack arguments */
- pa = s2 - FLT_ARG_CNT;
- if (s1 >= INT_ARG_CNT) {
- pa += s1 - INT_ARG_CNT;
- }
- if (!(var->flags & INMEMORY)) { /* stack-arg -> register */
- x86_64_movq_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, var->regoff);
+ var = VAR(varindex);
+
+ s1 = md->params[p].regoff;
- } else {
- x86_64_movq_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, REG_FTMP1);
- x86_64_movq_reg_membase(cd, REG_FTMP1, REG_SP, var->regoff * 8);
- }
+ if (IS_INT_LNG_TYPE(t)) { /* integer args */
+ if (!md->params[p].inmemory) { /* register arguments */
+ if (!IS_INMEMORY(var->flags))
+ M_INTMOVE(s1, var->vv.regoff);
+ else
+ M_LST(s1, REG_SP, var->vv.regoff);
+ }
+ else { /* stack arguments */
+ if (!IS_INMEMORY(var->flags))
+ /* + 8 for return address */
+ M_LLD(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1 + 8);
+ else
+ var->vv.regoff = cd->stackframesize * 8 + s1 + 8;
+ }
+ }
+ else { /* floating args */
+ if (!md->params[p].inmemory) { /* register arguments */
+ if (!IS_INMEMORY(var->flags))
+ M_FLTMOVE(s1, var->vv.regoff);
+ else
+ M_DST(s1, REG_SP, var->vv.regoff);
+ }
+ else { /* stack arguments */
+ if (!IS_INMEMORY(var->flags))
+ M_DLD(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1 + 8);
+ else
+ var->vv.regoff = cd->stackframesize * 8 + s1 + 8;
}
- s2++;
}
- } /* end for */
+ }
/* save monitorenter argument */
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- u8 func_enter;
-
- if (m->flags & ACC_STATIC) {
- func_enter = (u8) builtin_staticmonitorenter;
- x86_64_mov_imm_reg(cd, (s8) m->class, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, rd->maxmemuse * 8);
-
- } else {
- func_enter = (u8) builtin_monitorenter;
- x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, rd->maxmemuse * 8);
- }
+ /* stack offset for monitor argument */
- /* call monitorenter function */
-
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, func_enter, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- }
-#endif
+ s1 = rd->memuse;
- /* Copy argument registers to stack and call trace function with pointer */
- /* to arguments on stack. */
+ if (opt_verbosecall) {
+ M_LSUB_IMM((INT_ARG_CNT + FLT_ARG_CNT) * 8, REG_SP);
- if (runverbose) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1 + 1) * 8, REG_SP);
+ for (p = 0; p < INT_ARG_CNT; p++)
+ M_LST(abi_registers_integer_argument[p], REG_SP, p * 8);
- /* save integer argument registers */
+ for (p = 0; p < FLT_ARG_CNT; p++)
+ M_DST(abi_registers_float_argument[p], REG_SP, (INT_ARG_CNT + p) * 8);
- for (p = 0; p < INT_ARG_CNT; p++) {
- x86_64_mov_reg_membase(cd, rd->argintregs[p], REG_SP, (1 + p) * 8);
+ s1 += INT_ARG_CNT + FLT_ARG_CNT;
}
- /* save float argument registers */
+ /* decide which monitor enter function to call */
- for (p = 0; p < FLT_ARG_CNT; p++) {
- x86_64_movq_reg_membase(cd, rd->argfltregs[p], REG_SP, (1 + INT_ARG_CNT + p) * 8);
+ if (m->flags & ACC_STATIC) {
+ M_MOV_IMM(&m->class->object.header, REG_A0);
}
-
- /* show integer hex code for float arguments */
-
- for (p = 0, l = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
- t = m->paramtypes[p];
-
- /* if the paramtype is a float, we have to right shift all */
- /* following integer registers */
-
- if (IS_FLT_DBL_TYPE(t)) {
- for (s1 = INT_ARG_CNT - 2; s1 >= p; s1--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
- }
-
- x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
- l++;
- }
+ else {
+ M_TEST(REG_A0);
+ M_BNE(8);
+ M_ALD_MEM(REG_A0, EXCEPTION_HARDWARE_NULLPOINTER);
}
- x86_64_mov_imm_reg(cd, (u8) m, REG_ITMP2);
- x86_64_mov_reg_membase(cd, REG_ITMP2, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (u8) builtin_trace_args, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
-
- /* restore integer argument registers */
+ M_AST(REG_A0, REG_SP, s1 * 8);
+ M_MOV_IMM(LOCK_monitor_enter, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- for (p = 0; p < INT_ARG_CNT; p++) {
- x86_64_mov_membase_reg(cd, REG_SP, (1 + p) * 8, rd->argintregs[p]);
- }
+ if (opt_verbosecall) {
+ for (p = 0; p < INT_ARG_CNT; p++)
+ M_LLD(abi_registers_integer_argument[p], REG_SP, p * 8);
- /* restore float argument registers */
+ for (p = 0; p < FLT_ARG_CNT; p++)
+ M_DLD(abi_registers_float_argument[p], REG_SP, (INT_ARG_CNT + p) * 8);
- for (p = 0; p < FLT_ARG_CNT; p++) {
- x86_64_movq_membase_reg(cd, REG_SP, (1 + INT_ARG_CNT + p) * 8, rd->argfltregs[p]);
+ M_LADD_IMM((INT_ARG_CNT + FLT_ARG_CNT) * 8, REG_SP);
}
-
- x86_64_alu_imm_reg(cd, X86_64_ADD, (6 + 8 + 1 + 1) * 8, REG_SP);
}
+#endif
+
+#if !defined(NDEBUG)
+ if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
+ emit_verbosecall_enter(jd);
+#endif /* !defined(NDEBUG) */
}
/* end of header generation */
+ /* create replacement points */
+
+ REPLACEMENT_POINTS_INIT(cd, jd);
+
/* walk through all basic blocks */
- for (bptr = m->basicblocks; bptr != NULL; bptr = bptr->next) {
+
+ for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
bptr->mpc = (u4) ((u1 *) cd->mcodeptr - cd->mcodebase);
if (bptr->flags >= BBREACHED) {
- /* branch resolving */
+ /* branch resolving */
- branchref *bref;
- for (bref = bptr->branchrefs; bref != NULL; bref = bref->next) {
- gen_resolvebranch((u1 *) cd->mcodebase + bref->branchpos,
- bref->branchpos,
- bptr->mpc);
- }
+ codegen_resolve_branchrefs(cd, bptr);
+
+ /* handle replacement points */
+
+ REPLACEMENT_POINT_BLOCK_START(cd, bptr);
/* copy interface registers to their destination */
- src = bptr->instack;
len = bptr->indepth;
- MCODECHECK(64 + len);
+ MCODECHECK(512);
-#ifdef LSRA
+#if defined(ENABLE_PROFILING)
+ /* generate basicblock profiling code */
+
+ if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
+ /* count frequency */
+
+ M_MOV_IMM(code->bbfrequency, REG_ITMP3);
+ M_IINC_MEMBASE(REG_ITMP3, bptr->nr * 4);
+
+ /* if this is an exception handler, start profiling again */
+
+ if (bptr->type == BBTYPE_EXH)
+ PROFILE_CYCLE_START;
+ }
+#endif
+
+#if defined(ENABLE_LSRA)
if (opt_lsra) {
- while (src != NULL) {
+ while (len) {
len--;
- if ((len == 0) && (bptr->type != BBTYPE_STD)) {
- if (bptr->type == BBTYPE_SBR) {
- /* d = reg_of_var(rd, src, REG_ITMP1); */
- if (!(src->flags & INMEMORY))
- d= src->regoff;
- else
- d=REG_ITMP1;
- x86_64_pop_reg(cd, d);
- store_reg_to_var_int(src, d);
-
- } else if (bptr->type == BBTYPE_EXH) {
- /* d = reg_of_var(rd, src, REG_ITMP1); */
- if (!(src->flags & INMEMORY))
- d= src->regoff;
+ src = bptr->invars[len];
+ if ((len == bptr->indepth-1) && (bptr->type != BBTYPE_STD)) {
+ if (bptr->type == BBTYPE_EXH) {
+/* d = reg_of_var(rd, src, REG_ITMP1); */
+ if (!IS_INMEMORY(src->flags))
+ d= src->vv.regoff;
else
d=REG_ITMP1;
M_INTMOVE(REG_ITMP1, d);
- store_reg_to_var_int(src, d);
+ emit_store(jd, NULL, src, d);
}
}
- src = src->prev;
}
} else {
#endif
- while (src != NULL) {
+ while (len) {
len--;
- if ((len == 0) && (bptr->type != BBTYPE_STD)) {
- if (bptr->type == BBTYPE_SBR) {
- d = reg_of_var(rd, src, REG_ITMP1);
- x86_64_pop_reg(cd, d);
- store_reg_to_var_int(src, d);
-
- } else if (bptr->type == BBTYPE_EXH) {
- d = reg_of_var(rd, src, REG_ITMP1);
+ var = VAR(bptr->invars[len]);
+ if ((len == bptr->indepth-1) && (bptr->type != BBTYPE_STD)) {
+ if (bptr->type == BBTYPE_EXH) {
+ d = codegen_reg_of_var(0, var, REG_ITMP1);
M_INTMOVE(REG_ITMP1, d);
- store_reg_to_var_int(src, d);
- }
-
- } else {
- d = reg_of_var(rd, src, REG_ITMP1);
- if ((src->varkind != STACKVAR)) {
- s2 = src->type;
- if (IS_FLT_DBL_TYPE(s2)) {
- s1 = rd->interfaces[len][s2].regoff;
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- M_FLTMOVE(s1, d);
-
- } else {
- x86_64_movq_membase_reg(cd, REG_SP, s1 * 8, d);
- }
- store_reg_to_var_flt(src, d);
-
- } else {
- s1 = rd->interfaces[len][s2].regoff;
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- M_INTMOVE(s1, d);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, s1 * 8, d);
- }
- store_reg_to_var_int(src, d);
- }
+ emit_store(jd, NULL, var, d);
}
+ }
+ else {
+ assert((var->flags & INOUT));
}
- src = src->prev;
}
-#ifdef LSRA
+#if defined(ENABLE_LSRA)
}
#endif
/* walk through all instructions */
- src = bptr->instack;
len = bptr->icount;
- for (iptr = bptr->iinstr; len > 0; src = iptr->dst, len--, iptr++) {
+ currentline = 0;
- MCODECHECK(64); /* an instruction usually needs < 64 words */
- switch (iptr->opc) {
- case ICMD_INLINE_START: /* internal ICMDs */
- case ICMD_INLINE_END:
- break;
+ for (iptr = bptr->iinstr; len > 0; len--, iptr++) {
+ if (iptr->line != currentline) {
+ dseg_addlinenumber(cd, iptr->line);
+ currentline = iptr->line;
+ }
- case ICMD_NOP: /* ... ==> ... */
- break;
+ MCODECHECK(1024); /* 1KB should be enough */
- case ICMD_NULLCHECKPOP: /* ..., objectref ==> ... */
- if (src->flags & INMEMORY) {
- x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8);
+ switch (iptr->opc) {
+ case ICMD_NOP: /* ... ==> ... */
+ case ICMD_POP: /* ..., value ==> ... */
+ case ICMD_POP2: /* ..., value, value ==> ... */
+ break;
- } else {
- x86_64_test_reg_reg(cd, src->regoff, src->regoff);
- }
- x86_64_jcc(cd, X86_64_CC_E, 0);
- codegen_addxnullrefs(cd, cd->mcodeptr);
- break;
+ case ICMD_INLINE_START:
- /* constant operations ************************************************/
+ REPLACEMENT_POINT_INLINE_START(cd, iptr);
+ break;
- case ICMD_ICONST: /* ... ==> ..., constant */
- /* op1 = 0, val.i = constant */
+ case ICMD_INLINE_BODY:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.i == 0) {
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- } else {
- x86_64_movl_imm_reg(cd, iptr->val.i, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
+ dseg_addlinenumber_inline_start(cd, iptr);
+ dseg_addlinenumber(cd, iptr->line);
break;
- case ICMD_ACONST: /* ... ==> ..., constant */
- /* op1 = 0, val.a = constant */
+ case ICMD_INLINE_END:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.a == 0) {
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- } else {
- x86_64_mov_imm_reg(cd, (s8) iptr->val.a, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ dseg_addlinenumber_inline_end(cd, iptr);
+ dseg_addlinenumber(cd, iptr->line);
+ break;
+
+ case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ emit_nullpointer_check(cd, iptr, s1);
+ break;
+
+ /* constant operations ************************************************/
+
+ case ICMD_ICONST: /* ... ==> ..., constant */
+
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ ICONST(d, iptr->sx.val.i);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LCONST: /* ... ==> ..., constant */
- /* op1 = 0, val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if (iptr->val.l == 0) {
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, d);
- }
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ LCONST(d, iptr->sx.val.l);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FCONST: /* ... ==> ..., constant */
- /* op1 = 0, val.f = constant */
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- a = dseg_addfloat(cd, iptr->val.f);
- x86_64_movdl_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + ((d > 7) ? 9 : 8)) - (s8) cd->mcodebase) + a, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ disp = dseg_add_float(cd, iptr->sx.val.f);
+ emit_movdl_membase_reg(cd, RIP, -((cd->mcodeptr + ((d > 7) ? 9 : 8)) - cd->mcodebase) + disp, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DCONST: /* ... ==> ..., constant */
- /* op1 = 0, val.d = constant */
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- a = dseg_adddouble(cd, iptr->val.d);
- x86_64_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + a, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ disp = dseg_add_double(cd, iptr->sx.val.d);
+ emit_movd_membase_reg(cd, RIP, -((cd->mcodeptr + 9) - cd->mcodebase) + disp, d);
+ emit_store_dst(jd, iptr, d);
break;
+ case ICMD_ACONST: /* ... ==> ..., constant */
- /* load/store operations **********************************************/
-
- case ICMD_ILOAD: /* ... ==> ..., content of local variable */
- /* op1 = local variable */
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
- break;
- }
- var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_int(iptr->dst, d);
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ cr = iptr->sx.val.c.ref;
- } else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
+/* PROFILE_CYCLE_STOP; */
- } else {
- M_INTMOVE(var->regoff, d);
- }
- }
- break;
+ codegen_add_patch_ref(cd, PATCHER_aconst, cr, 0);
- case ICMD_LLOAD: /* ... ==> ..., content of local variable */
- case ICMD_ALOAD: /* op1 = local variable */
+/* PROFILE_CYCLE_START; */
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
- break;
- }
- var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_int(iptr->dst, d);
+ M_MOV_IMM(NULL, d);
} else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- M_INTMOVE(var->regoff, d);
- }
+ if (iptr->sx.val.anyptr == 0)
+ M_CLR(d);
+ else
+ M_MOV_IMM(iptr->sx.val.anyptr, d);
}
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_FLOAD: /* ... ==> ..., content of local variable */
- case ICMD_DLOAD: /* op1 = local variable */
-
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- if ((iptr->dst->varkind == LOCALVAR) &&
- (iptr->dst->varnum == iptr->op1)) {
- break;
- }
- var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ILOAD]);
- if (var->flags & INMEMORY) {
- x86_64_movq_membase_reg(cd, REG_SP, var->regoff * 8, d);
- store_reg_to_var_flt(iptr->dst, d);
- } else {
- if (iptr->dst->flags & INMEMORY) {
- x86_64_movq_reg_membase(cd, var->regoff, REG_SP, iptr->dst->regoff * 8);
+ /* load/store/copy/move operations ************************************/
- } else {
- M_FLTMOVE(var->regoff, d);
- }
- }
+ case ICMD_ILOAD: /* ... ==> ..., content of local variable */
+ case ICMD_ALOAD: /* s1 = local variable */
+ case ICMD_LLOAD:
+ case ICMD_FLOAD:
+ case ICMD_DLOAD:
+ case ICMD_ISTORE: /* ..., value ==> ... */
+ case ICMD_LSTORE:
+ case ICMD_FSTORE:
+ case ICMD_DSTORE:
+ case ICMD_COPY:
+ case ICMD_MOVE:
+
+ emit_copy(jd, iptr);
break;
- case ICMD_ISTORE: /* ..., value ==> ... */
- case ICMD_LSTORE: /* op1 = local variable */
case ICMD_ASTORE:
-
- if ((src->varkind == LOCALVAR) &&
- (src->varnum == iptr->op1)) {
- break;
- }
- var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- var_to_reg_int(s1, src, REG_ITMP1);
- x86_64_mov_reg_membase(cd, s1, REG_SP, var->regoff * 8);
-
- } else {
- var_to_reg_int(s1, src, var->regoff);
- M_INTMOVE(s1, var->regoff);
- }
+ if (!(iptr->flags.bits & INS_FLAG_RETADDR))
+ emit_copy(jd, iptr);
break;
- case ICMD_FSTORE: /* ..., value ==> ... */
- case ICMD_DSTORE: /* op1 = local variable */
+ /* integer operations *************************************************/
- if ((src->varkind == LOCALVAR) &&
- (src->varnum == iptr->op1)) {
- break;
- }
- var = &(rd->locals[iptr->op1][iptr->opc - ICMD_ISTORE]);
- if (var->flags & INMEMORY) {
- var_to_reg_flt(s1, src, REG_FTMP1);
- x86_64_movq_reg_membase(cd, s1, REG_SP, var->regoff * 8);
+ case ICMD_INEG: /* ..., value ==> ..., - value */
- } else {
- var_to_reg_flt(s1, src, var->regoff);
- M_FLTMOVE(s1, var->regoff);
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_INEG(d);
+ emit_store_dst(jd, iptr, d);
break;
+ case ICMD_LNEG: /* ..., value ==> ..., - value */
- /* pop/dup/swap operations ********************************************/
-
- /* attention: double and longs are only one entry in CACAO ICMDs */
-
- case ICMD_POP: /* ..., value ==> ... */
- case ICMD_POP2: /* ..., value, value ==> ... */
- break;
-
- case ICMD_DUP: /* ..., a ==> ..., a, a */
- M_COPY(src, iptr->dst);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_LNEG(d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_DUP_X1: /* ..., a, b ==> ..., b, a, b */
+ case ICMD_I2L: /* ..., value ==> ..., value */
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
- M_COPY(iptr->dst, iptr->dst->prev->prev);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_ISEXT(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_DUP_X2: /* ..., a, b, c ==> ..., c, a, b, c */
+ case ICMD_L2I: /* ..., value ==> ..., value */
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
- M_COPY(src->prev->prev, iptr->dst->prev->prev);
- M_COPY(iptr->dst, iptr->dst->prev->prev->prev);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_IMOV(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_DUP2: /* ..., a, b ==> ..., a, b, a, b */
+ case ICMD_INT2BYTE: /* ..., value ==> ..., value */
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_BSEXT(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_DUP2_X1: /* ..., a, b, c ==> ..., b, c, a, b, c */
+ case ICMD_INT2CHAR: /* ..., value ==> ..., value */
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
- M_COPY(src->prev->prev, iptr->dst->prev->prev);
- M_COPY(iptr->dst, iptr->dst->prev->prev->prev);
- M_COPY(iptr->dst->prev, iptr->dst->prev->prev->prev->prev);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_CZEXT(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_DUP2_X2: /* ..., a, b, c, d ==> ..., c, d, a, b, c, d */
+ case ICMD_INT2SHORT: /* ..., value ==> ..., value */
- M_COPY(src, iptr->dst);
- M_COPY(src->prev, iptr->dst->prev);
- M_COPY(src->prev->prev, iptr->dst->prev->prev);
- M_COPY(src->prev->prev->prev, iptr->dst->prev->prev->prev);
- M_COPY(iptr->dst, iptr->dst->prev->prev->prev->prev);
- M_COPY(iptr->dst->prev, iptr->dst->prev->prev->prev->prev->prev);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_SSEXT(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_SWAP: /* ..., a, b ==> ..., b, a */
-
- M_COPY(src, iptr->dst->prev);
- M_COPY(src->prev, iptr->dst);
- break;
+ case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- /* integer operations *************************************************/
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_IADD(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IADD(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
+ break;
- case ICMD_INEG: /* ..., value ==> ..., - value */
+ case ICMD_IINC:
+ case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_negl_membase(cd, REG_SP, iptr->dst->regoff * 8);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_negl_reg(cd, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
+ /* Using inc and dec is not faster than add (tested with
+ sieve). */
- } else {
- x86_64_movl_reg_membase(cd, src->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_negl_membase(cd, REG_SP, iptr->dst->regoff * 8);
- }
+ M_INTMOVE(s1, d);
+ M_IADD_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
+ break;
- } else {
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- x86_64_negl_reg(cd, d);
+ case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- } else {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_negl_reg(cd, iptr->dst->regoff);
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_LADD(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LADD(s2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_LNEG: /* ..., value ==> ..., - value */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_neg_membase(cd, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_neg_reg(cd, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_mov_reg_membase(cd, src->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_neg_membase(cd, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- x86_64_neg_reg(cd, iptr->dst->regoff);
+ case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
+ /* sx.val.l = constant */
- } else {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_neg_reg(cd, iptr->dst->regoff);
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LADD_IMM(iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LADD(REG_ITMP2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_I2L: /* ..., value ==> ..., value */
-
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movslq_membase_reg(cd, REG_SP, src->regoff * 8, d);
+ case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d) {
+ M_INTMOVE(s1, REG_ITMP1);
+ M_ISUB(s2, REG_ITMP1);
+ M_INTMOVE(REG_ITMP1, d);
} else {
- x86_64_movslq_reg_reg(cd, src->regoff, d);
+ M_INTMOVE(s1, d);
+ M_ISUB(s2, d);
}
- store_reg_to_var_int(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_L2I: /* ..., value ==> ..., value */
+ case ICMD_ISUBCONST: /* ..., value ==> ..., value - constant */
+ /* sx.val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
M_INTMOVE(s1, d);
- store_reg_to_var_int(iptr->dst, d);
+ M_ISUB_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_INT2BYTE: /* ..., value ==> ..., value */
-
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movsbq_membase_reg(cd, REG_SP, src->regoff * 8, d);
+ case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d) {
+ M_INTMOVE(s1, REG_ITMP1);
+ M_LSUB(s2, REG_ITMP1);
+ M_INTMOVE(REG_ITMP1, d);
} else {
- x86_64_movsbq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
- break;
-
- case ICMD_INT2CHAR: /* ..., value ==> ..., value */
-
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movzwq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movzwq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
- break;
-
- case ICMD_INT2SHORT: /* ..., value ==> ..., value */
-
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (src->flags & INMEMORY) {
- x86_64_movswq_membase_reg(cd, REG_SP, src->regoff * 8, d);
-
- } else {
- x86_64_movswq_reg_reg(cd, src->regoff, d);
- }
- store_reg_to_var_int(iptr->dst, d);
- break;
-
-
- case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_ADD, src, iptr);
- break;
-
- case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
- /* val.i = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_ADD, src, iptr);
- break;
-
- case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_ADD, src, iptr);
- break;
-
- case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
- /* val.l = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_ADD, src, iptr);
- break;
-
- case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_membase(cd, X86_64_SUB, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_alul_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_movl_reg_membase(cd, src->prev->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_alul_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alul_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
-
- } else {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alul_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
- }
- }
- break;
-
- case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
- /* val.i = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_SUB, src, iptr);
- break;
-
- case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_membase(cd, X86_64_SUB, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- if (src->prev->regoff == iptr->dst->regoff) {
- x86_64_alu_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- x86_64_mov_reg_membase(cd, src->prev->regoff, REG_SP, iptr->dst->regoff * 8);
- x86_64_alu_reg_membase(cd, X86_64_SUB, src->regoff, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alu_membase_reg(cd, X86_64_SUB, REG_SP, src->regoff * 8, d);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, d);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
-
- } else {
- /* workaround for reg alloc */
- if (src->regoff == iptr->dst->regoff) {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, REG_ITMP1);
- M_INTMOVE(REG_ITMP1, d);
-
- } else {
- M_INTMOVE(src->prev->regoff, d);
- x86_64_alu_reg_reg(cd, X86_64_SUB, src->regoff, d);
- }
- }
+ M_INTMOVE(s1, d);
+ M_LSUB(s2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
- /* val.l = constant */
+ /* sx.val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_SUB, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LSUB_IMM(iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LSUB(REG_ITMP2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- x86_64_imull_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imull_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_imull_reg_reg(cd, src->prev->regoff, iptr->dst->regoff);
-
- } else {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imull_reg_reg(cd, src->regoff, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_IMUL(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IMUL(s2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
- /* val.i = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- x86_64_imull_imm_membase_reg(cd, iptr->val.i, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_imull_imm_reg_reg(cd, iptr->val.i, src->regoff, REG_ITMP1);
- x86_64_movl_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- x86_64_imull_imm_membase_reg(cd, iptr->val.i, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (iptr->val.i == 2) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_alul_reg_reg(cd, X86_64_ADD, iptr->dst->regoff, iptr->dst->regoff);
+ /* sx.val.i = constant */
- } else {
- x86_64_imull_imm_reg_reg(cd, iptr->val.i, src->regoff, iptr->dst->regoff); /* 3 cycles */
- }
- }
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ if (iptr->sx.val.i == 2) {
+ M_INTMOVE(s1, d);
+ M_ISLL_IMM(1, d);
+ } else
+ M_IMUL_IMM(s1, iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- x86_64_mov_reg_reg(cd, src->prev->regoff, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if ((src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if ((src->flags & INMEMORY) && !(src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
-
- } else if (!(src->flags & INMEMORY) && (src->prev->flags & INMEMORY)) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->prev->regoff * 8, iptr->dst->regoff);
-
- } else {
- if (src->regoff == iptr->dst->regoff) {
- x86_64_imul_reg_reg(cd, src->prev->regoff, iptr->dst->regoff);
-
- } else {
- M_INTMOVE(src->prev->regoff, iptr->dst->regoff);
- x86_64_imul_reg_reg(cd, src->regoff, iptr->dst->regoff);
- }
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_LMUL(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LMUL(s2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
- /* val.l = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (iptr->dst->flags & INMEMORY) {
- if (src->flags & INMEMORY) {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, REG_ITMP1);
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP1);
- }
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
-
- } else {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, REG_ITMP1);
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- x86_64_imul_reg_reg(cd, src->regoff, REG_ITMP1);
- }
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
- }
-
- } else {
- if (src->flags & INMEMORY) {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, iptr->dst->regoff);
+ /* sx.val.l = constant */
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, iptr->dst->regoff);
- x86_64_imul_membase_reg(cd, REG_SP, src->regoff * 8, iptr->dst->regoff);
- }
-
- } else {
- /* should match in many cases */
- if (iptr->val.l == 2) {
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_alul_reg_reg(cd, X86_64_ADD, iptr->dst->regoff, iptr->dst->regoff);
-
- } else {
- if (IS_IMM32(iptr->val.l)) {
- x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, iptr->dst->regoff); /* 4 cycles */
-
- } else {
- x86_64_mov_imm_reg(cd, iptr->val.l, REG_ITMP1);
- M_INTMOVE(src->regoff, iptr->dst->regoff);
- x86_64_imul_reg_reg(cd, REG_ITMP1, iptr->dst->regoff);
- }
- }
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LMUL_IMM(s1, iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LMUL(REG_ITMP2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, RAX);
+ s1 = emit_load_s1(jd, iptr, RAX);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP3);
+ d = codegen_reg_of_dst(jd, iptr, RAX);
- } else {
- M_INTMOVE(src->prev->regoff, RAX);
- }
-
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
- M_INTMOVE(src->regoff, REG_ITMP3);
- }
- gen_div_check(src);
+ M_INTMOVE(s1, RAX);
+ M_INTMOVE(s2, REG_ITMP3);
+ emit_arithmetic_check(cd, iptr, REG_ITMP3);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, RAX); /* check as described in jvm spec */
- x86_64_jcc(cd, X86_64_CC_NE, 4 + 6);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 1 + 3); /* 6 bytes */
+ M_MOV(RDX, REG_ITMP2); /* save RDX (it's an argument register) */
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cltd(cd);
- x86_64_idivl_reg(cd, REG_ITMP3);
+ M_ICMP_IMM(0x80000000, RAX); /* check as described in jvm spec */
+ M_BNE(4 + 6);
+ M_ICMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_BEQ(1 + 3); /* 6 bytes */
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RAX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
-
- } else {
- M_INTMOVE(RAX, iptr->dst->regoff);
+ emit_cltd(cd); /* 1 byte */
+ emit_idivl_reg(cd, REG_ITMP3); /* 3 bytes */
- if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
- }
- }
+ M_INTMOVE(RAX, d);
+ emit_store_dst(jd, iptr, d);
+ dst = VAROP(iptr->dst);
+ if (IS_INMEMORY(dst->flags) || (dst->vv.regoff != RDX))
+ M_MOV(REG_ITMP2, RDX); /* restore RDX */
break;
case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->prev->regoff * 8, RAX);
-
- } else {
- M_INTMOVE(src->prev->regoff, RAX);
- }
-
- if (src->flags & INMEMORY) {
- x86_64_movl_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
- M_INTMOVE(src->regoff, REG_ITMP3);
- }
- gen_div_check(src);
+ s1 = emit_load_s1(jd, iptr, RAX);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP3);
+ d = codegen_reg_of_dst(jd, iptr, RDX);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, RAX); /* check as described in jvm spec */
- x86_64_jcc(cd, X86_64_CC_NE, 2 + 4 + 6);
- x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 1 + 3); /* 6 bytes */
+ M_INTMOVE(s1, RAX);
+ M_INTMOVE(s2, REG_ITMP3);
+ emit_arithmetic_check(cd, iptr, REG_ITMP3);
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cltd(cd);
- x86_64_idivl_reg(cd, REG_ITMP3);
+ M_MOV(RDX, REG_ITMP2); /* save RDX (it's an argument register) */
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RDX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ M_ICMP_IMM(0x80000000, RAX); /* check as described in jvm spec */
+ M_BNE(3 + 4 + 6);
+ M_CLR(RDX); /* 3 bytes */
+ M_ICMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_BEQ(1 + 3); /* 6 bytes */
- } else {
- M_INTMOVE(RDX, iptr->dst->regoff);
+ emit_cltd(cd); /* 1 byte */
+ emit_idivl_reg(cd, REG_ITMP3); /* 3 bytes */
- if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
- }
- }
+ M_INTMOVE(RDX, d);
+ emit_store_dst(jd, iptr, d);
+ dst = VAROP(iptr->dst);
+ if (IS_INMEMORY(dst->flags) || (dst->vv.regoff != RDX))
+ M_MOV(REG_ITMP2, RDX); /* restore RDX */
break;
case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_leal_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, REG_ITMP1);
- x86_64_shiftl_imm_reg(cd, X86_64_SAR, iptr->val.i, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_leal_membase_reg(cd, REG_ITMP1, (1 << iptr->sx.val.i) - 1, REG_ITMP2);
+ emit_cmovccl_reg_reg(cd, CC_LE, REG_ITMP2, REG_ITMP1);
+ emit_shiftl_imm_reg(cd, SHIFT_SAR, iptr->sx.val.i, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_leal_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_G, REG_ITMP1, REG_ITMP2);
- x86_64_alul_imm_reg(cd, X86_64_AND, -1 - (iptr->val.i), REG_ITMP2);
- x86_64_alul_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alul_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_leal_membase_reg(cd, REG_ITMP1, iptr->sx.val.i, REG_ITMP2);
+ emit_cmovccl_reg_reg(cd, CC_G, REG_ITMP1, REG_ITMP2);
+ emit_alul_imm_reg(cd, ALU_AND, -1 - (iptr->sx.val.i), REG_ITMP2);
+ emit_alul_reg_reg(cd, ALU_SUB, REG_ITMP2, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
-
- } else {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- }
-
- if (src->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
- M_INTMOVE(src->regoff, REG_ITMP3);
- }
- gen_div_check(src);
+ s1 = emit_load_s1(jd, iptr, RAX);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP3);
+ d = codegen_reg_of_dst(jd, iptr, RAX);
- x86_64_mov_imm_reg(cd, 0x8000000000000000LL, REG_ITMP2); /* check as described in jvm spec */
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
- x86_64_jcc(cd, X86_64_CC_NE, 4 + 6);
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 2 + 3); /* 6 bytes */
+ M_INTMOVE(s1, RAX);
+ M_INTMOVE(s2, REG_ITMP3);
+ emit_arithmetic_check(cd, iptr, REG_ITMP3);
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cqto(cd);
- x86_64_idiv_reg(cd, REG_ITMP3);
+ M_MOV(RDX, REG_ITMP2); /* save RDX (it's an argument register) */
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RAX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ /* check as described in jvm spec */
+ disp = dseg_add_s8(cd, 0x8000000000000000LL);
+ M_LCMP_MEMBASE(RIP, -((cd->mcodeptr + 7) - cd->mcodebase) + disp, RAX);
+ M_BNE(4 + 6);
+ M_LCMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_BEQ(2 + 3); /* 6 bytes */
- } else {
- M_INTMOVE(RAX, iptr->dst->regoff);
+ emit_cqto(cd); /* 2 bytes */
+ emit_idiv_reg(cd, REG_ITMP3); /* 3 bytes */
- if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
- }
- }
+ M_INTMOVE(RAX, d);
+ emit_store_dst(jd, iptr, d);
+ dst = VAROP(iptr->dst);
+ if (IS_INMEMORY(dst->flags) || (dst->vv.regoff != RDX))
+ M_MOV(REG_ITMP2, RDX); /* restore RDX */
break;
case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- if (src->prev->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->prev->regoff * 8, REG_ITMP1);
-
- } else {
- M_INTMOVE(src->prev->regoff, REG_ITMP1);
- }
-
- if (src->flags & INMEMORY) {
- x86_64_mov_membase_reg(cd, REG_SP, src->regoff * 8, REG_ITMP3);
-
- } else {
- M_INTMOVE(src->regoff, REG_ITMP3);
- }
- gen_div_check(src);
+ s1 = emit_load_s1(jd, iptr, RAX);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP3);
+ d = codegen_reg_of_dst(jd, iptr, RDX);
- x86_64_mov_imm_reg(cd, 0x8000000000000000LL, REG_ITMP2); /* check as described in jvm spec */
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
- x86_64_jcc(cd, X86_64_CC_NE, 2 + 4 + 6);
- x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 2 + 3); /* 6 bytes */
+ M_INTMOVE(s1, RAX);
+ M_INTMOVE(s2, REG_ITMP3);
+ emit_arithmetic_check(cd, iptr, REG_ITMP3);
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
- x86_64_cqto(cd);
- x86_64_idiv_reg(cd, REG_ITMP3);
+ M_MOV(RDX, REG_ITMP2); /* save RDX (it's an argument register) */
- if (iptr->dst->flags & INMEMORY) {
- x86_64_mov_reg_membase(cd, RDX, REG_SP, iptr->dst->regoff * 8);
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
+ /* check as described in jvm spec */
+ disp = dseg_add_s8(cd, 0x8000000000000000LL);
+ M_LCMP_MEMBASE(RIP, -((cd->mcodeptr + 7) - cd->mcodebase) + disp, REG_ITMP1);
+ M_BNE(3 + 4 + 6);
+ M_LXOR(RDX, RDX); /* 3 bytes */
+ M_LCMP_IMM(-1, REG_ITMP3); /* 4 bytes */
+ M_BEQ(2 + 3); /* 6 bytes */
- } else {
- M_INTMOVE(RDX, iptr->dst->regoff);
+ emit_cqto(cd); /* 2 bytes */
+ emit_idiv_reg(cd, REG_ITMP3); /* 3 bytes */
- if (iptr->dst->regoff != RDX) {
- x86_64_mov_reg_reg(cd, REG_ITMP2, RDX); /* restore %rdx */
- }
- }
+ M_INTMOVE(RDX, d);
+ emit_store_dst(jd, iptr, d);
+ dst = VAROP(iptr->dst);
+ if (IS_INMEMORY(dst->flags) || (dst->vv.regoff != RDX))
+ M_MOV(REG_ITMP2, RDX); /* restore RDX */
break;
case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_lea_membase_reg(cd, REG_ITMP1, (1 << iptr->val.i) - 1, REG_ITMP2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, REG_ITMP1);
- x86_64_shift_imm_reg(cd, X86_64_SAR, iptr->val.i, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alu_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_lea_membase_reg(cd, REG_ITMP1, (1 << iptr->sx.val.i) - 1, REG_ITMP2);
+ emit_cmovcc_reg_reg(cd, CC_LE, REG_ITMP2, REG_ITMP1);
+ emit_shift_imm_reg(cd, SHIFT_SAR, iptr->sx.val.i, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
- /* val.l = constant */
+ /* sx.val.l = constant */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
M_INTMOVE(s1, REG_ITMP1);
- x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP1);
- x86_64_lea_membase_reg(cd, REG_ITMP1, iptr->val.i, REG_ITMP2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_G, REG_ITMP1, REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_AND, -1 - (iptr->val.i), REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_reg_reg(cd, REG_ITMP1, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_alu_imm_reg(cd, ALU_CMP, -1, REG_ITMP1);
+ emit_lea_membase_reg(cd, REG_ITMP1, iptr->sx.val.i, REG_ITMP2);
+ emit_cmovcc_reg_reg(cd, CC_G, REG_ITMP1, REG_ITMP2);
+ emit_alu_imm_reg(cd, ALU_AND, -1 - (iptr->sx.val.i), REG_ITMP2);
+ emit_alu_reg_reg(cd, ALU_SUB, REG_ITMP2, REG_ITMP1);
+ emit_mov_reg_reg(cd, REG_ITMP1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SHL, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_ishift(jd, SHIFT_SHL, iptr);
break;
case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SHL, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_ISLL_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SAR, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_ishift(jd, SHIFT_SAR, iptr);
break;
case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SAR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_ISRA_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishift(cd, X86_64_SHR, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_ishift(jd, SHIFT_SHR, iptr);
break;
case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ishiftconst(cd, X86_64_SHR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_ISRL_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SHL, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_lshift(jd, SHIFT_SHL, iptr);
break;
case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SHL, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_LSLL_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SAR, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_lshift(jd, SHIFT_SAR, iptr);
break;
case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SAR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LSRA_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshift(cd, X86_64_SHR, src, iptr);
+ d = codegen_reg_of_dst(jd, iptr, REG_NULL);
+ emit_lshift(jd, SHIFT_SHR, iptr);
break;
case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
- /* val.l = constant */
+ /* sx.val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lshiftconst(cd, X86_64_SHR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_INTMOVE(s1, d);
+ M_LSRL_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_IAND(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IAND(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IAND_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_LAND(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LAND(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
- /* val.l = constant */
+ /* sx.val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_AND, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LAND_IMM(iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LAND(REG_ITMP2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_IOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IOR(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IOR_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_LOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LOR(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
- /* val.l = constant */
+ /* sx.val.l = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_OR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LOR_IMM(iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LOR(REG_ITMP2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialu(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_IXOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_IXOR(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
- /* val.i = constant */
+ /* sx.val.i = constant */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_ialuconst(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ M_IXOR_IMM(iptr->sx.val.i, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_lalu(cd, X86_64_XOR, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ if (s2 == d)
+ M_LXOR(s1, d);
+ else {
+ M_INTMOVE(s1, d);
+ M_LXOR(s2, d);
+ }
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
- /* val.l = constant */
-
- d = reg_of_var(rd, iptr->dst, REG_NULL);
- x86_64_emit_laluconst(cd, X86_64_XOR, src, iptr);
- break;
-
-
- case ICMD_IINC: /* ..., value ==> ..., value + constant */
- /* op1 = variable, val.i = constant */
-
- /* using inc and dec is definitely faster than add -- tested */
- /* with sieve */
+ /* sx.val.l = constant */
- var = &(rd->locals[iptr->op1][TYPE_INT]);
- d = var->regoff;
- if (var->flags & INMEMORY) {
- if (iptr->val.i == 1) {
- x86_64_incl_membase(cd, REG_SP, d * 8);
-
- } else if (iptr->val.i == -1) {
- x86_64_decl_membase(cd, REG_SP, d * 8);
-
- } else {
- x86_64_alul_imm_membase(cd, X86_64_ADD, iptr->val.i, REG_SP, d * 8);
- }
-
- } else {
- if (iptr->val.i == 1) {
- x86_64_incl_reg(cd, d);
-
- } else if (iptr->val.i == -1) {
- x86_64_decl_reg(cd, d);
-
- } else {
- x86_64_alul_imm_reg(cd, X86_64_ADD, iptr->val.i, d);
- }
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_INTMOVE(s1, d);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LXOR_IMM(iptr->sx.val.l, d);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LXOR(REG_ITMP2, d);
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FNEG: /* ..., value ==> ..., - value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- a = dseg_adds4(cd, 0x80000000);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ disp = dseg_add_s4(cd, 0x80000000);
M_FLTMOVE(s1, d);
- x86_64_movss_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + a, REG_FTMP2);
- x86_64_xorps_reg_reg(cd, REG_FTMP2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movss_membase_reg(cd, RIP, -((cd->mcodeptr + 9) - cd->mcodebase) + disp, REG_FTMP2);
+ emit_xorps_reg_reg(cd, REG_FTMP2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DNEG: /* ..., value ==> ..., - value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- a = dseg_adds8(cd, 0x8000000000000000);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ disp = dseg_add_s8(cd, 0x8000000000000000);
M_FLTMOVE(s1, d);
- x86_64_movd_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 9) - (s8) cd->mcodebase) + a, REG_FTMP2);
- x86_64_xorpd_reg_reg(cd, REG_FTMP2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ emit_movd_membase_reg(cd, RIP, -((cd->mcodeptr + 9) - cd->mcodebase) + disp, REG_FTMP2);
+ emit_xorpd_reg_reg(cd, REG_FTMP2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_addss_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_addss_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ if (s2 == d)
+ M_FADD(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_addss_reg_reg(cd, s2, d);
+ M_FADD(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DADD: /* ..., val1, val2 ==> ..., val1 + val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_addsd_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_addsd_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ if (s2 == d)
+ M_DADD(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_addsd_reg_reg(cd, s2, d);
+ M_DADD(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_subss_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_FSUB(s2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_subsd_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_DSUB(s2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_mulss_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_mulss_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ if (s2 == d)
+ M_FMUL(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_mulss_reg_reg(cd, s2, d);
+ M_FMUL(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (s1 == d) {
- x86_64_mulsd_reg_reg(cd, s2, d);
- } else if (s2 == d) {
- x86_64_mulsd_reg_reg(cd, s1, d);
- } else {
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ if (s2 == d)
+ M_DMUL(s1, d);
+ else {
M_FLTMOVE(s1, d);
- x86_64_mulsd_reg_reg(cd, s2, d);
+ M_DMUL(s2, d);
}
- store_reg_to_var_flt(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_divss_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_FDIV(s2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
if (s2 == d) {
M_FLTMOVE(s2, REG_FTMP2);
s2 = REG_FTMP2;
}
M_FLTMOVE(s1, d);
- x86_64_divsd_reg_reg(cd, s2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ M_DDIV(s2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_I2F: /* ..., value ==> ..., (float) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2ss_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CVTIF(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_I2D: /* ..., value ==> ..., (double) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2sd_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CVTID(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_L2F: /* ..., value ==> ..., (float) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2ssq_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CVTLF(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_L2D: /* ..., value ==> ..., (double) value */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_cvtsi2sdq_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_CVTLD(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_F2I: /* ..., value ==> ..., (int) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttss2si_reg_reg(cd, s1, d);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_CVTFI(s1, d);
+ M_ICMP_IMM(0x80000000, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (s8) asm_builtin_f2i, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_f2i, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_D2I: /* ..., value ==> ..., (int) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttsd2si_reg_reg(cd, s1, d);
- x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_CVTDI(s1, d);
+ M_ICMP_IMM(0x80000000, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (s8) asm_builtin_d2i, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_d2i, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_F2L: /* ..., value ==> ..., (long) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttss2siq_reg_reg(cd, s1, d);
- x86_64_mov_imm_reg(cd, 0x8000000000000000, REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_CVTFL(s1, d);
+ M_MOV_IMM(0x8000000000000000, REG_ITMP2);
+ M_LCMP(REG_ITMP2, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (s8) asm_builtin_f2l, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_f2l, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_D2L: /* ..., value ==> ..., (long) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_cvttsd2siq_reg_reg(cd, s1, d);
- x86_64_mov_imm_reg(cd, 0x8000000000000000, REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, d); /* corner cases */
- a = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 + ((REG_RESULT == d) ? 0 : 3);
- x86_64_jcc(cd, X86_64_CC_NE, a);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_CVTDL(s1, d);
+ M_MOV_IMM(0x8000000000000000, REG_ITMP2);
+ M_LCMP(REG_ITMP2, d); /* corner cases */
+ disp = ((s1 == REG_FTMP1) ? 0 : 5) + 10 + 3 +
+ ((REG_RESULT == d) ? 0 : 3);
+ M_BNE(disp);
M_FLTMOVE(s1, REG_FTMP1);
- x86_64_mov_imm_reg(cd, (s8) asm_builtin_d2l, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ M_MOV_IMM(asm_builtin_d2l, REG_ITMP2);
+ M_CALL(REG_ITMP2);
M_INTMOVE(REG_RESULT, d);
- store_reg_to_var_int(iptr->dst, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_F2D: /* ..., value ==> ..., (double) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- x86_64_cvtss2sd_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ M_CVTFD(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_D2F: /* ..., value ==> ..., (float) value */
- var_to_reg_flt(s1, src, REG_FTMP1);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- x86_64_cvtsd2ss_reg_reg(cd, s1, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ M_CVTDF(s1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- x86_64_mov_imm_reg(cd, 1, REG_ITMP1);
- x86_64_mov_imm_reg(cd, -1, REG_ITMP2);
- x86_64_ucomiss_reg_reg(cd, s1, s2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_B, REG_ITMP1, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_A, REG_ITMP2, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_P, REG_ITMP2, d); /* treat unordered as GT */
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_CLR(d);
+ M_MOV_IMM(1, REG_ITMP1);
+ M_MOV_IMM(-1, REG_ITMP2);
+ emit_ucomiss_reg_reg(cd, s1, s2);
+ M_CMOVULT(REG_ITMP1, d);
+ M_CMOVUGT(REG_ITMP2, d);
+ M_CMOVP(REG_ITMP2, d); /* treat unordered as GT */
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- x86_64_mov_imm_reg(cd, 1, REG_ITMP1);
- x86_64_mov_imm_reg(cd, -1, REG_ITMP2);
- x86_64_ucomiss_reg_reg(cd, s1, s2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_B, REG_ITMP1, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_A, REG_ITMP2, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_P, REG_ITMP1, d); /* treat unordered as LT */
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_CLR(d);
+ M_MOV_IMM(1, REG_ITMP1);
+ M_MOV_IMM(-1, REG_ITMP2);
+ emit_ucomiss_reg_reg(cd, s1, s2);
+ M_CMOVULT(REG_ITMP1, d);
+ M_CMOVUGT(REG_ITMP2, d);
+ M_CMOVP(REG_ITMP1, d); /* treat unordered as LT */
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- x86_64_mov_imm_reg(cd, 1, REG_ITMP1);
- x86_64_mov_imm_reg(cd, -1, REG_ITMP2);
- x86_64_ucomisd_reg_reg(cd, s1, s2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_B, REG_ITMP1, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_A, REG_ITMP2, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_P, REG_ITMP2, d); /* treat unordered as GT */
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_CLR(d);
+ M_MOV_IMM(1, REG_ITMP1);
+ M_MOV_IMM(-1, REG_ITMP2);
+ emit_ucomisd_reg_reg(cd, s1, s2);
+ M_CMOVULT(REG_ITMP1, d);
+ M_CMOVUGT(REG_ITMP2, d);
+ M_CMOVP(REG_ITMP2, d); /* treat unordered as GT */
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
/* == => 0, < => 1, > => -1 */
- var_to_reg_flt(s1, src->prev, REG_FTMP1);
- var_to_reg_flt(s2, src, REG_FTMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- x86_64_mov_imm_reg(cd, 1, REG_ITMP1);
- x86_64_mov_imm_reg(cd, -1, REG_ITMP2);
- x86_64_ucomisd_reg_reg(cd, s1, s2);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_B, REG_ITMP1, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_A, REG_ITMP2, d);
- x86_64_cmovcc_reg_reg(cd, X86_64_CC_P, REG_ITMP1, d); /* treat unordered as LT */
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ s2 = emit_load_s2(jd, iptr, REG_FTMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ M_CLR(d);
+ M_MOV_IMM(1, REG_ITMP1);
+ M_MOV_IMM(-1, REG_ITMP2);
+ emit_ucomisd_reg_reg(cd, s1, s2);
+ M_CMOVULT(REG_ITMP1, d);
+ M_CMOVUGT(REG_ITMP2, d);
+ M_CMOVP(REG_ITMP1, d); /* treat unordered as LT */
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_ARRAYLENGTH: /* ..., arrayref ==> ..., (int) length */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- gen_nullptr_check(s1);
- x86_64_movl_membase_reg(cd, s1, OFFSET(java_arrayheader, size), d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ M_ILD(d, s1, OFFSET(java_array_t, size));
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
+ case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_mov_memindex_reg(cd, OFFSET(java_objectarray, data[0]), s1, s2, 3, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movsbq_memindex_reg(cd, OFFSET(java_bytearray_t, data[0]), s1, s2, 0, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
+ case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_mov_memindex_reg(cd, OFFSET(java_longarray, data[0]), s1, s2, 3, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movzwq_memindex_reg(cd, OFFSET(java_chararray_t, data[0]), s1, s2, 1, d);
+ emit_store_dst(jd, iptr, d);
+ break;
+
+ case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movswq_memindex_reg(cd, OFFSET(java_shortarray_t, data[0]), s1, s2, 1, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movl_memindex_reg(cd, OFFSET(java_intarray, data[0]), s1, s2, 2, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movl_memindex_reg(cd, OFFSET(java_intarray_t, data[0]), s1, s2, 2, d);
+ emit_store_dst(jd, iptr, d);
+ break;
+
+ case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_mov_memindex_reg(cd, OFFSET(java_longarray_t, data[0]), s1, s2, 3, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movss_memindex_reg(cd, OFFSET(java_floatarray, data[0]), s1, s2, 2, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movss_memindex_reg(cd, OFFSET(java_floatarray_t, data[0]), s1, s2, 2, d);
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_FTMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movsd_memindex_reg(cd, OFFSET(java_doublearray, data[0]), s1, s2, 3, d);
- store_reg_to_var_flt(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movsd_memindex_reg(cd, OFFSET(java_doublearray_t, data[0]), s1, s2, 3, d);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
+ case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movzwq_memindex_reg(cd, OFFSET(java_chararray, data[0]), s1, s2, 1, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_mov_memindex_reg(cd, OFFSET(java_objectarray_t, data[0]), s1, s2, 3, d);
+ emit_store_dst(jd, iptr, d);
+ break;
- case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movswq_memindex_reg(cd, OFFSET(java_shortarray, data[0]), s1, s2, 1, d);
- store_reg_to_var_int(iptr->dst, d);
+ case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_movb_reg_memindex(cd, s3, OFFSET(java_bytearray_t, data[0]), s1, s2, 0);
break;
- case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
+ case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movsbq_memindex_reg(cd, OFFSET(java_bytearray, data[0]), s1, s2, 0, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_chararray_t, data[0]), s1, s2, 1);
break;
+ case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
- case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
-
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_mov_reg_memindex(cd, s3, OFFSET(java_objectarray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_movw_reg_memindex(cd, s3, OFFSET(java_shortarray_t, data[0]), s1, s2, 1);
break;
- case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
+ case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_mov_reg_memindex(cd, s3, OFFSET(java_longarray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_movl_reg_memindex(cd, s3, OFFSET(java_intarray_t, data[0]), s1, s2, 2);
break;
- case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
+ case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movl_reg_memindex(cd, s3, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_longarray_t, data[0]), s1, s2, 3);
break;
case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_flt(s3, src, REG_FTMP3);
- x86_64_movss_reg_memindex(cd, s3, OFFSET(java_floatarray, data[0]), s1, s2, 2);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_FTMP3);
+ emit_movss_reg_memindex(cd, s3, OFFSET(java_floatarray_t, data[0]), s1, s2, 2);
break;
case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_flt(s3, src, REG_FTMP3);
- x86_64_movsd_reg_memindex(cd, s3, OFFSET(java_doublearray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_FTMP3);
+ emit_movsd_reg_memindex(cd, s3, OFFSET(java_doublearray_t, data[0]), s1, s2, 3);
break;
- case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
+ case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movw_reg_memindex(cd, s3, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+
+ M_MOV(s1, REG_A0);
+ M_MOV(s3, REG_A1);
+ M_MOV_IMM(BUILTIN_canstore, REG_ITMP1);
+ M_CALL(REG_ITMP1);
+ emit_exception_check(cd, iptr);
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ s3 = emit_load_s3(jd, iptr, REG_ITMP3);
+ emit_mov_reg_memindex(cd, s3, OFFSET(java_objectarray_t, data[0]), s1, s2, 3);
break;
- case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movw_reg_memindex(cd, s3, OFFSET(java_shortarray, data[0]), s1, s2, 1);
+ case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movb_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_bytearray_t, data[0]), s1, s2, 0);
break;
- case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
+ case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev->prev, REG_ITMP1);
- var_to_reg_int(s2, src->prev, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- var_to_reg_int(s3, src, REG_ITMP3);
- x86_64_movb_reg_memindex(cd, s3, OFFSET(java_bytearray, data[0]), s1, s2, 0);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_chararray_t, data[0]), s1, s2, 1);
+ break;
+
+ case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movw_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_shortarray_t, data[0]), s1, s2, 1);
break;
case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_movl_imm_memindex(cd, iptr->val.i, OFFSET(java_intarray, data[0]), s1, s2, 2);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_movl_imm_memindex(cd, iptr->sx.s23.s3.constval, OFFSET(java_intarray_t, data[0]), s1, s2, 2);
break;
case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
-
- if (IS_IMM32(iptr->val.l)) {
- x86_64_mov_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
- } else {
- x86_64_movl_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
- x86_64_movl_imm_memindex(cd, (u4) (iptr->val.l >> 32), OFFSET(java_longarray, data[0]) + 4, s1, s2, 3);
+ if (IS_IMM32(iptr->sx.s23.s3.constval)) {
+ emit_mov_imm_memindex(cd, (u4) (iptr->sx.s23.s3.constval & 0x00000000ffffffff), OFFSET(java_longarray_t, data[0]), s1, s2, 3);
+ }
+ else {
+ emit_movl_imm_memindex(cd, (u4) (iptr->sx.s23.s3.constval & 0x00000000ffffffff), OFFSET(java_longarray_t, data[0]), s1, s2, 3);
+ emit_movl_imm_memindex(cd, (u4) (iptr->sx.s23.s3.constval >> 32), OFFSET(java_longarray_t, data[0]) + 4, s1, s2, 3);
}
break;
case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
- }
- x86_64_mov_imm_memindex(cd, 0, OFFSET(java_objectarray, data[0]), s1, s2, 3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ /* implicit null-pointer check */
+ emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
+ emit_mov_imm_memindex(cd, 0, OFFSET(java_objectarray_t, data[0]), s1, s2, 3);
break;
- case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
+ case ICMD_GETSTATIC: /* ... ==> ..., value */
+
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = dseg_add_unique_address(cd, NULL);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+
+ /* must be calculated before codegen_add_patch_ref */
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
+
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_get_putstatic, uf, disp);
+
+/* PROFILE_CYCLE_START; */
}
- x86_64_movb_imm_memindex(cd, iptr->val.i, OFFSET(java_bytearray, data[0]), s1, s2, 0);
- break;
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = dseg_add_address(cd, fi->value);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
- case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
+ if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
+ PROFILE_CYCLE_STOP;
+
+ codegen_add_patch_ref(cd, PATCHER_clinit, fi->class, 0);
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
+ PROFILE_CYCLE_START;
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
+ }
+ }
+
+ /* This approach is much faster than moving the field
+ address inline into a register. */
+
+ M_ALD(REG_ITMP1, RIP, disp);
+
+ switch (fieldtype) {
+ case TYPE_INT:
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_ILD(d, REG_ITMP1, 0);
+ break;
+ case TYPE_LNG:
+ case TYPE_ADR:
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ M_LLD(d, REG_ITMP1, 0);
+ break;
+ case TYPE_FLT:
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_FLD(d, REG_ITMP1, 0);
+ break;
+ case TYPE_DBL:
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_DLD(d, REG_ITMP1, 0);
+ break;
}
- x86_64_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_chararray, data[0]), s1, s2, 1);
+ emit_store_dst(jd, iptr, d);
break;
- case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
+ case ICMD_PUTSTATIC: /* ..., value ==> ... */
+
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = dseg_add_unique_address(cd, NULL);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+
+ /* must be calculated before codegen_add_patch_ref */
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- var_to_reg_int(s2, src, REG_ITMP2);
- if (iptr->op1 == 0) {
- gen_nullptr_check(s1);
- gen_bound_check;
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_get_putstatic, uf, disp);
+
+/* PROFILE_CYCLE_START; */
}
- x86_64_movw_imm_memindex(cd, iptr->val.i, OFFSET(java_shortarray, data[0]), s1, s2, 1);
- break;
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = dseg_add_address(cd, fi->value);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+ if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
+ PROFILE_CYCLE_STOP;
- case ICMD_PUTSTATIC: /* ..., value ==> ... */
- /* op1 = type, val.a = field address */
-
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addclinitref(cd, cd->mcodeptr, ((fieldinfo *) iptr->val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
+ codegen_add_patch_ref(cd, PATCHER_clinit, fi->class, 0);
+
+ PROFILE_CYCLE_START;
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
}
}
- /* This approach is much faster than moving the field address */
- /* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
- x86_64_mov_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 7) - (s8) cd->mcodebase) + a, REG_ITMP2);
- switch (iptr->op1) {
+ /* This approach is much faster than moving the field
+ address inline into a register. */
+
+ M_ALD(REG_ITMP1, RIP, disp);
+
+ switch (fieldtype) {
case TYPE_INT:
- var_to_reg_int(s2, src, REG_ITMP1);
- x86_64_movl_reg_membase(cd, s2, REG_ITMP2, 0);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP2);
+ M_IST(s1, REG_ITMP1, 0);
break;
case TYPE_LNG:
case TYPE_ADR:
- var_to_reg_int(s2, src, REG_ITMP1);
- x86_64_mov_reg_membase(cd, s2, REG_ITMP2, 0);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP2);
+ M_LST(s1, REG_ITMP1, 0);
break;
case TYPE_FLT:
- var_to_reg_flt(s2, src, REG_FTMP1);
- x86_64_movss_reg_membase(cd, s2, REG_ITMP2, 0);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ M_FST(s1, REG_ITMP1, 0);
break;
case TYPE_DBL:
- var_to_reg_flt(s2, src, REG_FTMP1);
- x86_64_movsd_reg_membase(cd, s2, REG_ITMP2, 0);
+ s1 = emit_load_s1(jd, iptr, REG_FTMP1);
+ M_DST(s1, REG_ITMP1, 0);
break;
}
break;
case ICMD_PUTSTATICCONST: /* ... ==> ... */
/* val = value (in current instruction) */
- /* op1 = type, val.a = field address (in */
/* following NOP) */
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr[1].val.a)->class->initialized) {
- codegen_addclinitref(cd, cd->mcodeptr, ((fieldinfo *) iptr[1].val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = dseg_add_unique_address(cd, NULL);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+
+ /* must be calculated before codegen_add_patch_ref */
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
+
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_get_putstatic, uf, disp);
+
+/* PROFILE_CYCLE_START; */
+ }
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = dseg_add_address(cd, fi->value);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+
+ if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->class)) {
+ PROFILE_CYCLE_STOP;
+
+ codegen_add_patch_ref(cd, PATCHER_clinit, fi->class, 0);
+
+ PROFILE_CYCLE_START;
+
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
}
}
- /* This approach is much faster than moving the field address */
- /* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr[1].val.a)->value));
- x86_64_mov_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP1);
- switch (iptr->op1) {
+ /* This approach is much faster than moving the field
+ address inline into a register. */
+
+ M_ALD(REG_ITMP1, RIP, disp);
+
+ switch (fieldtype) {
case TYPE_INT:
case TYPE_FLT:
- x86_64_movl_imm_membase(cd, iptr->val.i, REG_ITMP1, 0);
+ M_IST_IMM(iptr->sx.s23.s2.constval, REG_ITMP1, 0);
break;
case TYPE_LNG:
case TYPE_ADR:
case TYPE_DBL:
- if (IS_IMM32(iptr->val.l)) {
- x86_64_mov_imm_membase(cd, iptr->val.l, REG_ITMP1, 0);
- } else {
- x86_64_movl_imm_membase(cd, iptr->val.l, REG_ITMP1, 0);
- x86_64_movl_imm_membase(cd, iptr->val.l >> 32, REG_ITMP1, 4);
+ if (IS_IMM32(iptr->sx.s23.s2.constval))
+ M_LST_IMM32(iptr->sx.s23.s2.constval, REG_ITMP1, 0);
+ else {
+ M_IST_IMM(iptr->sx.s23.s2.constval, REG_ITMP1, 0);
+ M_IST_IMM(iptr->sx.s23.s2.constval >> 32, REG_ITMP1, 4);
}
break;
}
break;
- case ICMD_GETSTATIC: /* ... ==> ..., value */
- /* op1 = type, val.a = field address */
-
- /* If the static fields' class is not yet initialized, we do it */
- /* now. The call code is generated later. */
- if (!((fieldinfo *) iptr->val.a)->class->initialized) {
- codegen_addclinitref(cd, cd->mcodeptr, ((fieldinfo *) iptr->val.a)->class);
-
- /* This is just for debugging purposes. Is very difficult to */
- /* read patched code. Here we patch the following 5 nop's */
- /* so that the real code keeps untouched. */
- if (showdisassemble) {
- x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
- x86_64_nop(cd); x86_64_nop(cd);
- }
- }
+ case ICMD_GETFIELD: /* ... ==> ..., value */
+
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = 0;
+
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_get_putfield, uf, 0);
- /* This approach is much faster than moving the field address */
- /* inline into a register. */
- a = dseg_addaddress(cd, &(((fieldinfo *) iptr->val.a)->value));
- x86_64_mov_membase_reg(cd, RIP, -(((s8) cd->mcodeptr + 7) - (s8) cd->mcodebase) + a, REG_ITMP2);
- switch (iptr->op1) {
+/* PROFILE_CYCLE_START; */
+ }
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = fi->offset;
+ }
+
+ /* implicit null-pointer check */
+ switch (fieldtype) {
case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_ILD32(d, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_mov_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_int(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
+ M_LLD32(d, s1, disp);
break;
case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movss_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_FLD32(d, s1, disp);
break;
case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movsd_membase_reg(cd, REG_ITMP2, 0, d);
- store_reg_to_var_flt(iptr->dst, d);
+ d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
+ M_DLD32(d, s1, disp);
break;
}
+ emit_store_dst(jd, iptr, d);
break;
case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
- /* op1 = type, val.i = field offset */
- a = ((fieldinfo *)(iptr->val.a))->offset;
- var_to_reg_int(s1, src->prev, REG_ITMP1);
- gen_nullptr_check(s1);
- switch (iptr->op1) {
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_IFTMP); /* REG_IFTMP == REG_ITMP2 */
+
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = 0;
+
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_get_putfield, uf, 0);
+
+/* PROFILE_CYCLE_START; */
+ }
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = fi->offset;
+ }
+
+ /* implicit null-pointer check */
+ switch (fieldtype) {
case TYPE_INT:
- var_to_reg_int(s2, src, REG_ITMP2);
- x86_64_movl_reg_membase(cd, s2, s1, a);
+ M_IST32(s2, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
- var_to_reg_int(s2, src, REG_ITMP2);
- x86_64_mov_reg_membase(cd, s2, s1, a);
+ M_LST32(s2, s1, disp);
break;
case TYPE_FLT:
- var_to_reg_flt(s2, src, REG_FTMP2);
- x86_64_movss_reg_membase(cd, s2, s1, a);
+ M_FST32(s2, s1, disp);
break;
case TYPE_DBL:
- var_to_reg_flt(s2, src, REG_FTMP2);
- x86_64_movsd_reg_membase(cd, s2, s1, a);
+ M_DST32(s2, s1, disp);
break;
}
break;
case ICMD_PUTFIELDCONST: /* ..., objectref, value ==> ... */
/* val = value (in current instruction) */
- /* op1 = type, val.a = field address (in */
/* following NOP) */
- a = ((fieldinfo *) iptr[1].val.a)->offset;
- var_to_reg_int(s1, src, REG_ITMP1);
- gen_nullptr_check(s1);
- switch (iptr->op1) {
- case TYPE_INT:
- case TYPE_FLT:
- x86_64_movl_imm_membase(cd, iptr->val.i, s1, a);
- break;
- case TYPE_LNG:
- case TYPE_ADR:
- case TYPE_DBL:
- if (IS_IMM32(iptr->val.l)) {
- x86_64_mov_imm_membase(cd, iptr->val.l, s1, a);
- } else {
- x86_64_movl_imm_membase(cd, iptr->val.l, s1, a);
- x86_64_movl_imm_membase(cd, iptr->val.l >> 32, s1, a + 4);
- }
- break;
- }
- break;
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- case ICMD_GETFIELD: /* ... ==> ..., value */
- /* op1 = type, val.i = field offset */
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uf = iptr->sx.s23.s3.uf;
+ fieldtype = uf->fieldref->parseddesc.fd->type;
+ disp = 0;
+
+/* PROFILE_CYCLE_STOP; */
+
+ codegen_add_patch_ref(cd, PATCHER_putfieldconst, uf, 0);
+
+/* PROFILE_CYCLE_START; */
+ }
+ else {
+ fi = iptr->sx.s23.s3.fmiref->p.field;
+ fieldtype = fi->type;
+ disp = fi->offset;
+ }
- a = ((fieldinfo *)(iptr->val.a))->offset;
- var_to_reg_int(s1, src, REG_ITMP1);
- gen_nullptr_check(s1);
- switch (iptr->op1) {
+ /* implicit null-pointer check */
+ switch (fieldtype) {
case TYPE_INT:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_movl_membase_reg(cd, s1, a, d);
- store_reg_to_var_int(iptr->dst, d);
+ case TYPE_FLT:
+ M_IST32_IMM(iptr->sx.s23.s2.constval, s1, disp);
break;
case TYPE_LNG:
case TYPE_ADR:
- d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- x86_64_mov_membase_reg(cd, s1, a, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
- case TYPE_FLT:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movss_membase_reg(cd, s1, a, d);
- store_reg_to_var_flt(iptr->dst, d);
- break;
- case TYPE_DBL:
- d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- x86_64_movsd_membase_reg(cd, s1, a, d);
- store_reg_to_var_flt(iptr->dst, d);
+ case TYPE_DBL:
+ /* XXX why no check for IS_IMM32? */
+ M_IST32_IMM(iptr->sx.s23.s2.constval, s1, disp);
+ M_IST32_IMM(iptr->sx.s23.s2.constval >> 32, s1, disp + 4);
break;
}
break;
case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
M_INTMOVE(s1, REG_ITMP1_XPTR);
- x86_64_call_imm(cd, 0); /* passing exception pointer */
- x86_64_pop_reg(cd, REG_ITMP2_XPC);
+ PROFILE_CYCLE_STOP;
+
+#ifdef ENABLE_VERIFIER
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uc = iptr->sx.s23.s2.uc;
+
+ codegen_add_patch_ref(cd, PATCHER_athrow_areturn, uc, 0);
+ }
+#endif /* ENABLE_VERIFIER */
+
+ M_CALL_IMM(0); /* passing exception pc */
+ M_POP(REG_ITMP2_XPC);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
+ M_MOV_IMM(asm_handle_exception, REG_ITMP3);
+ M_JMP(REG_ITMP3);
break;
case ICMD_GOTO: /* ... ==> ... */
- /* op1 = target JavaVM pc */
+ case ICMD_RET:
- x86_64_jmp_imm(cd, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+ emit_br(cd, iptr->dst.block);
+ ALIGNCODENOP;
break;
case ICMD_JSR: /* ... ==> ... */
- /* op1 = target JavaVM pc */
- x86_64_call_imm(cd, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+ emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
+ ALIGNCODENOP;
break;
- case ICMD_RET: /* ... ==> ... */
- /* op1 = local variable */
-
- var = &(rd->locals[iptr->op1][TYPE_ADR]);
- var_to_reg_int(s1, var, REG_ITMP1);
- x86_64_jmp_reg(cd, s1);
- break;
-
case ICMD_IFNULL: /* ..., value ==> ... */
- /* op1 = target JavaVM pc */
-
- if (src->flags & INMEMORY) {
- x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8);
-
- } else {
- x86_64_test_reg_reg(cd, src->regoff, src->regoff);
- }
- x86_64_jcc(cd, X86_64_CC_E, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
- break;
-
- case ICMD_IFNONNULL: /* ..., value ==> ... */
- /* op1 = target JavaVM pc */
-
- if (src->flags & INMEMORY) {
- x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8);
+ case ICMD_IFNONNULL:
- } else {
- x86_64_test_reg_reg(cd, src->regoff, src->regoff);
- }
- x86_64_jcc(cd, X86_64_CC_NE, 0);
- codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ M_TEST(s1);
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
break;
case ICMD_IFEQ: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_E, src, iptr);
- break;
+ case ICMD_IFLT:
+ case ICMD_IFLE:
+ case ICMD_IFNE:
+ case ICMD_IFGT:
+ case ICMD_IFGE:
- case ICMD_IFLT: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_L, src, iptr);
- break;
-
- case ICMD_IFLE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_LE, src, iptr);
- break;
-
- case ICMD_IFNE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_NE, src, iptr);
- break;
-
- case ICMD_IFGT: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_G, src, iptr);
- break;
-
- case ICMD_IFGE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.i = constant */
-
- x86_64_emit_ifcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ M_ICMP_IMM(iptr->sx.val.i, s1);
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
break;
case ICMD_IF_LEQ: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
-
- x86_64_emit_if_lcc(cd, X86_64_CC_E, src, iptr);
- break;
-
- case ICMD_IF_LLT: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
+ case ICMD_IF_LNE:
+ case ICMD_IF_LLT:
+ case ICMD_IF_LGE:
+ case ICMD_IF_LGT:
+ case ICMD_IF_LLE:
- x86_64_emit_if_lcc(cd, X86_64_CC_L, src, iptr);
- break;
-
- case ICMD_IF_LLE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
-
- x86_64_emit_if_lcc(cd, X86_64_CC_LE, src, iptr);
- break;
-
- case ICMD_IF_LNE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
-
- x86_64_emit_if_lcc(cd, X86_64_CC_NE, src, iptr);
- break;
-
- case ICMD_IF_LGT: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
-
- x86_64_emit_if_lcc(cd, X86_64_CC_G, src, iptr);
- break;
-
- case ICMD_IF_LGE: /* ..., value ==> ... */
- /* op1 = target JavaVM pc, val.l = constant */
-
- x86_64_emit_if_lcc(cd, X86_64_CC_GE, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ if (IS_IMM32(iptr->sx.val.l))
+ M_LCMP_IMM(iptr->sx.val.l, s1);
+ else {
+ M_MOV_IMM(iptr->sx.val.l, REG_ITMP2);
+ M_LCMP(REG_ITMP2, s1);
+ }
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LEQ, BRANCH_OPT_NONE);
break;
case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_icmpcc(cd, X86_64_CC_E, src, iptr);
- break;
-
- case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
- case ICMD_IF_ACMPEQ: /* op1 = target JavaVM pc */
-
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_E, src, iptr);
- break;
-
- case ICMD_IF_ICMPNE: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_icmpcc(cd, X86_64_CC_NE, src, iptr);
- break;
-
- case ICMD_IF_LCMPNE: /* ..., value, value ==> ... */
- case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
-
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_NE, src, iptr);
- break;
-
- case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_icmpcc(cd, X86_64_CC_L, src, iptr);
- break;
-
- case ICMD_IF_LCMPLT: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_L, src, iptr);
- break;
-
- case ICMD_IF_ICMPGT: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
+ case ICMD_IF_ICMPNE:
+ case ICMD_IF_ICMPLT:
+ case ICMD_IF_ICMPGE:
+ case ICMD_IF_ICMPGT:
+ case ICMD_IF_ICMPLE:
- x86_64_emit_if_icmpcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ M_ICMP(s2, s1);
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
break;
- case ICMD_IF_LCMPGT: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
+ case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
+ case ICMD_IF_ACMPNE:
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_G, src, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ M_LCMP(s2, s1);
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
break;
- case ICMD_IF_ICMPLE: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_icmpcc(cd, X86_64_CC_LE, src, iptr);
- break;
-
- case ICMD_IF_LCMPLE: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_LE, src, iptr);
- break;
-
- case ICMD_IF_ICMPGE: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_icmpcc(cd, X86_64_CC_GE, src, iptr);
- break;
-
- case ICMD_IF_LCMPGE: /* ..., value, value ==> ... */
- /* op1 = target JavaVM pc */
-
- x86_64_emit_if_lcmpcc(cd, X86_64_CC_GE, src, iptr);
- break;
-
- /* (value xx 0) ? IFxx_ICONST : ELSE_ICONST */
-
- case ICMD_ELSE_ICONST: /* handled by IFxx_ICONST */
- break;
-
- case ICMD_IFEQ_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
-
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
- }
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_E, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
-
- case ICMD_IFNE_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
+ case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
+ case ICMD_IF_LCMPNE:
+ case ICMD_IF_LCMPLT:
+ case ICMD_IF_LCMPGE:
+ case ICMD_IF_LCMPGT:
+ case ICMD_IF_LCMPLE:
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
- }
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_NE, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ s2 = emit_load_s2(jd, iptr, REG_ITMP2);
+ M_LCMP(s2, s1);
+ emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LCMPEQ, BRANCH_OPT_NONE);
break;
- case ICMD_IFLT_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
+ case ICMD_IRETURN: /* ..., retvalue ==> ... */
+ case ICMD_LRETURN:
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
- }
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_L, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
+ REPLACEMENT_POINT_RETURN(cd, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_RESULT);
+ M_INTMOVE(s1, REG_RESULT);
+ goto nowperformreturn;
- case ICMD_IFGE_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
+ case ICMD_ARETURN: /* ..., retvalue ==> ... */
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
- }
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_GE, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
+ REPLACEMENT_POINT_RETURN(cd, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_RESULT);
+ M_INTMOVE(s1, REG_RESULT);
- case ICMD_IFGT_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
+#ifdef ENABLE_VERIFIER
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ uc = iptr->sx.s23.s2.uc;
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
- }
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_G, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
+ PROFILE_CYCLE_STOP;
- case ICMD_IFLE_ICONST: /* ..., value ==> ..., constant */
- /* val.i = constant */
+ codegen_add_patch_ref(cd, PATCHER_athrow_areturn, uc, 0);
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (iptr[1].opc == ICMD_ELSE_ICONST) {
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
+ PROFILE_CYCLE_START;
}
- x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
- x86_64_testl_reg_reg(cd, s1, s1);
- x86_64_cmovccl_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
-
-
- case ICMD_IRETURN: /* ..., retvalue ==> ... */
- case ICMD_LRETURN:
- case ICMD_ARETURN:
-
- var_to_reg_int(s1, src, REG_RESULT);
- M_INTMOVE(s1, REG_RESULT);
-
+#endif /* ENABLE_VERIFIER */
goto nowperformreturn;
case ICMD_FRETURN: /* ..., retvalue ==> ... */
case ICMD_DRETURN:
- var_to_reg_flt(s1, src, REG_FRESULT);
+ REPLACEMENT_POINT_RETURN(cd, iptr);
+ s1 = emit_load_s1(jd, iptr, REG_FRESULT);
M_FLTMOVE(s1, REG_FRESULT);
-
goto nowperformreturn;
case ICMD_RETURN: /* ... ==> ... */
+ REPLACEMENT_POINT_RETURN(cd, iptr);
+
nowperformreturn:
{
s4 i, p;
-
- p = parentargs_base;
-
- /* call trace function */
- if (runverbose) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
-
- x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, 0 * 8);
- x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, 1 * 8);
-
- x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
- x86_64_mov_reg_reg(cd, REG_RESULT, rd->argintregs[1]);
- M_FLTMOVE(REG_FRESULT, rd->argfltregs[0]);
- M_FLTMOVE(REG_FRESULT, rd->argfltregs[1]);
- x86_64_mov_imm_reg(cd, (u8) builtin_displaymethodstop, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ p = cd->stackframesize;
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_RESULT);
- x86_64_movq_membase_reg(cd, REG_SP, 1 * 8, REG_FRESULT);
+#if !defined(NDEBUG)
+ if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
+ emit_verbosecall_exit(jd);
+#endif /* !defined(NDEBUG) */
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- }
-
-#if defined(USE_THREADS)
+#if defined(ENABLE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
+ M_ALD(REG_A0, REG_SP, rd->memuse * 8);
/* we need to save the proper return value */
switch (iptr->opc) {
case ICMD_IRETURN:
case ICMD_ARETURN:
case ICMD_LRETURN:
- x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, rd->maxmemuse * 8);
+ M_LST(REG_RESULT, REG_SP, rd->memuse * 8);
break;
case ICMD_FRETURN:
case ICMD_DRETURN:
- x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, rd->maxmemuse * 8);
+ M_DST(REG_FRESULT, REG_SP, rd->memuse * 8);
break;
}
- x86_64_mov_imm_reg(cd, (u8) builtin_monitorexit, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ M_MOV_IMM(LOCK_monitor_exit, REG_ITMP1);
+ M_CALL(REG_ITMP1);
/* and now restore the proper return value */
switch (iptr->opc) {
case ICMD_IRETURN:
case ICMD_ARETURN:
case ICMD_LRETURN:
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_RESULT);
+ M_LLD(REG_RESULT, REG_SP, rd->memuse * 8);
break;
case ICMD_FRETURN:
case ICMD_DRETURN:
- x86_64_movq_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_FRESULT);
+ M_DLD(REG_FRESULT, REG_SP, rd->memuse * 8);
break;
}
}
#endif
- /* restore saved registers */
- for (i = rd->savintregcnt - 1; i >= rd->maxsavintreguse; i--) {
- p--; x86_64_mov_membase_reg(cd, REG_SP, p * 8, rd->savintregs[i]);
+ /* restore saved registers */
+
+ for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
+ p--; M_LLD(rd->savintregs[i], REG_SP, p * 8);
}
- for (i = rd->savfltregcnt - 1; i >= rd->maxsavfltreguse; i--) {
- p--; x86_64_movq_membase_reg(cd, REG_SP, p * 8, rd->savfltregs[i]);
+ for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
+ p--; M_DLD(rd->savfltregs[i], REG_SP, p * 8);
}
- /* deallocate stack */
- if (parentargs_base) {
- x86_64_alu_imm_reg(cd, X86_64_ADD, parentargs_base * 8, REG_SP);
- }
+ /* deallocate stack */
- x86_64_ret(cd);
+ if (cd->stackframesize)
+ M_AADD_IMM(cd->stackframesize * 8, REG_SP);
+
+ /* generate method profiling code */
+
+ PROFILE_CYCLE_STOP;
+
+ M_RET;
}
break;
case ICMD_TABLESWITCH: /* ..., index ==> ... */
{
- s4 i, l, *s4ptr;
- void **tptr;
+ s4 i, l;
+ branch_target_t *table;
- tptr = (void **) iptr->target;
+ table = iptr->dst.table;
- s4ptr = iptr->val.a;
- l = s4ptr[1]; /* low */
- i = s4ptr[2]; /* high */
+ l = iptr->sx.s23.s2.tablelow;
+ i = iptr->sx.s23.s3.tablehigh;
- var_to_reg_int(s1, src, REG_ITMP1);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
M_INTMOVE(s1, REG_ITMP1);
- if (l != 0) {
- x86_64_alul_imm_reg(cd, X86_64_SUB, l, REG_ITMP1);
- }
+
+ if (l != 0)
+ M_ISUB_IMM(l, REG_ITMP1);
+
+ /* number of targets */
i = i - l + 1;
/* range check */
- x86_64_alul_imm_reg(cd, X86_64_CMP, i - 1, REG_ITMP1);
- x86_64_jcc(cd, X86_64_CC_A, 0);
- /* codegen_addreference(cd, BlockPtrOfPC(s4ptr[0]), cd->mcodeptr); */
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ M_ICMP_IMM(i - 1, REG_ITMP1);
+ emit_bugt(cd, table[0].block);
/* build jump table top down and use address of lowest entry */
- /* s4ptr += 3 + i; */
- tptr += i;
+ table += i;
while (--i >= 0) {
- /* dseg_addtarget(cd, BlockPtrOfPC(*--s4ptr)); */
- dseg_addtarget(cd, (basicblock *) tptr[0]);
- --tptr;
+ dseg_add_target(cd, table->block);
+ --table;
}
- /* length of dataseg after last dseg_addtarget is used by load */
+ /* length of dataseg after last dseg_add_target is used
+ by load */
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2);
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_memindex_reg(cd, -(cd->dseglen), REG_ITMP2, REG_ITMP1, 3, REG_ITMP1);
- x86_64_jmp_reg(cd, REG_ITMP1);
+ M_MOV_IMM(0, REG_ITMP2);
+ dseg_adddata(cd);
+ emit_mov_memindex_reg(cd, -(cd->dseglen), REG_ITMP2, REG_ITMP1, 3, REG_ITMP1);
+ M_JMP(REG_ITMP1);
}
break;
case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
{
- s4 i, l, val, *s4ptr;
- void **tptr;
+ s4 i;
+ lookup_target_t *lookup;
- tptr = (void **) iptr->target;
+ lookup = iptr->dst.lookup;
- s4ptr = iptr->val.a;
- l = s4ptr[0]; /* default */
- i = s4ptr[1]; /* count */
+ i = iptr->sx.s23.s2.lookupcount;
- MCODECHECK((i<<2)+8);
- var_to_reg_int(s1, src, REG_ITMP1); /* reg compare should always be faster */
+ MCODECHECK(8 + ((7 + 6) * i) + 5);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+
while (--i >= 0) {
- s4ptr += 2;
- ++tptr;
-
- val = s4ptr[0];
- x86_64_alul_imm_reg(cd, X86_64_CMP, val, s1);
- x86_64_jcc(cd, X86_64_CC_E, 0);
- /* codegen_addreference(cd, BlockPtrOfPC(s4ptr[1]), cd->mcodeptr); */
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ M_ICMP_IMM(lookup->value, s1);
+ emit_beq(cd, lookup->target.block);
+ lookup++;
}
- x86_64_jmp_imm(cd, 0);
- /* codegen_addreference(cd, BlockPtrOfPC(l), cd->mcodeptr); */
-
- tptr = (void **) iptr->target;
- codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
+ emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
+ ALIGNCODENOP;
}
break;
- case ICMD_BUILTIN3: /* ..., arg1, arg2, arg3 ==> ... */
- /* op1 = return type, val.a = function pointer*/
- s3 = 3;
- goto gen_method;
-
- case ICMD_BUILTIN2: /* ..., arg1, arg2 ==> ... */
- /* op1 = return type, val.a = function pointer*/
- s3 = 2;
- goto gen_method;
+ case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
- case ICMD_BUILTIN1: /* ..., arg1 ==> ... */
- /* op1 = return type, val.a = function pointer*/
- s3 = 1;
+ bte = iptr->sx.s23.s3.bte;
+ md = bte->md;
goto gen_method;
case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
- /* op1 = arg count, val.a = method pointer */
case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
case ICMD_INVOKEINTERFACE:
- s3 = iptr->op1;
-
-gen_method: {
- methodinfo *lm;
- classinfo *ci;
- stackptr tmpsrc;
- s4 iarg, farg;
+ REPLACEMENT_POINT_INVOKE(cd, iptr);
- MCODECHECK((s3 << 1) + 64);
-
- tmpsrc = src;
- s2 = s3;
- iarg = 0;
- farg = 0;
-
- /* copy arguments to registers or stack location ******************/
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ lm = NULL;
+ um = iptr->sx.s23.s3.um;
+ md = um->methodref->parseddesc.md;
+ }
+ else {
+ lm = iptr->sx.s23.s3.fmiref->p.method;
+ um = NULL;
+ md = lm->parseddesc;
+ }
- /* count integer and float arguments */
+gen_method:
+ s3 = md->paramcount;
- for (; --s3 >= 0; src = src->prev) {
- IS_INT_LNG_TYPE(src->type) ? iarg++ : farg++;
- }
+ MCODECHECK((20 * s3) + 128);
- src = tmpsrc;
- s3 = s2;
+ /* copy arguments to registers or stack location */
- /* calculate amount of arguments to be on stack */
+ for (s3 = s3 - 1; s3 >= 0; s3--) {
+ var = VAR(iptr->sx.s23.s2.args[s3]);
+ d = md->params[s3].regoff;
- s2 = (iarg > INT_ARG_CNT) ? iarg - INT_ARG_CNT : 0 +
- (farg > FLT_ARG_CNT) ? farg - FLT_ARG_CNT : 0;
+ /* already preallocated (ARGVAR)? */
- for (; --s3 >= 0; src = src->prev) {
- /* decrement the current argument type */
- IS_INT_LNG_TYPE(src->type) ? iarg-- : farg--;
+ if (var->flags & PREALLOC)
+ continue;
- if (src->varkind == ARGVAR) {
- if (IS_INT_LNG_TYPE(src->type)) {
- if (iarg >= INT_ARG_CNT) {
- s2--;
- }
- } else {
- if (farg >= FLT_ARG_CNT) {
- s2--;
- }
+ if (IS_INT_LNG_TYPE(var->type)) {
+ if (!md->params[s3].inmemory) {
+ s1 = emit_load(jd, iptr, var, d);
+ M_INTMOVE(s1, d);
+ }
+ else {
+ s1 = emit_load(jd, iptr, var, REG_ITMP1);
+ M_LST(s1, REG_SP, d);
}
- continue;
}
-
- if (IS_INT_LNG_TYPE(src->type)) {
- if (iarg < INT_ARG_CNT) {
- s1 = rd->argintregs[iarg];
- var_to_reg_int(d, src, s1);
- M_INTMOVE(d, s1);
-
- } else {
- var_to_reg_int(d, src, REG_ITMP1);
- s2--;
- x86_64_mov_reg_membase(cd, d, REG_SP, s2 * 8);
+ else {
+ if (!md->params[s3].inmemory) {
+ s1 = emit_load(jd, iptr, var, d);
+ M_FLTMOVE(s1, d);
}
+ else {
+ s1 = emit_load(jd, iptr, var, REG_FTMP1);
- } else {
- if (farg < FLT_ARG_CNT) {
- s1 = rd->argfltregs[farg];
- var_to_reg_flt(d, src, s1);
- M_FLTMOVE(d, s1);
-
- } else {
- var_to_reg_flt(d, src, REG_FTMP1);
- s2--;
- x86_64_movq_reg_membase(cd, d, REG_SP, s2 * 8);
+ if (IS_2_WORD_TYPE(var->type))
+ M_DST(s1, REG_SP, d);
+ else
+ M_FST(s1, REG_SP, d);
}
}
- } /* end of for */
+ }
+
+ /* generate method profiling code */
+
+ PROFILE_CYCLE_STOP;
- lm = iptr->val.a;
switch (iptr->opc) {
- case ICMD_BUILTIN3:
- case ICMD_BUILTIN2:
- case ICMD_BUILTIN1:
- a = (s8) lm;
- d = iptr->op1;
-
- x86_64_mov_imm_reg(cd, a, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ case ICMD_BUILTIN:
+ M_MOV_IMM(bte->fp, REG_ITMP1);
+ M_CALL(REG_ITMP1);
+
+ emit_exception_check(cd, iptr);
break;
+ case ICMD_INVOKESPECIAL:
+ emit_nullpointer_check(cd, iptr, REG_A0);
+ /* fall through */
+
case ICMD_INVOKESTATIC:
- a = (s8) lm->stubroutine;
- d = lm->returntype;
+ if (lm == NULL) {
+ disp = dseg_add_unique_address(cd, NULL);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
- x86_64_mov_imm_reg(cd, a, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
- break;
+ /* must be calculated before codegen_add_patch_ref */
- case ICMD_INVOKESPECIAL:
- a = (s8) lm->stubroutine;
- d = lm->returntype;
+ if (opt_shownops)
+ disp -= PATCHER_CALL_SIZE;
- gen_nullptr_check(rd->argintregs[0]); /* first argument contains pointer */
- x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); /* access memory for hardware nullptr */
- x86_64_mov_imm_reg(cd, a, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
+ codegen_add_patch_ref(cd, PATCHER_invokestatic_special,
+ um, disp);
+
+/* a = 0; */
+ }
+ else {
+ disp = dseg_add_functionptr(cd, lm->stubroutine);
+ disp = disp + -((cd->mcodeptr + 7) - cd->mcodebase);
+
+/* a = (ptrint) lm->stubroutine; */
+ }
+
+/* M_MOV_IMM(a, REG_ITMP2); */
+ M_ALD(REG_ITMP2, RIP, disp);
+ M_CALL(REG_ITMP2);
break;
case ICMD_INVOKEVIRTUAL:
- d = lm->returntype;
+ if (lm == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_invokevirtual, um, 0);
+
+ s1 = 0;
+ }
+ else {
+ s1 = OFFSET(vftbl_t, table[0]) +
+ sizeof(methodptr) * lm->vftblindex;
+ }
- gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ /* implicit null-pointer check */
+ M_ALD(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
+ M_ALD32(REG_ITMP3, REG_METHODPTR, s1);
+ M_CALL(REG_ITMP3);
break;
case ICMD_INVOKEINTERFACE:
- ci = lm->class;
- d = lm->returntype;
-
- gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * ci->index, REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, sizeof(methodptr) * (lm - ci->methods), REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ if (lm == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_invokeinterface, um, 0);
+
+ s1 = 0;
+ s2 = 0;
+ }
+ else {
+ s1 = OFFSET(vftbl_t, interfacetable[0]) -
+ sizeof(methodptr) * lm->class->index;
+
+ s2 = sizeof(methodptr) * (lm - lm->class->methods);
+ }
+
+ /* implicit null-pointer check */
+ M_ALD(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
+ M_ALD32(REG_METHODPTR, REG_METHODPTR, s1);
+ M_ALD32(REG_ITMP3, REG_METHODPTR, s2);
+ M_CALL(REG_ITMP3);
break;
}
- /* d contains return type */
+ /* generate method profiling code */
- if (d != TYPE_VOID) {
- if (IS_INT_LNG_TYPE(iptr->dst->type)) {
- s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
- M_INTMOVE(REG_RESULT, s1);
- store_reg_to_var_int(iptr->dst, s1);
+ PROFILE_CYCLE_START;
- } else {
- s1 = reg_of_var(rd, iptr->dst, REG_FRESULT);
- M_FLTMOVE(REG_FRESULT, s1);
- store_reg_to_var_flt(iptr->dst, s1);
- }
- }
+ /* store size of call code in replacement point */
+
+ REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
+
+ /* store return value */
+
+ switch (md->returntype.type) {
+ case TYPE_INT:
+ case TYPE_LNG:
+ case TYPE_ADR:
+ s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
+ M_INTMOVE(REG_RESULT, s1);
+ emit_store_dst(jd, iptr, s1);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
+ M_FLTMOVE(REG_FRESULT, s1);
+ emit_store_dst(jd, iptr, s1);
+ break;
+ default:
+ /* TYPE_VOID */
+ break;
}
break;
- case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
+ case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
- /* op1: 0 == array, 1 == class */
- /* val.a: (classinfo*) superclass */
-
-/* superclass is an interface:
- *
- * return (sub != NULL) &&
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL);
- *
- * superclass is a class:
- *
- * return ((sub != NULL) && (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ if (!(iptr->flags.bits & INS_FLAG_ARRAY)) {
+ /* object type cast-check */
- {
- classinfo *super = (classinfo*) iptr->val.a;
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
-#endif
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
- var_to_reg_int(s1, src, REG_ITMP1);
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- if (s1 == d) {
- M_INTMOVE(s1, REG_ITMP1);
- s1 = REG_ITMP1;
- }
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- if (iptr->op1) { /* class/interface */
- if (super->flags & ACC_INTERFACE) { /* interface */
- x86_64_test_reg_reg(cd, s1, s1);
-
- /* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
-
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
-
- a += 3; /* sub */
- CALCIMMEDIATEBYTES(a, super->index);
-
- a += 3; /* test */
-
- a += 6; /* jcc */
- a += 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
-
- a += 3; /* test */
- a += 4; /* setcc */
-
- x86_64_jcc(cd, X86_64_CC_E, a);
-
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength), REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_SUB, super->index, REG_ITMP2);
- x86_64_test_reg_reg(cd, REG_ITMP2, REG_ITMP2);
-
- /* TODO: clean up this calculation */
- a = 0;
- a += 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
-
- a += 3; /* test */
- a += 4; /* setcc */
-
- x86_64_jcc(cd, X86_64_CC_LE, a);
- x86_64_mov_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*), REG_ITMP1);
- x86_64_test_reg_reg(cd, REG_ITMP1, REG_ITMP1);
- x86_64_setcc_reg(cd, X86_64_CC_NE, d);
-
- } else { /* class */
- x86_64_test_reg_reg(cd, s1, s1);
-
- /* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
-
- a += 10; /* mov_imm_reg */
-
- a += 2; /* movl_membase_reg - only if REG_ITMP1 == RAX */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, baseval));
-
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
-
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, diffval));
-
- a += 3; /* sub */
- a += 3; /* xor */
- a += 3; /* cmp */
- a += 4; /* setcc */
-
- x86_64_jcc(cd, X86_64_CC_E, a);
-
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, baseval), REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP3);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, diffval), REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP3, REG_ITMP1);
- x86_64_alu_reg_reg(cd, X86_64_XOR, d, d);
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
- x86_64_setcc_reg(cd, X86_64_CC_BE, d);
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ super = NULL;
+ superindex = 0;
+ supervftbl = NULL;
+ }
+ else {
+ super = iptr->sx.s23.s3.c.cls;
+ superindex = super->index;
+ supervftbl = super->vftbl;
}
- }
- else
- panic("internal error: no inlined array instanceof");
- }
- store_reg_to_var_int(iptr->dst, d);
- break;
- case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
+ if ((super == NULL) || !(super->flags & ACC_INTERFACE))
+ CODEGEN_CRITICAL_SECTION_NEW;
- /* op1: 0 == array, 1 == class */
- /* val.a: (classinfo*) superclass */
-
-/* superclass is an interface:
- *
- * OK if ((sub == NULL) ||
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL));
- *
- * superclass is a class:
- *
- * OK if ((sub == NULL) || (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
- {
- classinfo *super = (classinfo*) iptr->val.a;
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- var_to_reg_int(s1, src, d);
- if (iptr->op1) { /* class/interface */
- if (super->flags & ACC_INTERFACE) { /* interface */
- x86_64_test_reg_reg(cd, s1, s1);
-
- /* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
-
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
-
- a += 3; /* sub */
- CALCIMMEDIATEBYTES(a, super->index);
-
- a += 3; /* test */
- a += 6; /* jcc */
-
- a += 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
-
- a += 3; /* test */
- a += 6; /* jcc */
-
- x86_64_jcc(cd, X86_64_CC_E, a);
-
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength), REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_SUB, super->index, REG_ITMP2);
- x86_64_test_reg_reg(cd, REG_ITMP2, REG_ITMP2);
- x86_64_jcc(cd, X86_64_CC_LE, 0);
- codegen_addxcastrefs(cd, cd->mcodeptr);
- x86_64_mov_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*), REG_ITMP2);
- x86_64_test_reg_reg(cd, REG_ITMP2, REG_ITMP2);
- x86_64_jcc(cd, X86_64_CC_E, 0);
- codegen_addxcastrefs(cd, cd->mcodeptr);
-
- } else { /* class */
- x86_64_test_reg_reg(cd, s1, s1);
-
- /* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
- a += 10; /* mov_imm_reg */
- a += 2; /* movl_membase_reg - only if REG_ITMP1 == RAX */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, baseval));
-
- if (d != REG_ITMP3) {
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, diffval));
- a += 3; /* sub */
-
- } else {
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
- a += 3; /* sub */
- a += 10; /* mov_imm_reg */
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, diffval));
- }
+ /* if class is not resolved, check which code to call */
- a += 3; /* cmp */
- a += 6; /* jcc */
+ if (super == NULL) {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_1);
- x86_64_jcc(cd, X86_64_CC_E, a);
+ codegen_add_patch_ref(cd, PATCHER_checkcast_instanceof_flags,
+ iptr->sx.s23.s3.c.ref, 0);
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, baseval), REG_ITMP1);
- if (d != REG_ITMP3) {
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP3);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, diffval), REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP3, REG_ITMP1);
-
- } else {
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, diffval), REG_ITMP2);
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
-#endif
- }
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
- x86_64_jcc(cd, X86_64_CC_A, 0); /* (u) REG_ITMP1 > (u) REG_ITMP2 -> jump */
- codegen_addxcastrefs(cd, cd->mcodeptr);
+ M_IMOV_IMM(0, REG_ITMP2); /* super->flags */
+ M_IAND_IMM(ACC_INTERFACE, REG_ITMP2);
+ emit_label_beq(cd, BRANCH_LABEL_2);
}
- } else
- panic("internal error: no inlined array checkcast");
- }
- M_INTMOVE(s1, d);
- store_reg_to_var_int(iptr->dst, d);
- break;
+ /* interface checkcast code */
- case ICMD_CHECKASIZE: /* ..., size ==> ..., size */
+ if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
+ if (super != NULL) {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_3);
+ }
- if (src->flags & INMEMORY) {
- x86_64_alul_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8);
-
- } else {
- x86_64_testl_reg_reg(cd, src->regoff, src->regoff);
- }
- x86_64_jcc(cd, X86_64_CC_L, 0);
- codegen_addxcheckarefs(cd, cd->mcodeptr);
- break;
+ M_ALD(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
- case ICMD_CHECKEXCEPTION: /* ... ==> ... */
+ if (super == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_checkcast_interface,
+ iptr->sx.s23.s3.c.ref,
+ 0);
+ }
- x86_64_test_reg_reg(cd, REG_RESULT, REG_RESULT);
- x86_64_jcc(cd, X86_64_CC_E, 0);
- codegen_addxexceptionrefs(cd, cd->mcodeptr);
- break;
+ M_ILD32(REG_ITMP3,
+ REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
+ M_ICMP_IMM32(superindex, REG_ITMP3);
+ emit_classcast_check(cd, iptr, BRANCH_LE, REG_ITMP3, s1);
+
+ M_ALD32(REG_ITMP3, REG_ITMP2,
+ OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*));
+ M_TEST(REG_ITMP3);
+ emit_classcast_check(cd, iptr, BRANCH_EQ, REG_ITMP3, s1);
+
+ if (super == NULL)
+ emit_label_br(cd, BRANCH_LABEL_4);
+ else
+ emit_label(cd, BRANCH_LABEL_3);
+ }
- case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
- /* op1 = dimension, val.a = array descriptor */
+ /* class checkcast code */
- /* check for negative sizes and copy sizes to stack if necessary */
+ if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
+ if (super == NULL) {
+ emit_label(cd, BRANCH_LABEL_2);
+ }
+ else {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_5);
+ }
- MCODECHECK((iptr->op1 << 1) + 64);
+ M_ALD(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
- for (s1 = iptr->op1; --s1 >= 0; src = src->prev) {
- var_to_reg_int(s2, src, REG_ITMP1);
- x86_64_testl_reg_reg(cd, s2, s2);
- x86_64_jcc(cd, X86_64_CC_L, 0);
- codegen_addxcheckarefs(cd, cd->mcodeptr);
+ if (super == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_checkcast_class,
+ iptr->sx.s23.s3.c.ref,
+ 0);
+ }
- /* copy SAVEDVAR sizes to stack */
+ M_MOV_IMM(supervftbl, REG_ITMP3);
- if (src->varkind != ARGVAR) {
- x86_64_mov_reg_membase(cd, s2, REG_SP, s1 * 8);
- }
- }
+ CODEGEN_CRITICAL_SECTION_START;
- /* a0 = dimension count */
- x86_64_mov_imm_reg(cd, iptr->op1, rd->argintregs[0]);
+ M_ILD32(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, baseval));
- /* a1 = arraydescriptor */
- x86_64_mov_imm_reg(cd, (u8) iptr->val.a, rd->argintregs[1]);
+ /* if (s1 != REG_ITMP1) { */
+ /* emit_movl_membase_reg(cd, REG_ITMP3, */
+ /* OFFSET(vftbl_t, baseval), */
+ /* REG_ITMP1); */
+ /* emit_movl_membase_reg(cd, REG_ITMP3, */
+ /* OFFSET(vftbl_t, diffval), */
+ /* REG_ITMP3); */
+ /* #if defined(ENABLE_THREADS) */
+ /* codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase); */
+ /* #endif */
+ /* emit_alu_reg_reg(cd, ALU_SUB, REG_ITMP1, REG_ITMP2); */
- /* a2 = pointer to dimensions = stack pointer */
- x86_64_mov_reg_reg(cd, REG_SP, rd->argintregs[2]);
+ /* } else { */
- x86_64_mov_imm_reg(cd, (u8) builtin_nmultianewarray, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ M_ILD32(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, baseval));
+ M_ISUB(REG_ITMP3, REG_ITMP2);
+ M_MOV_IMM(supervftbl, REG_ITMP3);
+ M_ILD(REG_ITMP3, REG_ITMP3, OFFSET(vftbl_t, diffval));
+ /* } */
- s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
- M_INTMOVE(REG_RESULT, s1);
- store_reg_to_var_int(iptr->dst, s1);
- break;
+ CODEGEN_CRITICAL_SECTION_END;
- default:
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Unknown ICMD %d", iptr->opc);
- } /* switch */
-
- } /* for instruction */
-
- /* copy values to interface registers */
+ M_ICMP(REG_ITMP3, REG_ITMP2);
+ emit_classcast_check(cd, iptr, BRANCH_UGT, REG_ITMP3, s1);
- src = bptr->outstack;
- len = bptr->outdepth;
- MCODECHECK(64 + len);
-#ifdef LSRA
- if (!opt_lsra)
-#endif
- while (src) {
- len--;
- if ((src->varkind != STACKVAR)) {
- s2 = src->type;
- if (IS_FLT_DBL_TYPE(s2)) {
- var_to_reg_flt(s1, src, REG_FTMP1);
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- M_FLTMOVE(s1, rd->interfaces[len][s2].regoff);
-
- } else {
- x86_64_movq_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
+ if (super != NULL)
+ emit_label(cd, BRANCH_LABEL_5);
}
- } else {
- var_to_reg_int(s1, src, REG_ITMP1);
- if (!(rd->interfaces[len][s2].flags & INMEMORY)) {
- M_INTMOVE(s1, rd->interfaces[len][s2].regoff);
-
- } else {
- x86_64_mov_reg_membase(cd, s1, REG_SP, rd->interfaces[len][s2].regoff * 8);
+ if (super == NULL) {
+ emit_label(cd, BRANCH_LABEL_1);
+ emit_label(cd, BRANCH_LABEL_4);
}
+
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
}
- }
- src = src->prev;
- }
- } /* if (bptr -> flags >= BBREACHED) */
- } /* for basic block */
+ else {
+ /* array type cast-check */
- {
+ s1 = emit_load_s1(jd, iptr, REG_ITMP2);
+ M_INTMOVE(s1, REG_A0);
- /* generate bound check stubs */
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ codegen_add_patch_ref(cd, PATCHER_builtin_arraycheckcast,
+ iptr->sx.s23.s3.c.ref, 0);
+ }
- u1 *xcodeptr = NULL;
- branchref *bref;
+ M_MOV_IMM(iptr->sx.s23.s3.c.cls, REG_A1);
+ M_MOV_IMM(BUILTIN_arraycheckcast, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- for (bref = cd->xboundrefs; bref != NULL; bref = bref->next) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ /* s1 may have been destroyed over the function call */
+ s1 = emit_load_s1(jd, iptr, REG_ITMP2);
+ M_TEST(REG_RESULT);
+ emit_classcast_check(cd, iptr, BRANCH_EQ, REG_RESULT, s1);
- MCODECHECK(100);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
+ }
- /* move index register into REG_ITMP1 */
- x86_64_mov_reg_reg(cd, bref->reg, REG_ITMP1); /* 3 bytes */
+ M_INTMOVE(s1, d);
+ emit_store_dst(jd, iptr, d);
+ break;
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
+ {
+ classinfo *super;
+ vftbl_t *supervftbl;
+ s4 superindex;
- } else {
- xcodeptr = cd->mcodeptr;
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ super = NULL;
+ superindex = 0;
+ supervftbl = NULL;
+ }
+ else {
+ super = iptr->sx.s23.s3.c.cls;
+ superindex = super->index;
+ supervftbl = super->vftbl;
+ }
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
+ if ((super == NULL) || !(super->flags & ACC_INTERFACE))
+ CODEGEN_CRITICAL_SECTION_NEW;
- x86_64_mov_reg_reg(cd, REG_ITMP1, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, (u8) new_arrayindexoutofboundsexception, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
+ s1 = emit_load_s1(jd, iptr, REG_ITMP1);
+ d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
+ if (s1 == d) {
+ M_INTMOVE(s1, REG_ITMP1);
+ s1 = REG_ITMP1;
+ }
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
- }
- }
+ M_CLR(d);
- /* generate negative array size check stubs */
+ /* if class is not resolved, check which code to call */
- xcodeptr = NULL;
-
- for (bref = cd->xcheckarefs; bref != NULL; bref = bref->next) {
- if ((cd->exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- xcodeptr - cd->mcodebase - (10 + 10 + 3));
- continue;
- }
+ if (super == NULL) {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_1);
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ codegen_add_patch_ref(cd, PATCHER_checkcast_instanceof_flags,
+ iptr->sx.s23.s3.c.ref, 0);
- MCODECHECK(100);
+ M_IMOV_IMM(0, REG_ITMP3); /* super->flags */
+ M_IAND_IMM(ACC_INTERFACE, REG_ITMP3);
+ emit_label_beq(cd, BRANCH_LABEL_2);
+ }
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ /* interface instanceof code */
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
+ if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
+ if (super != NULL) {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_3);
+ }
- } else {
- xcodeptr = cd->mcodeptr;
+ M_ALD(REG_ITMP1, s1, OFFSET(java_object_t, vftbl));
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
+ if (super == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_instanceof_interface,
+ iptr->sx.s23.s3.c.ref, 0);
+ }
- x86_64_mov_imm_reg(cd, (u8) new_negativearraysizeexception, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
+ M_ILD32(REG_ITMP3,
+ REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
+ M_ICMP_IMM32(superindex, REG_ITMP3);
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
+ a = 3 + 4 /* mov_membase32_reg */ + 3 /* test */ + 4 /* setcc */;
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
- }
- }
+ M_BLE(a);
+ M_ALD32(REG_ITMP1, REG_ITMP1,
+ OFFSET(vftbl_t, interfacetable[0]) -
+ superindex * sizeof(methodptr*));
+ M_TEST(REG_ITMP1);
+ M_SETNE(d);
- /* generate cast check stubs */
+ if (super == NULL)
+ emit_label_br(cd, BRANCH_LABEL_4);
+ else
+ emit_label(cd, BRANCH_LABEL_3);
+ }
- xcodeptr = NULL;
-
- for (bref = cd->xcastrefs; bref != NULL; bref = bref->next) {
- if ((cd->exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- xcodeptr - cd->mcodebase - (10 + 10 + 3));
- continue;
- }
+ /* class instanceof code */
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
+ if (super == NULL) {
+ emit_label(cd, BRANCH_LABEL_2);
+ }
+ else {
+ M_TEST(s1);
+ emit_label_beq(cd, BRANCH_LABEL_5);
+ }
- MCODECHECK(100);
+ M_ALD(REG_ITMP1, s1, OFFSET(java_object_t, vftbl));
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ if (super == NULL) {
+ codegen_add_patch_ref(cd, PATCHER_instanceof_class,
+ iptr->sx.s23.s3.c.ref, 0);
+ }
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
-
- } else {
- xcodeptr = cd->mcodeptr;
+ M_MOV_IMM(supervftbl, REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
+ CODEGEN_CRITICAL_SECTION_START;
- x86_64_mov_imm_reg(cd, (u8) new_classcastexception, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
+ M_ILD(REG_ITMP1, REG_ITMP1, OFFSET(vftbl_t, baseval));
+ M_ILD(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, diffval));
+ M_ILD(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, baseval));
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
+ CODEGEN_CRITICAL_SECTION_END;
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
- }
- }
+ M_ISUB(REG_ITMP2, REG_ITMP1);
+ M_CLR(d); /* may be REG_ITMP2 */
+ M_ICMP(REG_ITMP3, REG_ITMP1);
+ M_SETULE(d);
- /* generate divide by zero check stubs */
+ if (super != NULL)
+ emit_label(cd, BRANCH_LABEL_5);
+ }
- xcodeptr = NULL;
-
- for (bref = cd->xdivrefs; bref != NULL; bref = bref->next) {
- if ((cd->exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- xcodeptr - cd->mcodebase - (10 + 10 + 3));
- continue;
- }
+ if (super == NULL) {
+ emit_label(cd, BRANCH_LABEL_1);
+ emit_label(cd, BRANCH_LABEL_4);
+ }
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ emit_store_dst(jd, iptr, d);
+ }
+ break;
- MCODECHECK(100);
+ case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ /* check for negative sizes and copy sizes to stack if necessary */
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
-
- } else {
- xcodeptr = cd->mcodeptr;
+ MCODECHECK((10 * 4 * iptr->s1.argcount) + 5 + 10 * 8);
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
+ for (s1 = iptr->s1.argcount; --s1 >= 0; ) {
- x86_64_mov_imm_reg(cd, (u8) new_arithmeticexception, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
+ /* copy SAVEDVAR sizes to stack */
+ var = VAR(iptr->sx.s23.s2.args[s1]);
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
+ /* Already Preallocated? */
+ if (!(var->flags & PREALLOC)) {
+ s2 = emit_load(jd, iptr, var, REG_ITMP1);
+ M_LST(s2, REG_SP, s1 * 8);
+ }
+ }
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
- }
- }
+ /* is a patcher function set? */
- /* generate exception check stubs */
+ if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
+ codegen_add_patch_ref(cd, PATCHER_builtin_multianewarray,
+ iptr->sx.s23.s3.c.ref, 0);
+ }
- xcodeptr = NULL;
-
- for (bref = cd->xexceptionrefs; bref != NULL; bref = bref->next) {
- if ((cd->exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- xcodeptr - cd->mcodebase - (10 + 10 + 3));
- continue;
- }
+ /* a0 = dimension count */
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ M_MOV_IMM(iptr->s1.argcount, REG_A0);
- MCODECHECK(100);
+ /* a1 = classinfo */
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
+ M_MOV_IMM(iptr->sx.s23.s3.c.cls, REG_A1);
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
-
- } else {
- xcodeptr = cd->mcodeptr;
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- x86_64_alu_imm_reg(cd, X86_64_SUB, 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0);
- x86_64_mov_imm_reg(cd, (u8) &builtin_get_exceptionptrptr, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- x86_64_mov_membase_reg(cd, REG_RESULT, 0, REG_ITMP3);
- x86_64_mov_imm_membase(cd, 0, REG_RESULT, 0);
- x86_64_mov_reg_reg(cd, REG_ITMP3, REG_ITMP1_XPTR);
- x86_64_mov_membase_reg(cd, REG_SP, 0, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 8, REG_SP);
-#else
- x86_64_mov_imm_reg(cd, (u8) &_exceptionptr, REG_ITMP3);
- x86_64_mov_membase_reg(cd, REG_ITMP3, 0, REG_ITMP1_XPTR);
- x86_64_mov_imm_membase(cd, 0, REG_ITMP3, 0);
-#endif
+ /* a2 = pointer to dimensions = stack pointer */
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
- }
- }
+ M_MOV(REG_SP, REG_A2);
- /* generate null pointer check stubs */
+ M_MOV_IMM(BUILTIN_multianewarray, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- xcodeptr = NULL;
-
- for (bref = cd->xnullrefs; bref != NULL; bref = bref->next) {
- if ((cd->exceptiontablelength == 0) && (xcodeptr != NULL)) {
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- xcodeptr - cd->mcodebase - (10 + 10 + 3));
- continue;
- }
+ /* check for exception before result assignment */
- gen_resolvebranch(cd->mcodebase + bref->branchpos,
- bref->branchpos,
- cd->mcodeptr - cd->mcodebase);
+ emit_exception_check(cd, iptr);
- MCODECHECK(100);
+ s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
+ M_INTMOVE(REG_RESULT, s1);
+ emit_store_dst(jd, iptr, s1);
+ break;
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
- dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
+ default:
+ exceptions_throw_internalerror("Unknown ICMD %d during code generation",
+ iptr->opc);
+ return false;
+ } /* switch */
- if (xcodeptr != NULL) {
- x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
+ } /* for instruction */
- } else {
- xcodeptr = cd->mcodeptr;
-
- x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
- x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
+ MCODECHECK(512); /* XXX require a lower number? */
- x86_64_mov_imm_reg(cd, (u8) new_nullpointerexception, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
+ /* At the end of a basic block we may have to append some nops,
+ because the patcher stub calling code might be longer than the
+ actual instruction. So codepatching does not change the
+ following block unintentionally. */
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
- x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
-
- x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
+ if (cd->mcodeptr < cd->lastmcodeptr) {
+ while (cd->mcodeptr < cd->lastmcodeptr) {
+ M_NOP;
}
}
- /* generate put/getstatic stub call code */
-
- {
- clinitref *cref;
- codegendata *tmpcd;
- u1 xmcode;
- u4 mcode;
-
- tmpcd = DNEW(codegendata);
-
- for (cref = cd->clinitrefs; cref != NULL; cref = cref->next) {
- /* Get machine code which is patched back in later. A */
- /* `call rel32' is 5 bytes long. */
- xcodeptr = cd->mcodebase + cref->branchpos;
- xmcode = *xcodeptr;
- mcode = *((u4 *) (xcodeptr + 1));
+ } /* if (bptr -> flags >= BBREACHED) */
+ } /* for basic block */
- MCODECHECK(50);
+ dseg_createlinenumbertable(cd);
- /* patch in `call rel32' to call the following code */
- tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
- x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
+ /* generate stubs */
- /* Push machine code bytes to patch onto the stack. */
- x86_64_push_imm(cd, (u1) xmcode);
- x86_64_push_imm(cd, (u4) mcode);
+ emit_patcher_stubs(jd);
- x86_64_push_imm(cd, (u8) cref->class);
+ /* everything's ok */
- x86_64_mov_imm_reg(cd, (u8) asm_check_clinit, REG_ITMP1);
- x86_64_jmp_reg(cd, REG_ITMP1);
- }
- }
- }
-
- codegen_finish(m, cd, (s4) ((u1 *) cd->mcodeptr - cd->mcodebase));
+ return true;
}
-/* function createcompilerstub *************************************************
+/* codegen_emit_stub_compiler **************************************************
- creates a stub routine which calls the compiler
+ Emit a stub routine which calls the compiler.
*******************************************************************************/
-#define COMPSTUBSIZE 23
-
-u1 *createcompilerstub(methodinfo *m)
+void codegen_emit_stub_compiler(jitdata *jd)
{
- u1 *s = CNEW(u1, COMPSTUBSIZE); /* memory to hold the stub */
+ methodinfo *m;
codegendata *cd;
- s4 dumpsize;
-
- /* mark start of dump memory area */
- dumpsize = dump_size();
+ /* get required compiler data */
- cd = DNEW(codegendata);
- cd->mcodeptr = s;
+ m = jd->m;
+ cd = jd->cd;
/* code for the stub */
- x86_64_mov_imm_reg(cd, (u8) m, REG_ITMP1); /* pass method to compiler */
- x86_64_mov_imm_reg(cd, (u8) asm_call_jit_compiler, REG_ITMP3);/* load address */
- x86_64_jmp_reg(cd, REG_ITMP3); /* jump to compiler */
-
-#if defined(STATISTICS)
- if (opt_stat)
- count_cstub_len += COMPSTUBSIZE;
-#endif
-
- /* release dump area */
-
- dump_release(dumpsize);
- return s;
+ M_ALD(REG_ITMP1, RIP, -(7 * 1 + 2 * SIZEOF_VOID_P)); /* methodinfo */
+ M_ALD(REG_ITMP3, RIP, -(7 * 2 + 3 * SIZEOF_VOID_P)); /* compiler pointer */
+ M_JMP(REG_ITMP3);
}
-/* function removecompilerstub *************************************************
+/* codegen_emit_stub_native ****************************************************
- deletes a compilerstub from memory (simply by freeing it)
+ Emits a stub routine which calls a native method.
*******************************************************************************/
-void removecompilerstub(u1 *stub)
+void codegen_emit_stub_native(jitdata *jd, methoddesc *nmd, functionptr f)
{
- CFREE(stub, COMPSTUBSIZE);
-}
-
-
-/* function: createnativestub **************************************************
-
- creates a stub routine which calls a native method
-
-*******************************************************************************/
+ methodinfo *m;
+ codeinfo *code;
+ codegendata *cd;
+ methoddesc *md;
+ s4 nativeparams;
+ s4 i, j;
+ s4 t;
+ s4 s1, s2;
-/* #if defined(USE_THREADS) && defined(NATIVE_THREADS) */
-/* static java_objectheader **(*callgetexceptionptrptr)() = builtin_get_exceptionptrptr; */
-/* #endif */
+ /* get required compiler data */
-#define NATIVESTUBSIZE 700 /* keep this size high enough! */
-
-u1 *createnativestub(functionptr f, methodinfo *m)
-{
- u1 *s; /* pointer to stub memory */
- codegendata *cd;
- registerdata *rd;
- t_inlining_globals *id;
- s4 dumpsize;
- s4 stackframesize; /* size of stackframe if needed */
- u1 *tptr;
- s4 iargs; /* count of integer arguments */
- s4 fargs; /* count of float arguments */
- s4 i; /* counter */
-
- void **callAddrPatchPos=0;
- u1 *jmpInstrPos=0;
- void **jmpInstrPatchPos=0;
+ m = jd->m;
+ code = jd->code;
+ cd = jd->cd;
/* initialize variables */
- iargs = 0;
- fargs = 0;
-
- /* mark start of dump memory area */
-
- dumpsize = dump_size();
+ md = m->parseddesc;
+ nativeparams = (m->flags & ACC_STATIC) ? 2 : 1;
- cd = DNEW(codegendata);
- rd = DNEW(registerdata);
- id = DNEW(t_inlining_globals);
+ /* calculate stack frame size */
- /* setup registers before using it */
+ cd->stackframesize =
+ sizeof(stackframeinfo) / SIZEOF_VOID_P +
+ sizeof(localref_table) / SIZEOF_VOID_P +
+ md->paramcount +
+ 1 + /* functionptr, TODO: store in data segment */
+ nmd->memuse;
- inlining_setup(m, id);
- reg_setup(m, rd, id);
+ cd->stackframesize |= 0x1; /* keep stack 16-byte aligned */
- /* set paramcount and paramtypes */
-
- descriptor2types(m);
-
- /* count integer and float arguments */
-
- tptr = m->paramtypes;
- for (i = 0; i < m->paramcount; i++) {
- IS_INT_LNG_TYPE(*tptr++) ? iargs++ : fargs++;
- }
+ /* create method header */
- s = CNEW(u1, NATIVESTUBSIZE); /* memory to hold the stub */
+ (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
+ (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
+ (void) dseg_add_unique_s4(cd, 0); /* IsSync */
+ (void) dseg_add_unique_s4(cd, 0); /* IsLeaf */
+ (void) dseg_add_unique_s4(cd, 0); /* IntSave */
+ (void) dseg_add_unique_s4(cd, 0); /* FltSave */
+ (void) dseg_addlinenumbertablesize(cd);
+ (void) dseg_add_unique_s4(cd, 0); /* ExTableSize */
- /* set some required varibles which are normally set by codegen_setup */
- cd->mcodebase = s;
- cd->mcodeptr = s;
- cd->clinitrefs = NULL;
+#if defined(ENABLE_PROFILING)
+ /* generate native method profiling code */
- /* if function is static, check for initialized */
+ if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
+ /* count frequency */
- if ((m->flags & ACC_STATIC) && !m->class->initialized) {
- codegen_addclinitref(cd, cd->mcodeptr, m->class);
+ M_MOV_IMM(code, REG_ITMP3);
+ M_IINC_MEMBASE(REG_ITMP3, OFFSET(codeinfo, frequency));
}
+#endif
- if (runverbose) {
- s4 l, s1;
+ /* generate stub code */
- x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
+ M_ASUB_IMM(cd->stackframesize * 8, REG_SP);
- /* save integer and float argument registers */
+#if !defined(NDEBUG)
+ if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
+ emit_verbosecall_enter(jd);
+#endif
- for (i = 0; i < INT_ARG_CNT; i++) {
- x86_64_mov_reg_membase(cd, rd->argintregs[i], REG_SP, (1 + i) * 8);
- }
+ /* get function address (this must happen before the stackframeinfo) */
- for (i = 0; i < FLT_ARG_CNT; i++) {
- x86_64_movq_reg_membase(cd, rd->argfltregs[i], REG_SP, (1 + INT_ARG_CNT + i) * 8);
- }
+#if !defined(WITH_STATIC_CLASSPATH)
+ if (f == NULL)
+ codegen_add_patch_ref(cd, PATCHER_resolve_native, m, 0);
+#endif
- /* show integer hex code for float arguments */
+ M_MOV_IMM(f, REG_ITMP3);
- for (i = 0, l = 0; i < m->paramcount && i < INT_ARG_CNT; i++) {
- /* if the paramtype is a float, we have to right shift all */
- /* following integer registers */
+ /* save integer and float argument registers */
- if (IS_FLT_DBL_TYPE(m->paramtypes[i])) {
- for (s1 = INT_ARG_CNT - 2; s1 >= i; s1--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
- }
+ for (i = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[i]);
- l++;
+ switch (md->paramtypes[i].type) {
+ case TYPE_INT:
+ case TYPE_LNG:
+ case TYPE_ADR:
+ M_LST(s1, REG_SP, i * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DST(s1, REG_SP, i * 8);
+ break;
}
}
-
- x86_64_mov_imm_reg(cd, (u8) m, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (u8) builtin_trace_args, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
-
- /* restore integer and float argument registers */
-
- for (i = 0; i < INT_ARG_CNT; i++) {
- x86_64_mov_membase_reg(cd, REG_SP, (1 + i) * 8, rd->argintregs[i]);
- }
-
- for (i = 0; i < FLT_ARG_CNT; i++) {
- x86_64_movq_membase_reg(cd, REG_SP, (1 + INT_ARG_CNT + i) * 8, rd->argfltregs[i]);
- }
-
- x86_64_alu_imm_reg(cd, X86_64_ADD, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
}
-#if !defined(STATIC_CLASSPATH)
- /* call method to resolve native function if needed */
- if (f == NULL) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
-
- x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, 1 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[1], REG_SP, 2 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[2], REG_SP, 3 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[3], REG_SP, 4 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[4], REG_SP, 5 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 6 * 8);
-
- x86_64_movq_reg_membase(cd, rd->argfltregs[0], REG_SP, 7 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[1], REG_SP, 8 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[2], REG_SP, 9 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[3], REG_SP, 10 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[4], REG_SP, 11 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[5], REG_SP, 12 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[6], REG_SP, 13 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[7], REG_SP, 14 * 8);
-
- /* needed to patch a jump over this block */
- x86_64_jmp_imm(cd, 0);
- jmpInstrPos = cd->mcodeptr - 4;
-
- x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
-
- x86_64_mov_imm_reg(cd, 0, rd->argintregs[1]);
- callAddrPatchPos = cd->mcodeptr - 8; /* at this position the place is specified where the native function adress should be patched into*/
-
- x86_64_mov_imm_reg(cd, 0, rd->argintregs[2]);
- jmpInstrPatchPos = cd->mcodeptr - 8;
-
- x86_64_mov_imm_reg(cd, jmpInstrPos, rd->argintregs[3]);
-
- x86_64_mov_imm_reg(cd, (u8) codegen_resolve_native, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
-
- *(jmpInstrPatchPos) = cd->mcodeptr - jmpInstrPos - 1; /*=opcode jmp_imm size*/
-
- x86_64_mov_membase_reg(cd, REG_SP, 1 * 8, rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, REG_SP, 2 * 8, rd->argintregs[1]);
- x86_64_mov_membase_reg(cd, REG_SP, 3 * 8, rd->argintregs[2]);
- x86_64_mov_membase_reg(cd, REG_SP, 4 * 8, rd->argintregs[3]);
- x86_64_mov_membase_reg(cd, REG_SP, 5 * 8, rd->argintregs[4]);
- x86_64_mov_membase_reg(cd, REG_SP, 6 * 8, rd->argintregs[5]);
-
- x86_64_movq_membase_reg(cd, REG_SP, 7 * 8, rd->argfltregs[0]);
- x86_64_movq_membase_reg(cd, REG_SP, 8 * 8, rd->argfltregs[1]);
- x86_64_movq_membase_reg(cd, REG_SP, 9 * 8, rd->argfltregs[2]);
- x86_64_movq_membase_reg(cd, REG_SP, 10 * 8, rd->argfltregs[3]);
- x86_64_movq_membase_reg(cd, REG_SP, 11 * 8, rd->argfltregs[4]);
- x86_64_movq_membase_reg(cd, REG_SP, 12 * 8, rd->argfltregs[5]);
- x86_64_movq_membase_reg(cd, REG_SP, 13 * 8, rd->argfltregs[6]);
- x86_64_movq_membase_reg(cd, REG_SP, 14 * 8, rd->argfltregs[7]);
-
- x86_64_alu_imm_reg(cd, X86_64_ADD, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
- }
-#endif
+ M_AST(REG_ITMP3, REG_SP, md->paramcount * 8);
- /* save argument registers on stack -- if we have to */
+ /* create dynamic stack info */
- if ((((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) || iargs > (INT_ARG_CNT - 1)) ||
- (fargs > FLT_ARG_CNT)) {
- s4 paramshiftcnt;
- s4 stackparamcnt;
+ M_ALEA(REG_SP, cd->stackframesize * 8, REG_A0);
+ emit_lea_membase_reg(cd, RIP, -((cd->mcodeptr + 7) - cd->mcodebase), REG_A1);
+ M_ALEA(REG_SP, cd->stackframesize * 8 + SIZEOF_VOID_P, REG_A2);
+ M_ALD(REG_A3, REG_SP, cd->stackframesize * 8);
+ M_MOV_IMM(codegen_start_native_call, REG_ITMP1);
+ M_CALL(REG_ITMP1);
- paramshiftcnt = 0;
- stackparamcnt = 0;
+ /* restore integer and float argument registers */
- /* do we need to shift integer argument register onto stack? */
+ for (i = 0; i < md->paramcount; i++) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- if ((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) {
- /* do we need to shift 2 arguments? */
- if (iargs > (INT_ARG_CNT - 1)) {
- paramshiftcnt = 2;
-
- } else {
- paramshiftcnt = 1;
+ switch (md->paramtypes[i].type) {
+ case TYPE_INT:
+ case TYPE_LNG:
+ case TYPE_ADR:
+ M_LLD(s1, REG_SP, i * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DLD(s1, REG_SP, i * 8);
+ break;
}
-
- } else if (iargs > (INT_ARG_CNT - 1)) {
- paramshiftcnt = 1;
}
+ }
- /* calculate required stack space */
-
- stackparamcnt += (iargs > INT_ARG_CNT) ? iargs - INT_ARG_CNT : 0;
- stackparamcnt += (fargs > FLT_ARG_CNT) ? fargs - FLT_ARG_CNT : 0;
-
- stackframesize = stackparamcnt + paramshiftcnt;
-
- /* keep stack 16-byte aligned */
- if (!(stackframesize & 0x1))
- stackframesize++;
-
- x86_64_alu_imm_reg(cd, X86_64_SUB, stackframesize * 8, REG_SP);
-
- /* shift integer arguments if required */
-
- if ((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) {
- /* do we need to shift 2 arguments? */
- if (iargs > (INT_ARG_CNT - 1))
- x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 1 * 8);
+ M_ALD(REG_ITMP3, REG_SP, md->paramcount * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[4], REG_SP, 0 * 8);
+ /* copy or spill arguments to new locations */
- } else if (iargs > (INT_ARG_CNT - 1)) {
- x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 0 * 8);
- }
+ for (i = md->paramcount - 1, j = i + nativeparams; i >= 0; i--, j--) {
+ t = md->paramtypes[i].type;
+ s2 = nmd->params[j].regoff;
- /* copy stack arguments into new stack frame -- if any */
- for (i = 0; i < stackparamcnt; i++) {
- x86_64_mov_membase_reg(cd, REG_SP, (stackframesize + 1 + i) * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, (paramshiftcnt + i) * 8);
- }
+ if (IS_INT_LNG_TYPE(t)) {
+ if (!md->params[i].inmemory) {
+ s1 = md->params[i].regoff;
- } else {
- /* keep stack 16-byte aligned */
- x86_64_alu_imm_reg(cd, X86_64_SUB, 1 * 8, REG_SP);
- stackframesize = 1;
- }
-
- /* shift integer arguments for `env' and `class' arguments */
-
- if (m->flags & ACC_STATIC) {
- /* shift iargs count if less than INT_ARG_CNT, or all */
- for (i = (iargs < (INT_ARG_CNT - 2)) ? iargs : (INT_ARG_CNT - 2); i >= 0; i--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[i], rd->argintregs[i + 2]);
+ if (!nmd->params[j].inmemory)
+ M_INTMOVE(s1, s2);
+ else
+ M_LST(s1, REG_SP, s2);
+ }
+ else {
+ s1 = md->params[i].regoff + cd->stackframesize * 8 + 8;/* +1 (RA) */
+ M_LLD(REG_ITMP1, REG_SP, s1);
+ M_LST(REG_ITMP1, REG_SP, s2);
+ }
}
+ else {
+ /* We only copy spilled float arguments, as the float
+ argument registers keep unchanged. */
- /* put class into second argument register */
- x86_64_mov_imm_reg(cd, (u8) m->class, rd->argintregs[1]);
+ if (md->params[i].inmemory) {
+ s1 = md->params[i].regoff + cd->stackframesize * 8 + 8;/* +1 (RA) */
- } else {
- /* shift iargs count if less than INT_ARG_CNT, or all */
- for (i = (iargs < (INT_ARG_CNT - 1)) ? iargs : (INT_ARG_CNT - 1); i >= 0; i--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[i], rd->argintregs[i + 1]);
+ if (IS_2_WORD_TYPE(t)) {
+ M_DLD(REG_FTMP1, REG_SP, s1);
+ M_DST(REG_FTMP1, REG_SP, s2);
+ }
+ else {
+ M_FLD(REG_FTMP1, REG_SP, s1);
+ M_FST(REG_FTMP1, REG_SP, s2);
+ }
+ }
}
}
- /* put env into first argument register */
- x86_64_mov_imm_reg(cd, (u8) &env, rd->argintregs[0]);
+ /* put class into second argument register */
- /* do the native function call */
- x86_64_mov_imm_reg(cd, (u8) f, REG_ITMP1);
-#if !defined(STATIC_CLASSPATH)
- if (f == NULL)
- (*callAddrPatchPos) = cd->mcodeptr - 8;
-#endif
- x86_64_call_reg(cd, REG_ITMP1);
+ if (m->flags & ACC_STATIC)
+ M_MOV_IMM(m->class, REG_A1);
- /* remove stackframe if there is one */
- if (stackframesize) {
- x86_64_alu_imm_reg(cd, X86_64_ADD, stackframesize * 8, REG_SP);
- }
-
- if (runverbose) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, 3 * 8, REG_SP); /* keep stack 16-byte aligned */
-
- x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, 0 * 8);
- x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, 1 * 8);
-
- x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
- x86_64_mov_reg_reg(cd, REG_RESULT, rd->argintregs[1]);
- M_FLTMOVE(REG_FRESULT, rd->argfltregs[0]);
- M_FLTMOVE(REG_FRESULT, rd->argfltregs[1]);
+ /* put env into first argument register */
- x86_64_mov_imm_reg(cd, (u8) builtin_displaymethodstop, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
+ M_MOV_IMM(_Jv_env, REG_A0);
- x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_RESULT);
- x86_64_movq_membase_reg(cd, REG_SP, 1 * 8, REG_FRESULT);
+ /* do the native function call */
- x86_64_alu_imm_reg(cd, X86_64_ADD, 3 * 8, REG_SP); /* keep stack 16-byte aligned */
+ M_CALL(REG_ITMP3);
+
+ /* save return value */
+
+ switch (md->returntype.type) {
+ case TYPE_INT:
+ case TYPE_LNG:
+ case TYPE_ADR:
+ M_LST(REG_RESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DST(REG_FRESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_VOID:
+ break;
}
- /* check for exception */
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- x86_64_push_reg(cd, REG_RESULT);
-/* x86_64_call_mem(cd, (u8) &callgetexceptionptrptr); */
- x86_64_mov_imm_reg(cd, (u8) builtin_get_exceptionptrptr, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
- x86_64_mov_membase_reg(cd, REG_RESULT, 0, REG_ITMP3);
- x86_64_pop_reg(cd, REG_RESULT);
-#else
- x86_64_mov_imm_reg(cd, (u8) &_exceptionptr, REG_ITMP3);
- x86_64_mov_membase_reg(cd, REG_ITMP3, 0, REG_ITMP3);
-#endif
- x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
- x86_64_jcc(cd, X86_64_CC_NE, 1);
-
- x86_64_ret(cd);
-
- /* handle exception */
-
-#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- x86_64_push_reg(cd, REG_ITMP3);
-/* x86_64_call_mem(cd, (u8) &callgetexceptionptrptr); */
- x86_64_mov_imm_reg(cd, (u8) builtin_get_exceptionptrptr, REG_ITMP3);
- x86_64_call_reg(cd, REG_ITMP3);
- x86_64_mov_imm_membase(cd, 0, REG_RESULT, 0);
- x86_64_pop_reg(cd, REG_ITMP1_XPTR);
-#else
- x86_64_mov_reg_reg(cd, REG_ITMP3, REG_ITMP1_XPTR);
- x86_64_mov_imm_reg(cd, (u8) &_exceptionptr, REG_ITMP3);
- x86_64_alu_reg_reg(cd, X86_64_XOR, REG_ITMP2, REG_ITMP2);
- x86_64_mov_reg_membase(cd, REG_ITMP2, REG_ITMP3, 0); /* clear exception pointer */
+#if !defined(NDEBUG)
+ if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
+ emit_verbosecall_exit(jd);
#endif
- x86_64_mov_membase_reg(cd, REG_SP, 0, REG_ITMP2_XPC); /* get return address from stack */
- x86_64_alu_imm_reg(cd, X86_64_SUB, 3, REG_ITMP2_XPC); /* callq */
-
- x86_64_mov_imm_reg(cd, (u8) asm_handle_nat_exception, REG_ITMP3);
- x86_64_jmp_reg(cd, REG_ITMP3);
-
-
- /* patch in a clinit call if required *************************************/
-
- {
- u1 *xcodeptr;
- clinitref *cref;
- codegendata *tmpcd;
- u1 xmcode;
- u4 mcode;
-
- tmpcd = DNEW(codegendata);
-
- /* there can only be one clinit ref entry */
- cref = cd->clinitrefs;
-
- if (cref) {
- /* Get machine code which is patched back in later. A */
- /* `call rel32' is 5 bytes long. */
- xcodeptr = cd->mcodebase + cref->branchpos;
- xmcode = *xcodeptr;
- mcode = *((u4 *) (xcodeptr + 1));
-
- /* patch in `call rel32' to call the following code */
- tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
- x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
-
- /* Push machine code bytes to patch onto the stack. */
- x86_64_push_imm(cd, (u1) xmcode);
- x86_64_push_imm(cd, (u4) mcode);
-
- x86_64_push_imm(cd, (u8) cref->class);
-
- x86_64_mov_imm_reg(cd, (u8) asm_check_clinit, REG_ITMP1);
- x86_64_jmp_reg(cd, REG_ITMP1);
- }
+ /* remove native stackframe info */
+
+ M_ALEA(REG_SP, cd->stackframesize * 8, REG_A0);
+ M_MOV_IMM(codegen_finish_native_call, REG_ITMP1);
+ M_CALL(REG_ITMP1);
+ M_MOV(REG_RESULT, REG_ITMP3);
+
+ /* restore return value */
+
+ switch (md->returntype.type) {
+ case TYPE_INT:
+ case TYPE_LNG:
+ case TYPE_ADR:
+ M_LLD(REG_RESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_FLT:
+ case TYPE_DBL:
+ M_DLD(REG_FRESULT, REG_SP, 0 * 8);
+ break;
+ case TYPE_VOID:
+ break;
}
- /* Check if the stub size is big enough to hold the whole stub generated. */
- /* If not, this can lead into unpredictable crashes, because of heap */
- /* corruption. */
- if ((s4) (cd->mcodeptr - s) > NATIVESTUBSIZE) {
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Native stub size %d is to small for current stub size %d",
- NATIVESTUBSIZE, (s4) (cd->mcodeptr - s));
- }
+ /* remove stackframe */
-#if defined(STATISTICS)
- if (opt_stat)
- count_nstub_len += NATIVESTUBSIZE;
-#endif
+ M_AADD_IMM(cd->stackframesize * 8, REG_SP);
- /* release dump area */
+ /* test for exception */
- dump_release(dumpsize);
+ M_TEST(REG_ITMP3);
+ M_BNE(1);
+ M_RET;
- return s;
-}
+ /* handle exception */
+ M_MOV(REG_ITMP3, REG_ITMP1_XPTR);
+ M_ALD(REG_ITMP2_XPC, REG_SP, 0 * 8); /* get return address from stack */
+ M_ASUB_IMM(3, REG_ITMP2_XPC); /* callq */
-/* function: removenativestub **************************************************
+ M_MOV_IMM(asm_handle_nat_exception, REG_ITMP3);
+ M_JMP(REG_ITMP3);
- removes a previously created native-stub from memory
-
-*******************************************************************************/
+ /* generate patcher stubs */
-void removenativestub(u1 *stub)
-{
- CFREE(stub, NATIVESTUBSIZE);
+ emit_patcher_stubs(jd);
}
* c-basic-offset: 4
* tab-width: 4
* End:
+ * vim:noexpandtab:sw=4:ts=4:
*/