-/* jit/x86_64/codegen.c - machine code generator for x86_64
+/* vm/jit/x86_64/codegen.c - machine code generator for x86_64
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
- Institut f. Computersprachen, TU Wien
- R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser, M. Probst,
- S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich,
- J. Wenninger
+ Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
+ R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
+ C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
+ Institut f. Computersprachen - TU Wien
This file is part of CACAO.
Authors: Andreas Krall
Christian Thalinger
- $Id: codegen.c 1641 2004-12-01 13:13:31Z christian $
+ $Id: codegen.c 2179 2005-04-01 13:28:16Z twisti $
*/
#include <stdio.h>
#include <ucontext.h>
+#include "cacao/cacao.h"
#include "native/native.h"
-/* #include "native/jni.h" */
#include "vm/global.h"
#include "vm/builtin.h"
#include "vm/loader.h"
#include "vm/jit/x86_64/codegen.h"
#include "vm/jit/x86_64/emitfuncs.h"
#include "vm/jit/x86_64/types.h"
+#include "vm/jit/x86_64/asmoffsets.h"
/* register descripton - array ************************************************/
static int nregdescfloat[] = {
-/* REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_TMP, REG_TMP, REG_TMP, REG_TMP, */
-/* REG_RES, REG_RES, REG_RES, REG_SAV, REG_SAV, REG_SAV, REG_SAV, REG_SAV, */
- REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_TMP, REG_TMP, REG_TMP, REG_TMP,
+ REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG, REG_ARG,
REG_RES, REG_RES, REG_RES, REG_TMP, REG_TMP, REG_TMP, REG_TMP, REG_TMP,
REG_END
};
-/*******************************************************************************
-
- include independent code generation stuff -- include after register
- descriptions to avoid extern definitions
-
-*******************************************************************************/
+/* Include independent code generation stuff -- include after register */
+/* descriptions to avoid extern definitions. */
#include "vm/jit/codegen.inc"
#include "vm/jit/reg.inc"
void catch_NullPointerException(int sig, siginfo_t *siginfo, void *_p)
{
sigset_t nsig;
- /* int instr; */
- /* long faultaddr; */
struct ucontext *_uc = (struct ucontext *) _p;
struct sigcontext *sigctx = (struct sigcontext *) &_uc->uc_mcontext;
java_objectheader *xptr;
/* Reset signal handler - necessary for SysV, does no harm for BSD */
-
-/* instr = *((int*)(sigctx->rip)); */
-/* faultaddr = sigctx->sc_regs[(instr >> 16) & 0x1f]; */
-
-/* if (faultaddr == 0) { */
- act.sa_sigaction = (functionptr) catch_NullPointerException; /* reinstall handler */
+ act.sa_sigaction = catch_NullPointerException; /* reinstall handler */
act.sa_flags = SA_SIGINFO;
sigaction(sig, &act, NULL);
sigctx->rip = (u8) asm_handle_exception;
return;
-
-/* } else { */
-/* faultaddr += (long) ((instr << 16) >> 16); */
-/* fprintf(stderr, "faulting address: 0x%08x\n", faultaddr); */
-/* panic("Stack overflow"); */
-/* } */
}
/* Reset signal handler - necessary for SysV, does no harm for BSD */
- act.sa_sigaction = (functionptr) catch_ArithmeticException; /* reinstall handler */
+ act.sa_sigaction = catch_ArithmeticException; /* reinstall handler */
act.sa_flags = SA_SIGINFO;
sigaction(sig, &act, NULL);
if (!checknull) {
#if defined(SIGSEGV)
- act.sa_sigaction = (functionptr) catch_NullPointerException;
+ act.sa_sigaction = catch_NullPointerException;
act.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &act, NULL);
#endif
#if defined(SIGBUS)
- act.sa_sigaction = (functionptr) catch_NullPointerException;
+ act.sa_sigaction = catch_NullPointerException;
act.sa_flags = SA_SIGINFO;
sigaction(SIGBUS, &act, NULL);
#endif
}
- act.sa_sigaction = (functionptr) catch_ArithmeticException;
+ act.sa_sigaction = catch_ArithmeticException;
act.sa_flags = SA_SIGINFO;
sigaction(SIGFPE, &act, NULL);
}
#endif
- /* keep stack 16-byte aligned for calls into native code e.g. libc or jni */
- /* (alignment problems with movaps) */
+ /* Keep stack of non-leaf functions 16-byte aligned for calls into native */
+ /* code e.g. libc or jni (alignment problems with movaps). */
- if (!(parentargs_base & 0x1)) {
- parentargs_base++;
+ if (!m->isleafmethod || runverbose) {
+ parentargs_base |= 0x1;
}
/* create method header */
x86_64_alu_imm_reg(cd, X86_64_SUB, parentargs_base * 8, REG_SP);
}
- /* save return address and used callee saved registers */
+ /* save used callee saved registers */
p = parentargs_base;
for (i = rd->savintregcnt - 1; i >= rd->maxsavintreguse; i--) {
p--; x86_64_movq_reg_membase(cd, rd->savfltregs[i], REG_SP, p * 8);
}
- /* save monitorenter argument */
-
-#if defined(USE_THREADS)
- if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- if (m->flags & ACC_STATIC) {
- x86_64_mov_imm_reg(cd, (s8) m->class, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, rd->maxmemuse * 8);
-
- } else {
- x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, rd->maxmemuse * 8);
- }
- }
-#endif
-
- /* copy argument registers to stack and call trace function with pointer
- to arguments on stack.
- */
- if (runverbose) {
- x86_64_alu_imm_reg(cd, X86_64_SUB, (6 + 8 + 1 + 1) * 8, REG_SP);
-
- x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, 1 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[1], REG_SP, 2 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[2], REG_SP, 3 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[3], REG_SP, 4 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[4], REG_SP, 5 * 8);
- x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 6 * 8);
-
- x86_64_movq_reg_membase(cd, rd->argfltregs[0], REG_SP, 7 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[1], REG_SP, 8 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[2], REG_SP, 9 * 8);
- x86_64_movq_reg_membase(cd, rd->argfltregs[3], REG_SP, 10 * 8);
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[4], REG_SP, 11 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[5], REG_SP, 12 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[6], REG_SP, 13 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[7], REG_SP, 14 * 8); */
-
- for (p = 0, l = 0; p < m->paramcount; p++) {
- t = m->paramtypes[p];
-
- if (IS_FLT_DBL_TYPE(t)) {
- for (s1 = (m->paramcount > INT_ARG_CNT) ? INT_ARG_CNT - 2 : m->paramcount - 2; s1 >= p; s1--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
- }
-
- x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
- l++;
- }
- }
-
- x86_64_mov_imm_reg(cd, (s8) m, REG_ITMP2);
- x86_64_mov_reg_membase(cd, REG_ITMP2, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (s8) builtin_trace_args, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
-
- x86_64_mov_membase_reg(cd, REG_SP, 1 * 8, rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, REG_SP, 2 * 8, rd->argintregs[1]);
- x86_64_mov_membase_reg(cd, REG_SP, 3 * 8, rd->argintregs[2]);
- x86_64_mov_membase_reg(cd, REG_SP, 4 * 8, rd->argintregs[3]);
- x86_64_mov_membase_reg(cd, REG_SP, 5 * 8, rd->argintregs[4]);
- x86_64_mov_membase_reg(cd, REG_SP, 6 * 8, rd->argintregs[5]);
-
- x86_64_movq_membase_reg(cd, REG_SP, 7 * 8, rd->argfltregs[0]);
- x86_64_movq_membase_reg(cd, REG_SP, 8 * 8, rd->argfltregs[1]);
- x86_64_movq_membase_reg(cd, REG_SP, 9 * 8, rd->argfltregs[2]);
- x86_64_movq_membase_reg(cd, REG_SP, 10 * 8, rd->argfltregs[3]);
-/* x86_64_movq_membase_reg(cd, REG_SP, 11 * 8, rd->argfltregs[4]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 12 * 8, rd->argfltregs[5]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 13 * 8, rd->argfltregs[6]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 14 * 8, rd->argfltregs[7]); */
-
- x86_64_alu_imm_reg(cd, X86_64_ADD, (6 + 8 + 1 + 1) * 8, REG_SP);
- }
-
/* take arguments out of register or stack frame */
for (p = 0, l = 0, s1 = 0, s2 = 0; p < m->paramcount; p++) {
if (s2 >= FLT_ARG_CNT) {
pa += s2 - FLT_ARG_CNT;
}
- if (!(var->flags & INMEMORY)) { /* stack arg -> register */
+ if (!(var->flags & INMEMORY)) { /* stack arg -> register */
x86_64_mov_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, var->regoff); /* + 8 for return address */
} else { /* stack arg -> spilled */
x86_64_mov_membase_reg(cd, REG_SP, (parentargs_base + pa) * 8 + 8, REG_ITMP1); /* + 8 for return address */
}
s1++;
- } else { /* floating args */
- if (s2 < FLT_ARG_CNT) { /* register arguments */
+ } else { /* floating args */
+ if (s2 < FLT_ARG_CNT) { /* register arguments */
if (!(var->flags & INMEMORY)) { /* reg arg -> register */
M_FLTMOVE(rd->argfltregs[s2], var->regoff);
}
} /* end for */
- /* call monitorenter function */
+ /* save monitorenter argument */
#if defined(USE_THREADS)
if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- s8 func_enter = (m->flags & ACC_STATIC) ?
- (s8) builtin_staticmonitorenter : (s8) builtin_monitorenter;
+ u8 func_enter;
+
+ if (m->flags & ACC_STATIC) {
+ func_enter = (u8) builtin_staticmonitorenter;
+ x86_64_mov_imm_reg(cd, (s8) m->class, REG_ITMP1);
+ x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, rd->maxmemuse * 8);
+
+ } else {
+ func_enter = (u8) builtin_monitorenter;
+ x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, rd->maxmemuse * 8);
+ }
+
+ /* call monitorenter function */
+
x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
x86_64_mov_imm_reg(cd, func_enter, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
- }
+ }
#endif
+
+ /* Copy argument registers to stack and call trace function with pointer */
+ /* to arguments on stack. */
+
+ if (runverbose) {
+ x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1 + 1) * 8, REG_SP);
+
+ /* save integer argument registers */
+
+ for (p = 0; p < INT_ARG_CNT; p++) {
+ x86_64_mov_reg_membase(cd, rd->argintregs[p], REG_SP, (1 + p) * 8);
+ }
+
+ /* save float argument registers */
+
+ for (p = 0; p < FLT_ARG_CNT; p++) {
+ x86_64_movq_reg_membase(cd, rd->argfltregs[p], REG_SP, (1 + INT_ARG_CNT + p) * 8);
+ }
+
+ /* show integer hex code for float arguments */
+
+ for (p = 0, l = 0; p < m->paramcount && p < INT_ARG_CNT; p++) {
+ t = m->paramtypes[p];
+
+ /* if the paramtype is a float, we have to right shift all */
+ /* following integer registers */
+
+ if (IS_FLT_DBL_TYPE(t)) {
+ for (s1 = INT_ARG_CNT - 2; s1 >= p; s1--) {
+ x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
+ }
+
+ x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
+ l++;
+ }
+ }
+
+ x86_64_mov_imm_reg(cd, (u8) m, REG_ITMP2);
+ x86_64_mov_reg_membase(cd, REG_ITMP2, REG_SP, 0 * 8);
+ x86_64_mov_imm_reg(cd, (u8) builtin_trace_args, REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+
+ /* restore integer argument registers */
+
+ for (p = 0; p < INT_ARG_CNT; p++) {
+ x86_64_mov_membase_reg(cd, REG_SP, (1 + p) * 8, rd->argintregs[p]);
+ }
+
+ /* restore float argument registers */
+
+ for (p = 0; p < FLT_ARG_CNT; p++) {
+ x86_64_movq_membase_reg(cd, REG_SP, (1 + INT_ARG_CNT + p) * 8, rd->argfltregs[p]);
+ }
+
+ x86_64_alu_imm_reg(cd, X86_64_ADD, (6 + 8 + 1 + 1) * 8, REG_SP);
+ }
+
}
/* end of header generation */
src = bptr->instack;
len = bptr->indepth;
MCODECHECK(64 + len);
- while (src != NULL) {
+
+#ifdef LSRA
+ if (opt_lsra) {
+ while (src != NULL) {
+ len--;
+ if ((len == 0) && (bptr->type != BBTYPE_STD)) {
+ if (bptr->type == BBTYPE_SBR) {
+ /* d = reg_of_var(rd, src, REG_ITMP1); */
+ if (!(src->flags & INMEMORY))
+ d= src->regoff;
+ else
+ d=REG_ITMP1;
+ x86_64_pop_reg(cd, d);
+ store_reg_to_var_int(src, d);
+
+ } else if (bptr->type == BBTYPE_EXH) {
+ /* d = reg_of_var(rd, src, REG_ITMP1); */
+ if (!(src->flags & INMEMORY))
+ d= src->regoff;
+ else
+ d=REG_ITMP1;
+ M_INTMOVE(REG_ITMP1, d);
+ store_reg_to_var_int(src, d);
+ }
+ }
+ src = src->prev;
+ }
+
+ } else {
+#endif
+
+ while (src != NULL) {
len--;
if ((len == 0) && (bptr->type != BBTYPE_STD)) {
if (bptr->type == BBTYPE_SBR) {
}
src = src->prev;
}
-
+#ifdef LSRA
+ }
+#endif
/* walk through all instructions */
src = bptr->instack;
MCODECHECK(64); /* an instruction usually needs < 64 words */
switch (iptr->opc) {
+ case ICMD_INLINE_START: /* internal ICMDs */
+ case ICMD_INLINE_END:
+ break;
case ICMD_NOP: /* ... ==> ... */
break;
d = reg_of_var(rd, iptr->dst, REG_NULL);
if (iptr->dst->flags & INMEMORY) {
if (src->flags & INMEMORY) {
- if (x86_64_is_imm32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l)) {
x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, REG_ITMP1);
} else {
x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, iptr->dst->regoff * 8);
} else {
- if (x86_64_is_imm32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l)) {
x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, REG_ITMP1);
} else {
} else {
if (src->flags & INMEMORY) {
- if (x86_64_is_imm32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l)) {
x86_64_imul_imm_membase_reg(cd, iptr->val.l, REG_SP, src->regoff * 8, iptr->dst->regoff);
} else {
x86_64_alul_reg_reg(cd, X86_64_ADD, iptr->dst->regoff, iptr->dst->regoff);
} else {
- if (x86_64_is_imm32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l)) {
x86_64_imul_imm_reg_reg(cd, iptr->val.l, src->regoff, iptr->dst->regoff); /* 4 cycles */
} else {
}
gen_div_check(src);
+ x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
+
x86_64_alul_imm_reg(cd, X86_64_CMP, 0x80000000, RAX); /* check as described in jvm spec */
x86_64_jcc(cd, X86_64_CC_NE, 2 + 4 + 6);
+
+
x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
x86_64_alul_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 1 + 3); /* 6 bytes */
+ x86_64_jcc(cd, X86_64_CC_E, 1 + 3); /* 6 bytes */
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
x86_64_cltd(cd);
x86_64_idivl_reg(cd, REG_ITMP3);
}
gen_div_check(src);
+ x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
+
x86_64_mov_imm_reg(cd, 0x8000000000000000LL, REG_ITMP2); /* check as described in jvm spec */
x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
x86_64_jcc(cd, X86_64_CC_NE, 2 + 4 + 6);
+
+
x86_64_alul_reg_reg(cd, X86_64_XOR, RDX, RDX); /* 2 bytes */
x86_64_alu_imm_reg(cd, X86_64_CMP, -1, REG_ITMP3); /* 4 bytes */
- x86_64_jcc(cd, X86_64_CC_E, 3 + 2 + 3); /* 6 bytes */
+ x86_64_jcc(cd, X86_64_CC_E, 2 + 3); /* 6 bytes */
- x86_64_mov_reg_reg(cd, RDX, REG_ITMP2); /* save %rdx, cause it's an argument register */
x86_64_cqto(cd);
x86_64_idiv_reg(cd, REG_ITMP3);
gen_bound_check;
}
- if (x86_64_is_imm32(iptr->val.l)) {
+ if (IS_IMM32(iptr->val.l)) {
x86_64_mov_imm_memindex(cd, (u4) (iptr->val.l & 0x00000000ffffffff), OFFSET(java_longarray, data[0]), s1, s2, 3);
} else {
/* read patched code. Here we patch the following 5 nop's */
/* so that the real code keeps untouched. */
if (showdisassemble) {
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
+ x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
+ x86_64_nop(cd); x86_64_nop(cd);
}
}
var_to_reg_flt(s2, src, REG_FTMP1);
x86_64_movsd_reg_membase(cd, s2, REG_ITMP2, 0);
break;
- default:
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Unknown PUTSTATIC operand type %d",
- iptr->op1);
+ }
+ break;
+
+ case ICMD_PUTSTATICCONST: /* ... ==> ... */
+ /* val = value (in current instruction) */
+ /* op1 = type, val.a = field address (in */
+ /* following NOP) */
+
+ /* If the static fields' class is not yet initialized, we do it */
+ /* now. The call code is generated later. */
+ if (!((fieldinfo *) iptr[1].val.a)->class->initialized) {
+ codegen_addclinitref(cd, cd->mcodeptr, ((fieldinfo *) iptr[1].val.a)->class);
+
+ /* This is just for debugging purposes. Is very difficult to */
+ /* read patched code. Here we patch the following 5 nop's */
+ /* so that the real code keeps untouched. */
+ if (showdisassemble) {
+ x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
+ x86_64_nop(cd); x86_64_nop(cd);
+ }
+ }
+
+ /* This approach is much faster than moving the field address */
+ /* inline into a register. */
+ a = dseg_addaddress(cd, &(((fieldinfo *) iptr[1].val.a)->value));
+ x86_64_mov_membase_reg(cd, RIP, -(((ptrint) cd->mcodeptr + 7) - (ptrint) cd->mcodebase) + a, REG_ITMP1);
+ switch (iptr->op1) {
+ case TYPE_INT:
+ case TYPE_FLT:
+ x86_64_movl_imm_membase(cd, iptr->val.i, REG_ITMP1, 0);
+ break;
+ case TYPE_LNG:
+ case TYPE_ADR:
+ case TYPE_DBL:
+ if (IS_IMM32(iptr->val.l)) {
+ x86_64_mov_imm_membase(cd, iptr->val.l, REG_ITMP1, 0);
+ } else {
+ x86_64_movl_imm_membase(cd, iptr->val.l, REG_ITMP1, 0);
+ x86_64_movl_imm_membase(cd, iptr->val.l >> 32, REG_ITMP1, 4);
+ }
+ break;
}
break;
/* read patched code. Here we patch the following 5 nop's */
/* so that the real code keeps untouched. */
if (showdisassemble) {
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
- x86_64_nop(cd);
+ x86_64_nop(cd); x86_64_nop(cd); x86_64_nop(cd);
+ x86_64_nop(cd); x86_64_nop(cd);
}
}
x86_64_movsd_membase_reg(cd, REG_ITMP2, 0, d);
store_reg_to_var_flt(iptr->dst, d);
break;
- default:
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Unknown GETSTATIC operand type %d",
- iptr->op1);
}
break;
- case ICMD_PUTFIELD: /* ..., value ==> ... */
+ case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
/* op1 = type, val.i = field offset */
a = ((fieldinfo *)(iptr->val.a))->offset;
var_to_reg_int(s1, src->prev, REG_ITMP1);
+ gen_nullptr_check(s1);
switch (iptr->op1) {
case TYPE_INT:
var_to_reg_int(s2, src, REG_ITMP2);
- gen_nullptr_check(s1);
x86_64_movl_reg_membase(cd, s2, s1, a);
break;
case TYPE_LNG:
case TYPE_ADR:
var_to_reg_int(s2, src, REG_ITMP2);
- gen_nullptr_check(s1);
x86_64_mov_reg_membase(cd, s2, s1, a);
break;
case TYPE_FLT:
var_to_reg_flt(s2, src, REG_FTMP2);
- gen_nullptr_check(s1);
x86_64_movss_reg_membase(cd, s2, s1, a);
break;
case TYPE_DBL:
var_to_reg_flt(s2, src, REG_FTMP2);
- gen_nullptr_check(s1);
x86_64_movsd_reg_membase(cd, s2, s1, a);
break;
- default:
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Unknown PUTFIELD operand type %d",
- iptr->op1);
+ }
+ break;
+
+ case ICMD_PUTFIELDCONST: /* ..., objectref, value ==> ... */
+ /* val = value (in current instruction) */
+ /* op1 = type, val.a = field address (in */
+ /* following NOP) */
+
+ a = ((fieldinfo *) iptr[1].val.a)->offset;
+ var_to_reg_int(s1, src, REG_ITMP1);
+ gen_nullptr_check(s1);
+ switch (iptr->op1) {
+ case TYPE_INT:
+ case TYPE_FLT:
+ x86_64_movl_imm_membase(cd, iptr->val.i, s1, a);
+ break;
+ case TYPE_LNG:
+ case TYPE_ADR:
+ case TYPE_DBL:
+ if (IS_IMM32(iptr->val.l)) {
+ x86_64_mov_imm_membase(cd, iptr->val.l, s1, a);
+ } else {
+ x86_64_movl_imm_membase(cd, iptr->val.l, s1, a);
+ x86_64_movl_imm_membase(cd, iptr->val.l >> 32, s1, a + 4);
+ }
+ break;
}
break;
a = ((fieldinfo *)(iptr->val.a))->offset;
var_to_reg_int(s1, src, REG_ITMP1);
+ gen_nullptr_check(s1);
switch (iptr->op1) {
case TYPE_INT:
d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- gen_nullptr_check(s1);
x86_64_movl_membase_reg(cd, s1, a, d);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_LNG:
case TYPE_ADR:
d = reg_of_var(rd, iptr->dst, REG_ITMP1);
- gen_nullptr_check(s1);
x86_64_mov_membase_reg(cd, s1, a, d);
store_reg_to_var_int(iptr->dst, d);
break;
case TYPE_FLT:
d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- gen_nullptr_check(s1);
x86_64_movss_membase_reg(cd, s1, a, d);
store_reg_to_var_flt(iptr->dst, d);
break;
case TYPE_DBL:
d = reg_of_var(rd, iptr->dst, REG_FTMP1);
- gen_nullptr_check(s1);
x86_64_movsd_membase_reg(cd, s1, a, d);
store_reg_to_var_flt(iptr->dst, d);
break;
- default:
- throw_cacao_exception_exit(string_java_lang_InternalError,
- "Unknown GETFIELD operand type %d",
- iptr->op1);
}
break;
x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
- ALIGNCODENOP;
break;
case ICMD_GOTO: /* ... ==> ... */
x86_64_jmp_imm(cd, 0);
codegen_addreference(cd, BlockPtrOfPC(iptr->op1), cd->mcodeptr);
- ALIGNCODENOP;
break;
case ICMD_JSR: /* ... ==> ... */
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_E, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_NE, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_L, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_GE, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_G, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_ITMP1);
d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- s3 = iptr->val.i;
if (iptr[1].opc == ICMD_ELSE_ICONST) {
if (s1 == d) {
M_INTMOVE(s1, REG_ITMP1);
}
x86_64_movl_imm_reg(cd, iptr[1].val.i, d);
}
- x86_64_movl_imm_reg(cd, s3, REG_ITMP2);
+ x86_64_movl_imm_reg(cd, iptr->val.i, REG_ITMP2);
x86_64_testl_reg_reg(cd, s1, s1);
x86_64_cmovccl_reg_reg(cd, X86_64_CC_LE, REG_ITMP2, d);
store_reg_to_var_int(iptr->dst, d);
var_to_reg_int(s1, src, REG_RESULT);
M_INTMOVE(s1, REG_RESULT);
-#if defined(USE_THREADS)
- if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
- x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, rd->maxmemuse * 8);
- x86_64_mov_imm_reg(cd, (u8) builtin_monitorexit, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_RESULT);
- }
-#endif
-
goto nowperformreturn;
case ICMD_FRETURN: /* ..., retvalue ==> ... */
var_to_reg_flt(s1, src, REG_FRESULT);
M_FLTMOVE(s1, REG_FRESULT);
-#if defined(USE_THREADS)
- if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
- x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, rd->maxmemuse * 8);
- x86_64_mov_imm_reg(cd, (u8) builtin_monitorexit, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- x86_64_movq_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_FRESULT);
- }
-#endif
-
goto nowperformreturn;
case ICMD_RETURN: /* ... ==> ... */
-#if defined(USE_THREADS)
- if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
- x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, (u8) builtin_monitorexit, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- }
-#endif
-
nowperformreturn:
{
s4 i, p;
x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, 0 * 8);
x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, 1 * 8);
- x86_64_mov_imm_reg(cd, (s8) m, rd->argintregs[0]);
+ x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
x86_64_mov_reg_reg(cd, REG_RESULT, rd->argintregs[1]);
M_FLTMOVE(REG_FRESULT, rd->argfltregs[0]);
M_FLTMOVE(REG_FRESULT, rd->argfltregs[1]);
- x86_64_mov_imm_reg(cd, (s8) builtin_displaymethodstop, REG_ITMP1);
+ x86_64_mov_imm_reg(cd, (u8) builtin_displaymethodstop, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_RESULT);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
}
+#if defined(USE_THREADS)
+ if (checksync && (m->flags & ACC_SYNCHRONIZED)) {
+ x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, rd->argintregs[0]);
+
+ /* we need to save the proper return value */
+ switch (iptr->opc) {
+ case ICMD_IRETURN:
+ case ICMD_ARETURN:
+ case ICMD_LRETURN:
+ x86_64_mov_reg_membase(cd, REG_RESULT, REG_SP, rd->maxmemuse * 8);
+ break;
+ case ICMD_FRETURN:
+ case ICMD_DRETURN:
+ x86_64_movq_reg_membase(cd, REG_FRESULT, REG_SP, rd->maxmemuse * 8);
+ break;
+ }
+
+ x86_64_mov_imm_reg(cd, (u8) builtin_monitorexit, REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+
+ /* and now restore the proper return value */
+ switch (iptr->opc) {
+ case ICMD_IRETURN:
+ case ICMD_ARETURN:
+ case ICMD_LRETURN:
+ x86_64_mov_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_RESULT);
+ break;
+ case ICMD_FRETURN:
+ case ICMD_DRETURN:
+ x86_64_movq_membase_reg(cd, REG_SP, rd->maxmemuse * 8, REG_FRESULT);
+ break;
+ }
+ }
+#endif
+
/* restore saved registers */
for (i = rd->savintregcnt - 1; i >= rd->maxsavintreguse; i--) {
p--; x86_64_mov_membase_reg(cd, REG_SP, p * 8, rd->savintregs[i]);
}
x86_64_ret(cd);
- ALIGNCODENOP;
}
break;
dseg_adddata(cd, cd->mcodeptr);
x86_64_mov_memindex_reg(cd, -(cd->dseglen), REG_ITMP2, REG_ITMP1, 3, REG_ITMP1);
x86_64_jmp_reg(cd, REG_ITMP1);
- ALIGNCODENOP;
}
break;
tptr = (void **) iptr->target;
codegen_addreference(cd, (basicblock *) tptr[0], cd->mcodeptr);
-
- ALIGNCODENOP;
}
break;
/* op1 = arg count, val.a = method pointer */
case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
- /* op1 = arg count, val.a = method pointer */
-
- case ICMD_INVOKEVIRTUAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
- /* op1 = arg count, val.a = method pointer */
-
- case ICMD_INVOKEINTERFACE:/*.., objectref, [arg1, [arg2 ...]] ==> ... */
- /* op1 = arg count, val.a = method pointer */
+ case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
+ case ICMD_INVOKEINTERFACE:
s3 = iptr->op1;
iarg = 0;
farg = 0;
- /* copy arguments to registers or stack location */
+ /* copy arguments to registers or stack location ******************/
+
+ /* count integer and float arguments */
+
for (; --s3 >= 0; src = src->prev) {
IS_INT_LNG_TYPE(src->type) ? iarg++ : farg++;
}
src = tmpsrc;
s3 = s2;
- s2 = (iarg > INT_ARG_CNT) ? iarg - INT_ARG_CNT : 0 + (farg > FLT_ARG_CNT) ? farg - FLT_ARG_CNT : 0;
+ /* calculate amount of arguments to be on stack */
+
+ s2 = (iarg > INT_ARG_CNT) ? iarg - INT_ARG_CNT : 0 +
+ (farg > FLT_ARG_CNT) ? farg - FLT_ARG_CNT : 0;
for (; --s3 >= 0; src = src->prev) {
+ /* decrement the current argument type */
IS_INT_LNG_TYPE(src->type) ? iarg-- : farg--;
+
if (src->varkind == ARGVAR) {
if (IS_INT_LNG_TYPE(src->type)) {
if (iarg >= INT_ARG_CNT) {
lm = iptr->val.a;
switch (iptr->opc) {
- case ICMD_BUILTIN3:
- case ICMD_BUILTIN2:
- case ICMD_BUILTIN1:
-
- a = (s8) lm;
- d = iptr->op1;
-
- x86_64_mov_imm_reg(cd, a, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- break;
-
- case ICMD_INVOKESTATIC:
-
- a = (s8) lm->stubroutine;
- d = lm->returntype;
+ case ICMD_BUILTIN3:
+ case ICMD_BUILTIN2:
+ case ICMD_BUILTIN1:
+ a = (s8) lm;
+ d = iptr->op1;
- x86_64_mov_imm_reg(cd, a, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
- break;
-
- case ICMD_INVOKESPECIAL:
-
- a = (s8) lm->stubroutine;
- d = lm->returntype;
+ x86_64_mov_imm_reg(cd, a, REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+ break;
- gen_nullptr_check(rd->argintregs[0]); /* first argument contains pointer */
- x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); /* access memory for hardware nullptr */
- x86_64_mov_imm_reg(cd, a, REG_ITMP2);
- x86_64_call_reg(cd, REG_ITMP2);
- break;
+ case ICMD_INVOKESTATIC:
+ a = (s8) lm->stubroutine;
+ d = lm->returntype;
- case ICMD_INVOKEVIRTUAL:
+ x86_64_mov_imm_reg(cd, a, REG_ITMP2);
+ x86_64_call_reg(cd, REG_ITMP2);
+ break;
- d = lm->returntype;
+ case ICMD_INVOKESPECIAL:
+ a = (s8) lm->stubroutine;
+ d = lm->returntype;
- gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex, REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- break;
+ gen_nullptr_check(rd->argintregs[0]); /* first argument contains pointer */
+ x86_64_mov_membase_reg(cd, rd->argintregs[0], 0, REG_ITMP2); /* access memory for hardware nullptr */
+ x86_64_mov_imm_reg(cd, a, REG_ITMP2);
+ x86_64_call_reg(cd, REG_ITMP2);
+ break;
- case ICMD_INVOKEINTERFACE:
+ case ICMD_INVOKEVIRTUAL:
+ d = lm->returntype;
- ci = lm->class;
- d = lm->returntype;
+ gen_nullptr_check(rd->argintregs[0]);
+ x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2, OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex, REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+ break;
- gen_nullptr_check(rd->argintregs[0]);
- x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
- x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * ci->index, REG_ITMP2);
- x86_64_mov_membase32_reg(cd, REG_ITMP2, sizeof(methodptr) * (lm - ci->methods), REG_ITMP1);
- x86_64_call_reg(cd, REG_ITMP1);
- break;
+ case ICMD_INVOKEINTERFACE:
+ ci = lm->class;
+ d = lm->returntype;
- default:
- d = 0;
- error("Unkown ICMD-Command: %d", iptr->opc);
- }
+ gen_nullptr_check(rd->argintregs[0]);
+ x86_64_mov_membase_reg(cd, rd->argintregs[0], OFFSET(java_objectheader, vftbl), REG_ITMP2);
+ x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * ci->index, REG_ITMP2);
+ x86_64_mov_membase32_reg(cd, REG_ITMP2, sizeof(methodptr) * (lm - ci->methods), REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+ break;
+ }
/* d contains return type */
*/
{
- classinfo *super = (classinfo*) iptr->val.a;
+ classinfo *super = (classinfo *) iptr->val.a;
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
x86_64_jcc(cd, X86_64_CC_E, a);
x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
+ x86_64_mov_imm_reg(cd, (ptrint) super->vftbl, REG_ITMP2);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
#endif
/* op1: 0 == array, 1 == class */
/* val.a: (classinfo*) superclass */
-/* superclass is an interface:
- *
- * OK if ((sub == NULL) ||
- * (sub->vftbl->interfacetablelength > super->index) &&
- * (sub->vftbl->interfacetable[-super->index] != NULL));
- *
- * superclass is a class:
- *
- * OK if ((sub == NULL) || (0
- * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
- * super->vftbl->diffvall));
- */
+ /* superclass is an interface:
+ *
+ * OK if ((sub == NULL) ||
+ * (sub->vftbl->interfacetablelength > super->index) &&
+ * (sub->vftbl->interfacetable[-super->index] != NULL));
+ *
+ * superclass is a class:
+ *
+ * OK if ((sub == NULL) || (0
+ * <= (sub->vftbl->baseval - super->vftbl->baseval) <=
+ * super->vftbl->diffval));
+ */
{
- classinfo *super = (classinfo*) iptr->val.a;
+ classinfo *super = (classinfo *) iptr->val.a;
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
codegen_threadcritrestart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- d = reg_of_var(rd, iptr->dst, REG_ITMP3);
- var_to_reg_int(s1, src, d);
+ var_to_reg_int(s1, src, REG_ITMP1);
if (iptr->op1) { /* class/interface */
if (super->flags & ACC_INTERFACE) { /* interface */
x86_64_test_reg_reg(cd, s1, s1);
a = 3; /* mov_membase_reg */
CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
+ a += 3; /* movl_membase_reg - if REG_ITMP3 == R10 */
+ CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength));
a += 3; /* sub */
CALCIMMEDIATEBYTES(a, super->index);
a += 6; /* jcc */
a += 3; /* mov_membase_reg */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
+ CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*));
a += 3; /* test */
a += 6; /* jcc */
x86_64_jcc(cd, X86_64_CC_E, a);
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength), REG_ITMP2);
- x86_64_alu_imm_reg(cd, X86_64_SUB, super->index, REG_ITMP2);
- x86_64_test_reg_reg(cd, REG_ITMP2, REG_ITMP2);
+ x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP2);
+ x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetablelength), REG_ITMP3);
+ x86_64_alu_imm_reg(cd, X86_64_SUB, super->index, REG_ITMP3);
+ x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
x86_64_jcc(cd, X86_64_CC_LE, 0);
codegen_addxcastrefs(cd, cd->mcodeptr);
- x86_64_mov_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*), REG_ITMP2);
- x86_64_test_reg_reg(cd, REG_ITMP2, REG_ITMP2);
+ x86_64_mov_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, interfacetable[0]) - super->index * sizeof(methodptr*), REG_ITMP3);
+ x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
x86_64_jcc(cd, X86_64_CC_E, 0);
codegen_addxcastrefs(cd, cd->mcodeptr);
- } else { /* class */
+ } else { /* class */
x86_64_test_reg_reg(cd, s1, s1);
/* TODO: clean up this calculation */
- a = 3; /* mov_membase_reg */
+ a = 3; /* mov_membase_reg */
CALCOFFSETBYTES(a, s1, OFFSET(java_objectheader, vftbl));
a += 10; /* mov_imm_reg */
- a += 2; /* movl_membase_reg - only if REG_ITMP1 == RAX */
- CALCOFFSETBYTES(a, REG_ITMP1, OFFSET(vftbl_t, baseval));
+ a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
+ CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
- if (d != REG_ITMP3) {
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, diffval));
+ if (s1 != REG_ITMP1) {
+ a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
+ CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, baseval));
+ a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
+ CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, diffval));
a += 3; /* sub */
-
+
} else {
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, baseval));
+ a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
+ CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, baseval));
a += 3; /* sub */
a += 10; /* mov_imm_reg */
- a += 3; /* movl_membase_reg - only if REG_ITMP2 == R10 */
- CALCOFFSETBYTES(a, REG_ITMP2, OFFSET(vftbl_t, diffval));
+ a += 3; /* movl_membase_reg - only if REG_ITMP3 == R11 */
+ CALCOFFSETBYTES(a, REG_ITMP3, OFFSET(vftbl_t, diffval));
}
a += 3; /* cmp */
x86_64_jcc(cd, X86_64_CC_E, a);
- x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
+ x86_64_mov_membase_reg(cd, s1, OFFSET(java_objectheader, vftbl), REG_ITMP2);
+ x86_64_mov_imm_reg(cd, (ptrint) super->vftbl, REG_ITMP3);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
+ codegen_threadcritstart(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_movl_membase_reg(cd, REG_ITMP1, OFFSET(vftbl_t, baseval), REG_ITMP1);
- if (d != REG_ITMP3) {
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP3);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, diffval), REG_ITMP2);
+ x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP2);
+ if (s1 != REG_ITMP1) {
+ x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, baseval), REG_ITMP1);
+ x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, diffval), REG_ITMP3);
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
+ codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
#endif
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP3, REG_ITMP1);
+ x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP1, REG_ITMP2);
} else {
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, baseval), REG_ITMP2);
- x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP2, REG_ITMP1);
- x86_64_mov_imm_reg(cd, (s8) super->vftbl, REG_ITMP2);
- x86_64_movl_membase_reg(cd, REG_ITMP2, OFFSET(vftbl_t, diffval), REG_ITMP2);
+ x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, baseval), REG_ITMP3);
+ x86_64_alu_reg_reg(cd, X86_64_SUB, REG_ITMP3, REG_ITMP2);
+ x86_64_mov_imm_reg(cd, (ptrint) super->vftbl, REG_ITMP3);
+ x86_64_movl_membase_reg(cd, REG_ITMP3, OFFSET(vftbl_t, diffval), REG_ITMP3);
+ }
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
- codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
+ codegen_threadcritstop(cd, cd->mcodeptr - cd->mcodebase);
#endif
- }
- x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP2, REG_ITMP1);
+ x86_64_alu_reg_reg(cd, X86_64_CMP, REG_ITMP3, REG_ITMP2);
x86_64_jcc(cd, X86_64_CC_A, 0); /* (u) REG_ITMP1 > (u) REG_ITMP2 -> jump */
codegen_addxcastrefs(cd, cd->mcodeptr);
}
} else
panic("internal error: no inlined array checkcast");
}
+ d = reg_of_var(rd, iptr->dst, REG_ITMP3);
M_INTMOVE(s1, d);
store_reg_to_var_int(iptr->dst, d);
+/* if (iptr->dst->flags & INMEMORY) { */
+/* x86_64_mov_reg_membase(cd, s1, REG_SP, iptr->dst->regoff * 8); */
+/* } else { */
+/* M_INTMOVE(s1, iptr->dst->regoff); */
+/* } */
break;
case ICMD_CHECKASIZE: /* ..., size ==> ..., size */
x86_64_jcc(cd, X86_64_CC_L, 0);
codegen_addxcheckarefs(cd, cd->mcodeptr);
- /* copy sizes to stack (argument numbers >= INT_ARG_CNT) */
+ /* copy SAVEDVAR sizes to stack */
if (src->varkind != ARGVAR) {
- x86_64_mov_reg_membase(cd, s2, REG_SP, (s1 + INT_ARG_CNT) * 8);
+ x86_64_mov_reg_membase(cd, s2, REG_SP, s1 * 8);
}
}
x86_64_mov_imm_reg(cd, iptr->op1, rd->argintregs[0]);
/* a1 = arraydescriptor */
- x86_64_mov_imm_reg(cd, (s8) iptr->val.a, rd->argintregs[1]);
+ x86_64_mov_imm_reg(cd, (u8) iptr->val.a, rd->argintregs[1]);
/* a2 = pointer to dimensions = stack pointer */
x86_64_mov_reg_reg(cd, REG_SP, rd->argintregs[2]);
- x86_64_mov_imm_reg(cd, (s8) builtin_nmultianewarray, REG_ITMP1);
+ x86_64_mov_imm_reg(cd, (u8) builtin_nmultianewarray, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
s1 = reg_of_var(rd, iptr->dst, REG_RESULT);
src = bptr->outstack;
len = bptr->outdepth;
MCODECHECK(64 + len);
+#ifdef LSRA
+ if (!opt_lsra)
+#endif
while (src) {
len--;
if ((src->varkind != STACKVAR)) {
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
/* move index register into REG_ITMP1 */
- x86_64_mov_reg_reg(cd, bref->reg, REG_ITMP1); /* 3 bytes */
+ x86_64_mov_reg_reg(cd, bref->reg, REG_ITMP1); /* 3 bytes */
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
x86_64_mov_reg_reg(cd, REG_ITMP1, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, (s8) new_arrayindexoutofboundsexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) new_arrayindexoutofboundsexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
+ x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (s8) new_negativearraysizeexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) new_negativearraysizeexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
+ x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (s8) new_classcastexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) new_classcastexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP3); /* 10 bytes */
+ x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP3, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
+ x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
bref->branchpos,
cd->mcodeptr - cd->mcodebase);
- MCODECHECK(50);
+ MCODECHECK(100);
- x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
+ x86_64_mov_imm_reg(cd, 0, REG_ITMP2_XPC); /* 10 bytes */
dseg_adddata(cd, cd->mcodeptr);
- x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
- x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
+ x86_64_mov_imm_reg(cd, bref->branchpos - 6, REG_ITMP1); /* 10 bytes */
+ x86_64_alu_reg_reg(cd, X86_64_ADD, REG_ITMP1, REG_ITMP2_XPC); /* 3 bytes */
if (xcodeptr != NULL) {
x86_64_jmp_imm(cd, xcodeptr - cd->mcodeptr - 5);
x86_64_alu_imm_reg(cd, X86_64_SUB, 2 * 8, REG_SP);
x86_64_mov_reg_membase(cd, REG_ITMP2_XPC, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (s8) new_nullpointerexception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) new_nullpointerexception, REG_ITMP3);
x86_64_call_reg(cd, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_SP, 0 * 8, REG_ITMP2_XPC);
x86_64_alu_imm_reg(cd, X86_64_ADD, 2 * 8, REG_SP);
- x86_64_mov_imm_reg(cd, (s8) asm_handle_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) asm_handle_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
}
}
tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
- /* Save current stack pointer into a temporary register. */
- x86_64_mov_reg_reg(cd, REG_SP, REG_ITMP1);
-
/* Push machine code bytes to patch onto the stack. */
x86_64_push_imm(cd, (u1) xmcode);
x86_64_push_imm(cd, (u4) mcode);
x86_64_push_imm(cd, (u8) cref->class);
- /* Push previously saved stack pointer onto stack. */
- x86_64_push_reg(cd, REG_ITMP1);
-
x86_64_mov_imm_reg(cd, (u8) asm_check_clinit, REG_ITMP1);
x86_64_jmp_reg(cd, REG_ITMP1);
}
*******************************************************************************/
-#define COMPSTUBSIZE 23
+#define COMPSTUBSIZE 23
u1 *createcompilerstub(methodinfo *m)
{
/* static java_objectheader **(*callgetexceptionptrptr)() = builtin_get_exceptionptrptr; */
/* #endif */
-#define NATIVESTUBSIZE 420
+#define NATIVESTUBSIZE 700 /* keep this size high enough! */
u1 *createnativestub(functionptr f, methodinfo *m)
{
- u1 *s = CNEW(u1, NATIVESTUBSIZE); /* memory to hold the stub */
- s4 stackframesize; /* size of stackframe if needed */
- codegendata *cd;
- registerdata *rd;
+ u1 *s; /* pointer to stub memory */
+ codegendata *cd;
+ registerdata *rd;
t_inlining_globals *id;
- s4 dumpsize;
+ s4 dumpsize;
+ s4 stackframesize; /* size of stackframe if needed */
+ u1 *tptr;
+ s4 iargs; /* count of integer arguments */
+ s4 fargs; /* count of float arguments */
+ s4 i; /* counter */
+
+ void **callAddrPatchPos=0;
+ u1 *jmpInstrPos=0;
+ void **jmpInstrPatchPos=0;
+
+ /* initialize variables */
+
+ iargs = 0;
+ fargs = 0;
/* mark start of dump memory area */
inlining_setup(m, id);
reg_setup(m, rd, id);
+ /* set paramcount and paramtypes */
+
+ descriptor2types(m);
+
+ /* count integer and float arguments */
+
+ tptr = m->paramtypes;
+ for (i = 0; i < m->paramcount; i++) {
+ IS_INT_LNG_TYPE(*tptr++) ? iargs++ : fargs++;
+ }
+
+ s = CNEW(u1, NATIVESTUBSIZE); /* memory to hold the stub */
+
+	/* set some required variables which are normally set by codegen_setup */
cd->mcodebase = s;
cd->mcodeptr = s;
cd->clinitrefs = NULL;
- descriptor2types(m); /* set paramcount and paramtypes */
-
/* if function is static, check for initialized */
- if (m->flags & ACC_STATIC) {
- /* if class isn't yet initialized, do it */
- if (!m->class->initialized) {
- codegen_addclinitref(cd, cd->mcodeptr, m->class);
- }
+ if ((m->flags & ACC_STATIC) && !m->class->initialized) {
+ codegen_addclinitref(cd, cd->mcodeptr, m->class);
}
if (runverbose) {
- s4 p, l, s1;
+ s4 l, s1;
+
+ x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
+
+ /* save integer and float argument registers */
+
+ for (i = 0; i < INT_ARG_CNT; i++) {
+ x86_64_mov_reg_membase(cd, rd->argintregs[i], REG_SP, (1 + i) * 8);
+ }
+
+ for (i = 0; i < FLT_ARG_CNT; i++) {
+ x86_64_movq_reg_membase(cd, rd->argfltregs[i], REG_SP, (1 + INT_ARG_CNT + i) * 8);
+ }
+
+ /* show integer hex code for float arguments */
+
+ for (i = 0, l = 0; i < m->paramcount && i < INT_ARG_CNT; i++) {
+ /* if the paramtype is a float, we have to right shift all */
+ /* following integer registers */
+
+ if (IS_FLT_DBL_TYPE(m->paramtypes[i])) {
+ for (s1 = INT_ARG_CNT - 2; s1 >= i; s1--) {
+ x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
+ }
+
+ x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[i]);
+ l++;
+ }
+ }
+
+ x86_64_mov_imm_reg(cd, (u8) m, REG_ITMP1);
+ x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, 0 * 8);
+ x86_64_mov_imm_reg(cd, (u8) builtin_trace_args, REG_ITMP1);
+ x86_64_call_reg(cd, REG_ITMP1);
+
+ /* restore integer and float argument registers */
+
+ for (i = 0; i < INT_ARG_CNT; i++) {
+ x86_64_mov_membase_reg(cd, REG_SP, (1 + i) * 8, rd->argintregs[i]);
+ }
+
+ for (i = 0; i < FLT_ARG_CNT; i++) {
+ x86_64_movq_membase_reg(cd, REG_SP, (1 + INT_ARG_CNT + i) * 8, rd->argfltregs[i]);
+ }
+
+ x86_64_alu_imm_reg(cd, X86_64_ADD, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
+ }
+#if !defined(STATIC_CLASSPATH)
+ /* call method to resolve native function if needed */
+ if (f == NULL) {
x86_64_alu_imm_reg(cd, X86_64_SUB, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
x86_64_mov_reg_membase(cd, rd->argintregs[0], REG_SP, 1 * 8);
x86_64_movq_reg_membase(cd, rd->argfltregs[1], REG_SP, 8 * 8);
x86_64_movq_reg_membase(cd, rd->argfltregs[2], REG_SP, 9 * 8);
x86_64_movq_reg_membase(cd, rd->argfltregs[3], REG_SP, 10 * 8);
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[4], REG_SP, 11 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[5], REG_SP, 12 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[6], REG_SP, 13 * 8); */
-/* x86_64_movq_reg_membase(cd, rd->argfltregs[7], REG_SP, 14 * 8); */
+ x86_64_movq_reg_membase(cd, rd->argfltregs[4], REG_SP, 11 * 8);
+ x86_64_movq_reg_membase(cd, rd->argfltregs[5], REG_SP, 12 * 8);
+ x86_64_movq_reg_membase(cd, rd->argfltregs[6], REG_SP, 13 * 8);
+ x86_64_movq_reg_membase(cd, rd->argfltregs[7], REG_SP, 14 * 8);
- /* show integer hex code for float arguments */
- for (p = 0, l = 0; p < m->paramcount; p++) {
- if (IS_FLT_DBL_TYPE(m->paramtypes[p])) {
- for (s1 = (m->paramcount > INT_ARG_CNT) ? INT_ARG_CNT - 2 : m->paramcount - 2; s1 >= p; s1--) {
- x86_64_mov_reg_reg(cd, rd->argintregs[s1], rd->argintregs[s1 + 1]);
- }
+ /* needed to patch a jump over this block */
+ x86_64_jmp_imm(cd, 0);
+ jmpInstrPos = cd->mcodeptr - 4;
- x86_64_movd_freg_reg(cd, rd->argfltregs[l], rd->argintregs[p]);
- l++;
- }
- }
+ x86_64_mov_imm_reg(cd, (u8) m, rd->argintregs[0]);
- x86_64_mov_imm_reg(cd, (s8) m, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, 0 * 8);
- x86_64_mov_imm_reg(cd, (s8) builtin_trace_args, REG_ITMP1);
+ x86_64_mov_imm_reg(cd, 0, rd->argintregs[1]);
+		callAddrPatchPos = cd->mcodeptr - 8; /* position where the native function address will be patched in */
+
+ x86_64_mov_imm_reg(cd, 0, rd->argintregs[2]);
+ jmpInstrPatchPos = cd->mcodeptr - 8;
+
+ x86_64_mov_imm_reg(cd, jmpInstrPos, rd->argintregs[3]);
+
+ x86_64_mov_imm_reg(cd, (u8) codegen_resolve_native, REG_ITMP1);
x86_64_call_reg(cd, REG_ITMP1);
+ *(jmpInstrPatchPos) = cd->mcodeptr - jmpInstrPos - 1; /*=opcode jmp_imm size*/
+
x86_64_mov_membase_reg(cd, REG_SP, 1 * 8, rd->argintregs[0]);
x86_64_mov_membase_reg(cd, REG_SP, 2 * 8, rd->argintregs[1]);
x86_64_mov_membase_reg(cd, REG_SP, 3 * 8, rd->argintregs[2]);
x86_64_movq_membase_reg(cd, REG_SP, 8 * 8, rd->argfltregs[1]);
x86_64_movq_membase_reg(cd, REG_SP, 9 * 8, rd->argfltregs[2]);
x86_64_movq_membase_reg(cd, REG_SP, 10 * 8, rd->argfltregs[3]);
-/* x86_64_movq_membase_reg(cd, REG_SP, 11 * 8, rd->argfltregs[4]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 12 * 8, rd->argfltregs[5]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 13 * 8, rd->argfltregs[6]); */
-/* x86_64_movq_membase_reg(cd, REG_SP, 14 * 8, rd->argfltregs[7]); */
+ x86_64_movq_membase_reg(cd, REG_SP, 11 * 8, rd->argfltregs[4]);
+ x86_64_movq_membase_reg(cd, REG_SP, 12 * 8, rd->argfltregs[5]);
+ x86_64_movq_membase_reg(cd, REG_SP, 13 * 8, rd->argfltregs[6]);
+ x86_64_movq_membase_reg(cd, REG_SP, 14 * 8, rd->argfltregs[7]);
x86_64_alu_imm_reg(cd, X86_64_ADD, (INT_ARG_CNT + FLT_ARG_CNT + 1) * 8, REG_SP);
}
-
-#if 0
- x86_64_alu_imm_reg(cd, X86_64_SUB, 7 * 8, REG_SP); /* keep stack 16-byte aligned */
-
- /* save callee saved float registers */
- x86_64_movq_reg_membase(cd, XMM15, REG_SP, 0 * 8);
- x86_64_movq_reg_membase(cd, XMM14, REG_SP, 1 * 8);
- x86_64_movq_reg_membase(cd, XMM13, REG_SP, 2 * 8);
- x86_64_movq_reg_membase(cd, XMM12, REG_SP, 3 * 8);
- x86_64_movq_reg_membase(cd, XMM11, REG_SP, 4 * 8);
- x86_64_movq_reg_membase(cd, XMM10, REG_SP, 5 * 8);
#endif
/* save argument registers on stack -- if we have to */
- if ((m->flags & ACC_STATIC && m->paramcount > (INT_ARG_CNT - 2)) || m->paramcount > (INT_ARG_CNT - 1)) {
- s4 i;
- s4 paramshiftcnt = (m->flags & ACC_STATIC) ? 2 : 1;
- s4 stackparamcnt = (m->paramcount > INT_ARG_CNT) ? m->paramcount - INT_ARG_CNT : 0;
+
+ if ((((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) || iargs > (INT_ARG_CNT - 1)) ||
+ (fargs > FLT_ARG_CNT)) {
+ s4 paramshiftcnt;
+ s4 stackparamcnt;
+
+ paramshiftcnt = 0;
+ stackparamcnt = 0;
+
+ /* do we need to shift integer argument register onto stack? */
+
+ if ((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) {
+ /* do we need to shift 2 arguments? */
+ if (iargs > (INT_ARG_CNT - 1)) {
+ paramshiftcnt = 2;
+
+ } else {
+ paramshiftcnt = 1;
+ }
+
+ } else if (iargs > (INT_ARG_CNT - 1)) {
+ paramshiftcnt = 1;
+ }
+
+ /* calculate required stack space */
+
+ stackparamcnt += (iargs > INT_ARG_CNT) ? iargs - INT_ARG_CNT : 0;
+ stackparamcnt += (fargs > FLT_ARG_CNT) ? fargs - FLT_ARG_CNT : 0;
stackframesize = stackparamcnt + paramshiftcnt;
x86_64_alu_imm_reg(cd, X86_64_SUB, stackframesize * 8, REG_SP);
- /* copy stack arguments into new stack frame -- if any */
- for (i = 0; i < stackparamcnt; i++) {
- x86_64_mov_membase_reg(cd, REG_SP, (stackparamcnt + 1 + i) * 8, REG_ITMP1);
- x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, (paramshiftcnt + i) * 8);
- }
+ /* shift integer arguments if required */
+
+ if ((m->flags & ACC_STATIC) && iargs > (INT_ARG_CNT - 2)) {
+ /* do we need to shift 2 arguments? */
+ if (iargs > (INT_ARG_CNT - 1))
+ x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 1 * 8);
- if (m->flags & ACC_STATIC) {
- x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 1 * 8);
x86_64_mov_reg_membase(cd, rd->argintregs[4], REG_SP, 0 * 8);
- } else {
+ } else if (iargs > (INT_ARG_CNT - 1)) {
x86_64_mov_reg_membase(cd, rd->argintregs[5], REG_SP, 0 * 8);
}
+ /* copy stack arguments into new stack frame -- if any */
+ for (i = 0; i < stackparamcnt; i++) {
+ x86_64_mov_membase_reg(cd, REG_SP, (stackframesize + 1 + i) * 8, REG_ITMP1);
+ x86_64_mov_reg_membase(cd, REG_ITMP1, REG_SP, (paramshiftcnt + i) * 8);
+ }
+
} else {
/* keep stack 16-byte aligned */
- x86_64_alu_imm_reg(cd, X86_64_SUB, 8, REG_SP);
+ x86_64_alu_imm_reg(cd, X86_64_SUB, 1 * 8, REG_SP);
stackframesize = 1;
}
+ /* shift integer arguments for `env' and `class' arguments */
+
if (m->flags & ACC_STATIC) {
- x86_64_mov_reg_reg(cd, rd->argintregs[3], rd->argintregs[5]);
- x86_64_mov_reg_reg(cd, rd->argintregs[2], rd->argintregs[4]);
- x86_64_mov_reg_reg(cd, rd->argintregs[1], rd->argintregs[3]);
- x86_64_mov_reg_reg(cd, rd->argintregs[0], rd->argintregs[2]);
+ /* shift iargs count if less than INT_ARG_CNT, or all */
+ for (i = (iargs < (INT_ARG_CNT - 2)) ? iargs : (INT_ARG_CNT - 2); i >= 0; i--) {
+ x86_64_mov_reg_reg(cd, rd->argintregs[i], rd->argintregs[i + 2]);
+ }
/* put class into second argument register */
x86_64_mov_imm_reg(cd, (u8) m->class, rd->argintregs[1]);
} else {
- x86_64_mov_reg_reg(cd, rd->argintregs[4], rd->argintregs[5]);
- x86_64_mov_reg_reg(cd, rd->argintregs[3], rd->argintregs[4]);
- x86_64_mov_reg_reg(cd, rd->argintregs[2], rd->argintregs[3]);
- x86_64_mov_reg_reg(cd, rd->argintregs[1], rd->argintregs[2]);
- x86_64_mov_reg_reg(cd, rd->argintregs[0], rd->argintregs[1]);
+ /* shift iargs count if less than INT_ARG_CNT, or all */
+ for (i = (iargs < (INT_ARG_CNT - 1)) ? iargs : (INT_ARG_CNT - 1); i >= 0; i--) {
+ x86_64_mov_reg_reg(cd, rd->argintregs[i], rd->argintregs[i + 1]);
+ }
}
/* put env into first argument register */
x86_64_mov_imm_reg(cd, (u8) &env, rd->argintregs[0]);
+ /* do the native function call */
x86_64_mov_imm_reg(cd, (u8) f, REG_ITMP1);
+#if !defined(STATIC_CLASSPATH)
+ if (f == NULL)
+ (*callAddrPatchPos) = cd->mcodeptr - 8;
+#endif
x86_64_call_reg(cd, REG_ITMP1);
/* remove stackframe if there is one */
x86_64_alu_imm_reg(cd, X86_64_ADD, 3 * 8, REG_SP); /* keep stack 16-byte aligned */
}
-#if 0
- /* restore callee saved registers */
- x86_64_movq_membase_reg(cd, REG_SP, 0 * 8, XMM15);
- x86_64_movq_membase_reg(cd, REG_SP, 1 * 8, XMM14);
- x86_64_movq_membase_reg(cd, REG_SP, 2 * 8, XMM13);
- x86_64_movq_membase_reg(cd, REG_SP, 3 * 8, XMM12);
- x86_64_movq_membase_reg(cd, REG_SP, 4 * 8, XMM11);
- x86_64_movq_membase_reg(cd, REG_SP, 5 * 8, XMM10);
-
- x86_64_alu_imm_reg(cd, X86_64_ADD, 7 * 8, REG_SP); /* keep stack 16-byte aligned */
-#endif
+ /* check for exception */
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
x86_64_push_reg(cd, REG_RESULT);
x86_64_mov_membase_reg(cd, REG_RESULT, 0, REG_ITMP3);
x86_64_pop_reg(cd, REG_RESULT);
#else
- x86_64_mov_imm_reg(cd, (s8) &_exceptionptr, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) &_exceptionptr, REG_ITMP3);
x86_64_mov_membase_reg(cd, REG_ITMP3, 0, REG_ITMP3);
#endif
x86_64_test_reg_reg(cd, REG_ITMP3, REG_ITMP3);
x86_64_ret(cd);
+ /* handle exception */
+
#if defined(USE_THREADS) && defined(NATIVE_THREADS)
x86_64_push_reg(cd, REG_ITMP3);
/* x86_64_call_mem(cd, (u8) &callgetexceptionptrptr); */
x86_64_pop_reg(cd, REG_ITMP1_XPTR);
#else
x86_64_mov_reg_reg(cd, REG_ITMP3, REG_ITMP1_XPTR);
- x86_64_mov_imm_reg(cd, (s8) &_exceptionptr, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) &_exceptionptr, REG_ITMP3);
x86_64_alu_reg_reg(cd, X86_64_XOR, REG_ITMP2, REG_ITMP2);
x86_64_mov_reg_membase(cd, REG_ITMP2, REG_ITMP3, 0); /* clear exception pointer */
#endif
x86_64_mov_membase_reg(cd, REG_SP, 0, REG_ITMP2_XPC); /* get return address from stack */
x86_64_alu_imm_reg(cd, X86_64_SUB, 3, REG_ITMP2_XPC); /* callq */
- x86_64_mov_imm_reg(cd, (s8) asm_handle_nat_exception, REG_ITMP3);
+ x86_64_mov_imm_reg(cd, (u8) asm_handle_nat_exception, REG_ITMP3);
x86_64_jmp_reg(cd, REG_ITMP3);
+
+ /* patch in a clinit call if required *************************************/
+
{
u1 *xcodeptr;
clinitref *cref;
tmpcd->mcodeptr = xcodeptr; /* set dummy mcode pointer */
x86_64_call_imm(tmpcd, cd->mcodeptr - (xcodeptr + 5));
- /* Save current stack pointer into a temporary register. */
- x86_64_mov_reg_reg(cd, REG_SP, REG_ITMP1);
-
/* Push machine code bytes to patch onto the stack. */
x86_64_push_imm(cd, (u1) xmcode);
x86_64_push_imm(cd, (u4) mcode);
x86_64_push_imm(cd, (u8) cref->class);
- /* Push previously saved stack pointer onto stack. */
- x86_64_push_reg(cd, REG_ITMP1);
-
x86_64_mov_imm_reg(cd, (u8) asm_check_clinit, REG_ITMP1);
x86_64_jmp_reg(cd, REG_ITMP1);
}
}
-#if 0
- {
- static int stubprinted;
- if (!stubprinted)
- printf("stubsize: %d\n", ((long) cd->mcodeptr - (long) s));
- stubprinted = 1;
+ /* Check if the stub size is big enough to hold the whole stub generated. */
+ /* If not, this can lead into unpredictable crashes, because of heap */
+ /* corruption. */
+ if ((s4) (cd->mcodeptr - s) > NATIVESTUBSIZE) {
+ throw_cacao_exception_exit(string_java_lang_InternalError,
+ "Native stub size %d is to small for current stub size %d",
+ NATIVESTUBSIZE, (s4) (cd->mcodeptr - s));
}
-#endif
#if defined(STATISTICS)
if (opt_stat)