/* src/vm/jit/codegen-common.c - architecture independent code generator stuff
- Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
- C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
- E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
- J. Wenninger, Institut f. Computersprachen - TU Wien
+ Copyright (C) 1996-2005, 2006, 2007, 2008
+ CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
This file is part of CACAO.
memory. All functions writing values into the data area return the offset
relative the begin of the code area (start of procedure).
- $Id: codegen-common.c 7860 2007-05-03 12:30:05Z twisti $
-
*/
#include "vm/types.h"
-#if defined(ENABLE_JIT)
-/* this is required PATCHER_CALL_SIZE */
-# include "codegen.h"
-#endif
-
-#if defined(__ARM__)
-/* this is required for REG_SPLIT */
-# include "md-abi.h"
-#endif
+#include "codegen.h"
+#include "md.h"
+#include "md-abi.h"
#include "mm/memory.h"
#include "toolbox/logging.h"
#include "native/jni.h"
+#include "native/llni.h"
+#include "native/localref.h"
#include "native/native.h"
-#include "threads/threads-common.h"
+#if defined(WITH_JAVA_RUNTIME_LIBRARY_OPENJDK)
+# include "native/include/java_lang_Object.h"
+# include "native/include/java_lang_String.h" /* required by j.l.CL */
+# include "native/include/java_nio_ByteBuffer.h" /* required by j.l.CL */
+# include "native/include/java_lang_ClassLoader.h"
+#endif
+
+#if defined(WITH_JAVA_RUNTIME_LIBRARY_CLDC1_1)
+# include "native/include/java_lang_String.h"
+#endif
+
+#include "native/include/java_lang_Class.h"
+
+#include "threads/thread.h"
+#include "vm/builtin.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
#include "vm/jit/abi.h"
#include "vm/jit/asmpart.h"
+#include "vm/jit/code.h"
#include "vm/jit/codegen-common.h"
#if defined(ENABLE_DISASSEMBLER)
#include "vm/jit/dseg.h"
#include "vm/jit/emit-common.h"
#include "vm/jit/jit.h"
-#include "vm/jit/md.h"
+#include "vm/jit/linenumbertable.h"
+#include "vm/jit/methodheader.h"
+#include "vm/jit/methodtree.h"
+#include "vm/jit/patcher-common.h"
#include "vm/jit/replace.h"
+#if defined(ENABLE_SSA)
+# include "vm/jit/optimizing/lsra.h"
+# include "vm/jit/optimizing/ssa.h"
+#endif
#include "vm/jit/stacktrace.h"
+#include "vm/jit/trace.h"
#if defined(ENABLE_INTRP)
#include "vm/jit/intrp/intrp.h"
#include <vmlog_cacao.h>
#endif
-
-/* in this tree we store all method addresses *********************************/
-
-static avl_tree_t *methodtree = NULL;
-static s4 methodtree_comparator(const void *treenode, const void *node);
+#include "show.h"
/* codegen_init ****************************************************************
void codegen_init(void)
{
- /* this tree is global, not method specific */
-
- if (!methodtree) {
-#if defined(ENABLE_JIT)
- methodtree_element *mte;
-#endif
-
- methodtree = avl_create(&methodtree_comparator);
-
-#if defined(ENABLE_JIT)
- /* insert asm_vm_call_method */
-
- mte = NEW(methodtree_element);
-
- mte->startpc = (u1 *) (ptrint) asm_vm_call_method;
- mte->endpc = (u1 *) (ptrint) asm_vm_call_method_end;
-
- avl_insert(methodtree, mte);
-#endif /* defined(ENABLE_JIT) */
- }
}
cd->datareferences = NULL;
#endif
-/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
- cd->patchrefs = NULL;
cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
-
- cd->linenumberreferences = NULL;
- cd->linenumbertablesizepos = 0;
- cd->linenumbertablestartpos = 0;
- cd->linenumbertab = 0;
-
-#if defined(ENABLE_THREADS)
- cd->threadcritcurrent.next = NULL;
- cd->threadcritcount = 0;
-#endif
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
+ cd->linenumbers = list_create_dump(OFFSET(linenumbertable_list_entry_t, linkage));
}
cd->datareferences = NULL;
#endif
-/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
- cd->patchrefs = NULL;
cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
-
- cd->linenumberreferences = NULL;
- cd->linenumbertablesizepos = 0;
- cd->linenumbertablestartpos = 0;
- cd->linenumbertab = 0;
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
+ cd->linenumbers = list_create_dump(OFFSET(linenumbertable_list_entry_t, linkage));
-#if defined(ENABLE_THREADS)
- cd->threadcritcurrent.next = NULL;
- cd->threadcritcount = 0;
-#endif
-
/* We need to clear the mpc and the branch references from all
basic blocks as they will definitely change. */
bptr->branchrefs = NULL;
}
+ /* We need to clear all the patcher references from the codeinfo
+ since they will all be regenerated. */
+
+ patcher_list_reset(code);
+
#if defined(ENABLE_REPLACEMENT)
code->rplpoints = NULL;
code->rplpointcount = 0;
cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
-#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP)
+#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
+ || defined(__SPARC_64__)
/* adjust the pointer to the last patcher position */
if (cd->lastmcodeptr != NULL)
void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
{
- list_t *list;
+ list_t *l;
branch_label_ref_t *br;
s4 mpc;
- /* get the label list */
+ /* Get the label list. */
- list = cd->brancheslabel;
+ l = cd->brancheslabel;
/* calculate the current mpc */
br->reg = reg;
br->options = options;
- /* add the branch to the list */
+ /* Add the branch to the list. */
- list_add_last_unsynced(list, br);
+ list_add_last(l, br);
}
-/* codegen_add_patch_ref *******************************************************
+/* codegen_critical_section_new ************************************************
- Appends a new patcher reference to the list of patching positions.
+ Allocates a new critical-section reference and adds it to the
+ critical-section list.
*******************************************************************************/
-void codegen_add_patch_ref(codegendata *cd, functionptr patcher, voidptr ref,
- s4 disp)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_new(codegendata *cd)
{
- patchref *pr;
- s4 branchmpc;
+ list_t *l;
+ critical_section_ref_t *csr;
+ s4 mpc;
- branchmpc = cd->mcodeptr - cd->mcodebase;
+ /* Get the critical section list. */
- pr = DNEW(patchref);
+ l = cd->listcritical;
+
+ /* calculate the current mpc */
- pr->branchpos = branchmpc;
- pr->disp = disp;
- pr->patcher = patcher;
- pr->ref = ref;
+ mpc = cd->mcodeptr - cd->mcodebase;
-/* list_add_first(cd->patchrefs, pr); */
- pr->next = cd->patchrefs;
- cd->patchrefs = pr;
+ csr = DNEW(critical_section_ref_t);
- /* Generate NOPs for opt_shownops. */
+ /* We can only set restart right now, as start and end are set by
+ the corresponding functions that follow. */
- if (opt_shownops)
- PATCHER_NOPS;
+ csr->start = -1;
+ csr->end = -1;
+ csr->restart = mpc;
-#if defined(ENABLE_JIT) && (defined(__I386__) || defined(__MIPS__) || defined(__X86_64__))
- /* On some architectures the patcher stub call instruction might
- be longer than the actual instruction generated. On this
- architectures we store the last patcher call position and after
- the basic block code generation is completed, we check the
- range and maybe generate some nop's. */
+ /* Add the critical-section reference to the list. */
- cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
-#endif
+ list_add_last(l, csr);
}
+#endif
-/* methodtree_comparator *******************************************************
-
- Comparator function used for the AVL tree of methods.
+/* codegen_critical_section_start **********************************************
- ARGUMENTS:
- treenode....the node from the tree
- node........the node to compare to the tree-node
+ Set the start-point of the current critical section (which is the
+ last element of the list).
*******************************************************************************/
-static s4 methodtree_comparator(const void *treenode, const void *node)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_start(codegendata *cd)
{
- methodtree_element *mte;
- methodtree_element *mtepc;
+ list_t *l;
+ critical_section_ref_t *csr;
+ s4 mpc;
- mte = (methodtree_element *) treenode;
- mtepc = (methodtree_element *) node;
+ /* Get the critical section list. */
- /* compare both startpc and endpc of pc, even if they have the same value,
- otherwise the avl_probe sometimes thinks the element is already in the
- tree */
+ l = cd->listcritical;
+
+ /* calculate the current mpc */
-#ifdef __S390__
- /* On S390 addresses are 31 bit. Compare only 31 bits of value.
- */
-# define ADDR_MASK(a) ((a) & 0x7FFFFFFF)
-#else
-# define ADDR_MASK(a) (a)
-#endif
+ mpc = cd->mcodeptr - cd->mcodebase;
- if (ADDR_MASK((long) mte->startpc) <= ADDR_MASK((long) mtepc->startpc) &&
- ADDR_MASK((long) mtepc->startpc) <= ADDR_MASK((long) mte->endpc) &&
- ADDR_MASK((long) mte->startpc) <= ADDR_MASK((long) mtepc->endpc) &&
- ADDR_MASK((long) mtepc->endpc) <= ADDR_MASK((long) mte->endpc)) {
- return 0;
+ /* Get the current critical section. */
- } else if (ADDR_MASK((long) mtepc->startpc) < ADDR_MASK((long) mte->startpc)) {
- return -1;
+ csr = list_last(l);
- } else {
- return 1;
- }
+ /* set the start point */
-# undef ADDR_MASK
+ assert(csr->start == -1);
+
+ csr->start = mpc;
}
+#endif
-/* codegen_insertmethod ********************************************************
+/* codegen_critical_section_end ************************************************
- Insert the machine code range of a method into the AVL tree of methods.
+ Set the end-point of the current critical section (which is the
+ last element of the list).
*******************************************************************************/
-void codegen_insertmethod(u1 *startpc, u1 *endpc)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_end(codegendata *cd)
{
- methodtree_element *mte;
-
- /* allocate new method entry */
-
- mte = NEW(methodtree_element);
-
- mte->startpc = startpc;
- mte->endpc = endpc;
+ list_t *l;
+ critical_section_ref_t *csr;
+ s4 mpc;
- /* this function does not return an error, but asserts for
- duplicate entries */
+ /* Get the critical section list. */
- avl_insert(methodtree, mte);
-}
+ l = cd->listcritical;
+
+ /* calculate the current mpc */
+ mpc = cd->mcodeptr - cd->mcodebase;
-/* codegen_get_pv_from_pc ******************************************************
+ /* Get the current critical section. */
- Find the PV for the given PC by searching in the AVL tree of
- methods.
+ csr = list_last(l);
-*******************************************************************************/
+ /* set the end point */
-u1 *codegen_get_pv_from_pc(u1 *pc)
-{
- methodtree_element mtepc;
- methodtree_element *mte;
-
- /* allocation of the search structure on the stack is much faster */
+ assert(csr->end == -1);
- mtepc.startpc = pc;
- mtepc.endpc = pc;
+ csr->end = mpc;
+}
+#endif
- mte = avl_find(methodtree, &mtepc);
- if (mte == NULL) {
- /* No method was found. Let's dump a stacktrace. */
+/* codegen_critical_section_finish *********************************************
-#if defined(ENABLE_VMLOG)
- vmlog_cacao_signl("SIGSEGV");
-#endif
+ Finish the critical sections, create the critical section nodes for
+ the AVL tree and insert them into the tree.
- log_println("We received a SIGSEGV and tried to handle it, but we were");
- log_println("unable to find a Java method at:");
- log_println("");
-#if SIZEOF_VOID_P == 8
- log_println("PC=0x%016lx", pc);
-#else
- log_println("PC=0x%08x", pc);
-#endif
- log_println("");
- log_println("Dumping the current stacktrace:");
+*******************************************************************************/
#if defined(ENABLE_THREADS)
- /* XXX michi: This should be available even without threads! */
- threads_print_stacktrace();
-#endif
+static void codegen_critical_section_finish(jitdata *jd)
+{
+ codeinfo *code;
+ codegendata *cd;
+ list_t *l;
+ critical_section_ref_t *csr;
+ critical_section_node_t *csn;
- vm_abort("Exiting...");
- }
+ /* get required compiler data */
- return mte->startpc;
-}
+ code = jd->code;
+ cd = jd->cd;
+ /* Get the critical section list. */
-/* codegen_get_pv_from_pc_nocheck **********************************************
+ l = cd->listcritical;
- Find the PV for the given PC by searching in the AVL tree of
- methods. This method does not check the return value and is used
- by the profiler.
+ /* iterate over all critical sections */
-*******************************************************************************/
+ for (csr = list_first(l); csr != NULL; csr = list_next(l, csr)) {
+ /* check if all points are set */
-u1 *codegen_get_pv_from_pc_nocheck(u1 *pc)
-{
- methodtree_element mtepc;
- methodtree_element *mte;
+ assert(csr->start != -1);
+ assert(csr->end != -1);
+ assert(csr->restart != -1);
- /* allocation of the search structure on the stack is much faster */
+ /* allocate tree node */
- mtepc.startpc = pc;
- mtepc.endpc = pc;
+ csn = NEW(critical_section_node_t);
- mte = avl_find(methodtree, &mtepc);
+ csn->start = code->entrypoint + csr->start;
+ csn->end = code->entrypoint + csr->end;
+ csn->restart = code->entrypoint + csr->restart;
- if (mte == NULL)
- return NULL;
- else
- return mte->startpc;
+ /* insert into the tree */
+
+ critical_section_register(csn);
+ }
}
+#endif
/* codegen_set_replacement_point_notrap ****************************************
cd->replacementpoint++;
+#if !defined(NDEBUG)
+ /* XXX actually we should use an own REPLACEMENT_NOPS here! */
+ if (opt_TestReplacement)
+ PATCHER_NOPS;
+#endif
+
/* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
s4 alignedmcodelen;
jumpref *jr;
u1 *epoint;
- s4 extralen;
s4 alignedlen;
/* get required compiler data */
mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
-#if defined(ENABLE_THREADS)
- extralen = sizeof(critical_section_node_t) * cd->threadcritcount;
-#else
- extralen = 0;
-#endif
-
#if defined(ENABLE_STATISTICS)
if (opt_stat) {
count_code_len += mcodelen;
/* allocate new memory */
code->mcodelength = mcodelen + cd->dseglen;
- code->mcode = CNEW(u1, alignedlen + extralen);
+ code->mcode = CNEW(u1, alignedlen);
/* set the entrypoint of the method */
}
#endif
+ /* Create the exception table. */
+
+ exceptiontable_create(jd);
+
+ /* Create the linenumber table. */
+
+ linenumbertable_create(jd);
+
/* jump table resolving */
for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
*((functionptr *) ((ptrint) epoint + jr->tablepos)) =
(functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
- /* line number table resolving */
- {
- linenumberref *lr;
- ptrint lrtlen = 0;
- ptrint target;
-
- for (lr = cd->linenumberreferences; lr != NULL; lr = lr->next) {
- lrtlen++;
- target = lr->targetmpc;
- /* if the entry contains an mcode pointer (normal case), resolve it */
- /* (see doc/inlining_stacktrace.txt for details) */
- if (lr->linenumber >= -2) {
- target += (ptrint) epoint;
- }
- *((functionptr *) ((ptrint) epoint + (ptrint) lr->tablepos)) =
- (functionptr) target;
- }
-
- *((functionptr *) ((ptrint) epoint + cd->linenumbertablestartpos)) =
- (functionptr) ((ptrint) epoint + cd->linenumbertab);
+ /* patcher resolving */
- *((ptrint *) ((ptrint) epoint + cd->linenumbertablesizepos)) = lrtlen;
- }
+ patcher_resolve(jd);
#if defined(ENABLE_REPLACEMENT)
/* replacement point resolving */
int i;
rplpoint *rp;
- code->replacementstubs += (ptrint) epoint;
-
rp = code->rplpoints;
for (i=0; i<code->rplpointcount; ++i, ++rp) {
rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
}
#endif /* defined(ENABLE_REPLACEMENT) */
- /* add method into methodtree to find the entrypoint */
+ /* Insert method into methodtree to find the entrypoint. */
- codegen_insertmethod(code->entrypoint, code->entrypoint + mcodelen);
+ methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
/* resolve data segment references */
#endif
#if defined(ENABLE_THREADS)
- {
- critical_section_node_t *n = (critical_section_node_t *) ((ptrint) code->mcode + alignedlen);
- s4 i;
- codegen_critical_section_t *nt = cd->threadcrit;
-
- for (i = 0; i < cd->threadcritcount; i++) {
- n->mcodebegin = (u1 *) (ptrint) code->mcode + nt->mcodebegin;
- n->mcodeend = (u1 *) (ptrint) code->mcode + nt->mcodeend;
- n->mcoderestart = (u1 *) (ptrint) code->mcode + nt->mcoderestart;
- critical_register_critical_section(n);
- n++;
- nt = nt->next;
- }
- }
+ /* create critical sections */
+
+ codegen_critical_section_finish(jd);
#endif
/* flush the instruction and data caches */
codegendata *cd;
ptrint *d; /* pointer to data memory */
u1 *c; /* pointer to code memory */
- s4 dumpsize;
+ int32_t dumpmarker;
/* mark dump memory */
- dumpsize = dump_size();
+ DMARKER;
/* allocate required data structures */
cd = jd->cd;
+#if !defined(JIT_COMPILER_VIA_SIGNAL)
/* allocate code memory */
c = CNEW(u1, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
/* flush caches */
md_cacheflush(cd->mcodebase, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+#else
+ /* Allocate code memory. */
+
+ c = CNEW(uint8_t, 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+
+ /* Set pointers correctly. */
+
+ d = (ptrint *) c;
+
+ cd->mcodebase = c;
+
+ c = c + 2 * SIZEOF_VOID_P;
+ cd->mcodeptr = c;
+
+ /* NOTE: The codeinfo pointer is actually a pointer to the
+ methodinfo (this fakes a codeinfo structure). */
+
+ d[0] = (ptrint) m;
+ d[1] = (ptrint) &d[0]; /* fake code->m */
+
+ /* Emit the trap instruction. */
+
+ emit_trap_compiler(cd);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ count_cstub_len += 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE;
+#endif
+
+ /* Flush caches. */
+
+ md_cacheflush(cd->mcodebase, 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+#endif
/* release dump memory */
- dump_release(dumpsize);
+ DRELEASE;
/* return native stub code */
}
+/* codegen_generate_stub_builtin ***********************************************
+
+   Creates a machine-code stub for the given builtin function: sets up
+   a JIT data structure, emits the stub via codegen_emit_stub_native,
+   and stores the stub's entry point in the builtin table (bte->stub).
+
+*******************************************************************************/
+
+void codegen_generate_stub_builtin(methodinfo *m, builtintable_entry *bte)
+{
+ jitdata *jd;
+ codeinfo *code;
+ int skipparams;
+ int32_t dumpmarker;
+
+ /* mark dump memory */
+
+ DMARKER;
+
+ /* Create JIT data structure. */
+
+ jd = jit_jitdata_new(m);
+
+ /* Get required compiler data. */
+
+ code = jd->code;
+
+ /* Stubs are non-leaf methods. */
+
+ code_unflag_leafmethod(code);
+
+ /* setup code generation stuff */
+
+ codegen_setup(jd);
+
+ /* Builtin stubs do not skip any native arguments. */
+
+ skipparams = 0;
+
+ /* generate the code */
+
+#if defined(ENABLE_JIT)
+# if defined(ENABLE_INTRP)
+ if (!opt_intrp) {
+# endif
+ assert(bte->fp != NULL);
+ codegen_emit_stub_native(jd, bte->md, bte->fp, skipparams);
+# if defined(ENABLE_INTRP)
+ }
+# endif
+#endif
+
+ /* reallocate the memory and finish the code generation */
+
+ codegen_finish(jd);
+
+ /* set the stub entry point in the builtin table */
+
+ bte->stub = code->entrypoint;
+
+#if defined(ENABLE_STATISTICS)
+ /* Note: builtin stubs are accounted in the native-stub statistics. */
+ if (opt_stat)
+ size_stub_native += code->mcodelength;
+#endif
+
+#if !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER)
+ /* disassemble the builtin stub */
+
+ if (opt_DisassembleStubs) {
+ codegen_disassemble_stub(m,
+ (u1 *) (ptrint) code->entrypoint,
+ (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+
+ /* show data segment */
+
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
+#endif /* !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER) */
+
+ /* release memory */
+
+ DRELEASE;
+}
+
+
/* codegen_generate_stub_native ************************************************
Wrapper for codegen_emit_stub_native.
{
jitdata *jd;
codeinfo *code;
- s4 dumpsize;
methoddesc *md;
methoddesc *nmd;
- s4 nativeparams;
+ int skipparams;
+ int32_t dumpmarker;
/* mark dump memory */
- dumpsize = dump_size();
+ DMARKER;
- jd = DNEW(jitdata);
+ /* Create JIT data structure. */
- jd->m = m;
- jd->cd = DNEW(codegendata);
- jd->rd = DNEW(registerdata);
- jd->flags = 0;
+ jd = jit_jitdata_new(m);
- /* Allocate codeinfo memory from the heap as we need to keep them. */
+ /* Get required compiler data. */
- jd->code = code_codeinfo_new(m); /* XXX check allocation */
+ code = jd->code;
- /* get required compiler data */
+ /* Stubs are non-leaf methods. */
- code = jd->code;
+ code_unflag_leafmethod(code);
/* set the flags for the current JIT run */
/* create new method descriptor with additional native parameters */
md = m->parseddesc;
- nativeparams = (m->flags & ACC_STATIC) ? 2 : 1;
+
+ /* Set the number of native arguments we need to skip. */
+
+ if (m->flags & ACC_STATIC)
+ skipparams = 2;
+ else
+ skipparams = 1;
nmd = (methoddesc *) DMNEW(u1, sizeof(methoddesc) - sizeof(typedesc) +
md->paramcount * sizeof(typedesc) +
- nativeparams * sizeof(typedesc));
+ skipparams * sizeof(typedesc));
- nmd->paramcount = md->paramcount + nativeparams;
+ nmd->paramcount = md->paramcount + skipparams;
nmd->params = DMNEW(paramdesc, nmd->paramcount);
if (m->flags & ACC_STATIC)
nmd->paramtypes[1].type = TYPE_ADR; /* add class pointer */
- MCOPY(nmd->paramtypes + nativeparams, md->paramtypes, typedesc,
+ MCOPY(nmd->paramtypes + skipparams, md->paramtypes, typedesc,
md->paramcount);
#if defined(ENABLE_JIT)
intrp_createnativestub(f, jd, nmd);
else
# endif
- codegen_emit_stub_native(jd, nmd, f);
+ codegen_emit_stub_native(jd, nmd, f, skipparams);
#else
intrp_createnativestub(f, jd, nmd);
#endif
-#if defined(ENABLE_STATISTICS)
- if (opt_stat)
- count_nstub_len += code->mcodelength;
-#endif
-
/* reallocate the memory and finish the code generation */
codegen_finish(jd);
-#if !defined(NDEBUG)
- /* disassemble native stub */
+#if defined(ENABLE_STATISTICS)
+ /* must be done after codegen_finish() */
- if (opt_shownativestub) {
-#if defined(ENABLE_DISASSEMBLER)
- codegen_disassemble_nativestub(m,
- (u1 *) (ptrint) code->entrypoint,
- (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+ if (opt_stat)
+ size_stub_native += code->mcodelength;
#endif
- /* show data segment */
+#if !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER)
+ /* disassemble native stub */
- if (opt_showddatasegment)
- dseg_display(jd);
+ if (opt_DisassembleStubs) {
+# if defined(ENABLE_DEBUG_FILTER)
+ if (m->filtermatches & SHOW_FILTER_FLAG_SHOW_METHOD)
+# endif
+ {
+ codegen_disassemble_stub(m,
+ (u1 *) (ptrint) code->entrypoint,
+ (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+
+ /* show data segment */
+
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
}
-#endif /* !defined(NDEBUG) */
+#endif /* !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER) */
/* release memory */
- dump_release(dumpsize);
+ DRELEASE;
/* return native stub code */
/* codegen_disassemble_nativestub **********************************************
- Disassembles the generated native stub.
+ Disassembles the generated builtin or native stub.
*******************************************************************************/
#if defined(ENABLE_DISASSEMBLER)
-void codegen_disassemble_nativestub(methodinfo *m, u1 *start, u1 *end)
+void codegen_disassemble_stub(methodinfo *m, u1 *start, u1 *end)
{
- printf("Native stub: ");
- utf_fprint_printable_ascii_classname(stdout, m->class->name);
+ printf("Stub code: ");
+ if (m->clazz != NULL)
+ utf_fprint_printable_ascii_classname(stdout, m->clazz->name);
+ else
+ printf("NULL");
printf(".");
utf_fprint_printable_ascii(stdout, m->name);
utf_fprint_printable_ascii(stdout, m->descriptor);
- printf("\n\nLength: %d\n\n", (s4) (end - start));
+ printf("\nLength: %d\n\n", (s4) (end - start));
DISASSEMBLE(start, end);
}
The layout of the native stub stackframe should look like this:
- +---------------------------+ <- SP (of parent Java function)
+ +---------------------------+ <- java SP (of parent Java function)
| return address |
- +---------------------------+
+ +---------------------------+ <- data SP
| |
| stackframe info structure |
| |
| |
+---------------------------+
| |
+ | saved registers (if any) |
+ | |
+ +---------------------------+
+ | |
| arguments (if any) |
| |
- +---------------------------+ <- SP (native stub)
+ +---------------------------+ <- current SP (native stub)
*******************************************************************************/
-void codegen_start_native_call(u1 *datasp, u1 *pv, u1 *sp, u1 *ra)
+java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
{
- stackframeinfo *sfi;
- localref_table *lrt;
+ stackframeinfo_t *sfi;
+ localref_table *lrt;
+ methodinfo *m;
+ int32_t framesize;
+
+ uint8_t *datasp;
+ uint8_t *javasp;
+ uint64_t *arg_regs;
+ uint64_t *arg_stack;
+
+ STATISTICS(count_calls_java_to_native++);
+
+ /* Get the methodinfo. */
+
+ m = code_get_methodinfo_for_pv(pv);
+
+ assert(m);
+
+ framesize = *((int32_t *) (pv + FrameSize));
+
+ assert(framesize >= sizeof(stackframeinfo_t) + sizeof(localref_table));
+
+ /* calculate needed values */
+
+#if defined(__ALPHA__) || defined(__ARM__)
+ datasp = sp + framesize - SIZEOF_VOID_P;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) sp;
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__MIPS__)
+ /* MIPS always uses 8 bytes to store the RA */
+ datasp = sp + framesize - 8;
+ javasp = sp + framesize;
+#elif defined(__S390__)
+ datasp = sp + framesize - 8;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + 96);
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
+ datasp = sp + framesize;
+ javasp = sp + framesize + SIZEOF_VOID_P;
+ arg_regs = (uint64_t *) sp;
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__POWERPC__)
+ datasp = sp + framesize;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__POWERPC64__)
+ datasp = sp + framesize;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
+ arg_stack = (uint64_t *) javasp;
+#else
+ /* XXX I was unable to do this port for SPARC64, sorry. (-michi) */
+ /* XXX maybe we need to pass the RA as argument there */
+ vm_abort("codegen_start_native_call: unsupported architecture");
+#endif
/* get data structures from stack */
- sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
- lrt = (localref_table *) (datasp - sizeof(stackframeinfo) -
- sizeof(localref_table));
+ sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
+ lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
+ sizeof(localref_table));
- /* add a stackframeinfo to the chain */
-
- stacktrace_create_native_stackframeinfo(sfi, pv, sp, ra);
-
-#if defined(ENABLE_JAVASE)
+#if defined(ENABLE_JNI)
/* add current JNI local references table to this thread */
- lrt->capacity = LOCALREFTABLE_CAPACITY;
- lrt->used = 0;
- lrt->localframes = 1;
- lrt->prev = LOCALREFTABLE;
+ localref_table_add(lrt);
+#endif
+
+#if !defined(NDEBUG)
+# if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
+ /* print the call-trace if necessary */
+ /* BEFORE: filling the local reference table */
- /* clear the references array (memset is faster the a for-loop) */
+ if (opt_TraceJavaCalls)
+ trace_java_call_enter(m, arg_regs, arg_stack);
+# endif
+#endif
- MSET(lrt->refs, 0, java_objectheader*, LOCALREFTABLE_CAPACITY);
+#if defined(ENABLE_HANDLES)
+ /* place all references into the local reference table */
+ /* BEFORE: creating stackframeinfo */
- LOCALREFTABLE = lrt;
+ localref_native_enter(m, arg_regs, arg_stack);
#endif
+
+ /* Add a stackframeinfo for this native method. We don't have RA
+ and XPC here. These are determined in
+ stacktrace_stackframeinfo_add. */
+
+ stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
+
+ /* Return a wrapped classinfo for static methods. */
+
+ if (m->flags & ACC_STATIC)
+ return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
+ else
+ return NULL;
}
*******************************************************************************/
-java_objectheader *codegen_finish_native_call(u1 *datasp)
+java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
{
- stackframeinfo *sfi;
- stackframeinfo **psfi;
-#if defined(ENABLE_JAVASE)
- localref_table *lrt;
- localref_table *plrt;
- s4 localframes;
+ stackframeinfo_t *sfi;
+ java_handle_t *e;
+ java_object_t *o;
+ codeinfo *code;
+ methodinfo *m;
+ int32_t framesize;
+
+ uint8_t *datasp;
+ uint64_t *ret_regs;
+
+ /* get information from method header */
+
+ code = code_get_codeinfo_for_pv(pv);
+
+ framesize = *((int32_t *) (pv + FrameSize));
+
+ assert(code);
+
+ /* get the methodinfo */
+
+ m = code->m;
+ assert(m);
+
+ /* calculate needed values */
+
+#if defined(__ALPHA__) || defined(__ARM__)
+ datasp = sp + framesize - SIZEOF_VOID_P;
+ ret_regs = (uint64_t *) sp;
+#elif defined(__MIPS__)
+ /* MIPS always uses 8 bytes to store the RA */
+ datasp = sp + framesize - 8;
+#elif defined(__S390__)
+ datasp = sp + framesize - 8;
+ ret_regs = (uint64_t *) (sp + 96);
+#elif defined(__I386__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + 2 * SIZEOF_VOID_P);
+#elif defined(__M68K__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + 2 * 8);
+#elif defined(__X86_64__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) sp;
+#elif defined(__POWERPC__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
+#elif defined(__POWERPC64__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
+#else
+ vm_abort("codegen_finish_native_call: unsupported architecture");
#endif
- java_objectheader *e;
/* get data structures from stack */
- sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
-
- /* remove current stackframeinfo from chain */
-
- psfi = &STACKFRAMEINFO;
-
- *psfi = sfi->prev;
-
-#if defined(ENABLE_JAVASE)
- /* release JNI local references tables for this thread */
+ sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
- lrt = LOCALREFTABLE;
+ /* Remove current stackframeinfo from chain. */
- /* release all current local frames */
+ stacktrace_stackframeinfo_remove(sfi);
- for (localframes = lrt->localframes; localframes >= 1; localframes--) {
- /* get previous frame */
+#if defined(ENABLE_HANDLES)
+ /* unwrap the return value from the local reference table */
+ /* AFTER: removing the stackframeinfo */
+ /* BEFORE: releasing the local reference table */
- plrt = lrt->prev;
-
- /* Clear all reference entries (only for tables allocated on
- the Java heap). */
-
- if (localframes > 1)
- MSET(&lrt->refs[0], 0, java_objectheader*, lrt->capacity);
-
- lrt->prev = NULL;
+ localref_native_exit(m, ret_regs);
+#endif
- /* set new local references table */
+ /* get and unwrap the exception */
+ /* AFTER: removing the stackframe info */
+ /* BEFORE: releasing the local reference table */
- lrt = plrt;
- }
+ e = exceptions_get_and_clear_exception();
+ o = LLNI_UNWRAP(e);
- /* now store the previous local frames in the thread structure */
+#if defined(ENABLE_JNI)
+ /* release JNI local references table for this thread */
- LOCALREFTABLE = lrt;
+ localref_frame_pop_all();
+ localref_table_remove();
#endif
- /* get the exception and return it */
+#if !defined(NDEBUG)
+# if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
+ /* print the call-trace if necessary */
+ /* AFTER: unwrapping the return value */
- e = exceptions_get_and_clear_exception();
+ if (opt_TraceJavaCalls)
+ trace_java_call_exit(m, ret_regs);
+# endif
+#endif
- return e;
+ return o;
}
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of v for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
{
	/* Variable already lives in a register: hand that register
	   back directly.  (opcode is unused here but kept for the
	   established caller interface.) */

	if (!(v->flags & INMEMORY))
		return v->vv.regoff;

	/* Otherwise the value is spilled to the stack, so the caller
	   must work through the supplied temporary register. */

	return tempregnum;
}
+
/* codegen_reg_of_dst **********************************************************
This function determines a register, to which the result of an
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of dst.var for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
}
/* codegen_emit_phi_moves ****************************************************

   Emits phi moves at the end of the basicblock.

*******************************************************************************/

#if defined(ENABLE_SSA)
void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
{
	int          lt_d, lt_s, i;
	lsradata    *ls;
	codegendata *cd;
	varinfo     *s, *d;
	instruction  tmp_i;

	cd = jd->cd;
	ls = jd->ls;

	MCODECHECK(512);

	/* Moves from phi functions with highest indices have to be
	   inserted first, since this is the order as is used for
	   conflict resolution. */

	for (i = ls->num_phi_moves[bptr->nr] - 1; i >= 0; i--) {
		lt_d = ls->phi_moves[bptr->nr][i][0];
		lt_s = ls->phi_moves[bptr->nr][i][1];
#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose)
			printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
#endif
		if (lt_s == UNUSED) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf(" ... not processed \n");
#endif
			continue;
		}

		d = VAR(ls->lifetime[lt_d].v_index);
		s = VAR(ls->lifetime[lt_s].v_index);

		/* A type of -1 marks lifetimes that were joined during
		   allocation; no move is required then.  (The two checks
		   were duplicated verbatim before; folded into one.) */

		if ((d->type == -1) || (s->type == -1)) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf("...returning - phi lifetimes were joined\n");
#endif
			continue;
		}

		/* Build a synthetic instruction so the generic copy
		   emitter can produce the actual move. */

		tmp_i.opc          = 0;
		tmp_i.s1.varindex  = ls->lifetime[lt_s].v_index;
		tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
		emit_copy(jd, &tmp_i);

#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose) {
			if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
				/* mem -> mem */
				printf("M%3i <- M%3i", d->vv.regoff, s->vv.regoff);
			}
			else if (IS_INMEMORY(s->flags)) {
				/* mem -> reg */
				printf("R%3i <- M%3i", d->vv.regoff, s->vv.regoff);
			}
			else if (IS_INMEMORY(d->flags)) {
				/* reg -> mem */
				printf("M%3i <- R%3i", d->vv.regoff, s->vv.regoff);
			}
			else {
				/* reg -> reg */
				printf("R%3i <- R%3i", d->vv.regoff, s->vv.regoff);
			}
			printf("\n");
		}
#endif /* defined(SSA_DEBUG_VERBOSE) */
	}
}
#endif /* defined(ENABLE_SSA) */
/* REMOVEME When we have exception handling in C. */

void *md_asm_codegen_get_pv_from_pc(void *ra)
{
	/* Thin trampoline around md_codegen_get_pv_from_pc;
	   presumably exists so assembler code has a stable C symbol
	   to call (name prefix md_asm_) -- confirm against callers. */

	return md_codegen_get_pv_from_pc(ra);
}
/*