memory. All functions writing values into the data area return the offset
relative the begin of the code area (start of procedure).
- $Id: codegen-common.c 7403 2007-02-25 21:31:58Z pm $
-
*/
# include "codegen.h"
#endif
-#if defined(__ARM__)
-/* this is required for REG_SPLIT */
-# include "md-abi.h"
-#endif
+#include "md-abi.h"
#include "mm/memory.h"
#include "toolbox/logging.h"
#include "native/jni.h"
+#include "native/llni.h"
+#include "native/localref.h"
#include "native/native.h"
-#if defined(ENABLE_THREADS)
-# include "threads/native/threads.h"
-#endif
+#include "threads/threads-common.h"
+#include "vm/builtin.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
#endif
#include "vm/jit/dseg.h"
+#include "vm/jit/emit-common.h"
#include "vm/jit/jit.h"
#include "vm/jit/md.h"
+#include "vm/jit/methodheader.h"
+#include "vm/jit/patcher-common.h"
#include "vm/jit/replace.h"
+#if defined(ENABLE_SSA)
+# include "vm/jit/optimizing/lsra.h"
+# include "vm/jit/optimizing/ssa.h"
+#endif
#include "vm/jit/stacktrace.h"
+#include "vm/jit/trace.h"
#if defined(ENABLE_INTRP)
#include "vm/jit/intrp/intrp.h"
#include "vmcore/method.h"
#include "vmcore/options.h"
-#if defined(ENABLE_STATISTICS)
# include "vmcore/statistics.h"
+
+#if defined(ENABLE_VMLOG)
+#include <vmlog_cacao.h>
#endif
+#include "show.h"
/* in this tree we store all method addresses *********************************/
-static avl_tree *methodtree = NULL;
-static s4 methodtree_comparator(const void *pc, const void *element);
+static avl_tree_t *methodtree = NULL;
+static s4 methodtree_comparator(const void *treenode, const void *node);
/* codegen_init ****************************************************************
mte = NEW(methodtree_element);
mte->startpc = (u1 *) (ptrint) asm_vm_call_method;
- mte->endpc = (u1 *) ((ptrint) asm_call_jit_compiler - 1);
+ mte->endpc = (u1 *) (ptrint) asm_vm_call_method_end;
avl_insert(methodtree, mte);
#endif /* defined(ENABLE_JIT) */
+
}
+
}
m = jd->m;
cd = jd->cd;
- cd->mcodebase = DMNEW(u1, MCODEINITSIZE);
- cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
- cd->mcodesize = MCODEINITSIZE;
+ /* initialize members */
+
+ cd->flags = 0;
+
+ cd->mcodebase = DMNEW(u1, MCODEINITSIZE);
+ cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
+ cd->mcodesize = MCODEINITSIZE;
/* initialize mcode variables */
cd->jumpreferences = NULL;
-#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
+#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
cd->datareferences = NULL;
#endif
- cd->exceptionrefs = NULL;
/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
cd->patchrefs = NULL;
+ cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
cd->linenumberreferences = NULL;
cd->linenumbertablesizepos = 0;
cd->linenumbertablestartpos = 0;
cd->linenumbertab = 0;
+}
+
+
+/* codegen_reset ***************************************************************
+
+ Resets the codegen data structure so we can recompile the method.
+
+*******************************************************************************/
+
+static void codegen_reset(jitdata *jd)
+{
+ codeinfo *code;
+ codegendata *cd;
+ basicblock *bptr;
+
+ /* get required compiler data */
+
+ code = jd->code;
+ cd = jd->cd;
+
+ /* reset error flag */
+
+ cd->flags &= ~CODEGENDATA_FLAG_ERROR;
+
+ /* reset some members, we reuse the code memory already allocated
+ as this should have almost the correct size */
+
+ cd->mcodeptr = cd->mcodebase;
+ cd->lastmcodeptr = cd->mcodebase;
+
+ cd->dseg = NULL;
+ cd->dseglen = 0;
+
+ cd->jumpreferences = NULL;
+
+#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
+ cd->datareferences = NULL;
+#endif
+
+/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
+ cd->patchrefs = NULL;
+ cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
+
+ cd->linenumberreferences = NULL;
+ cd->linenumbertablesizepos = 0;
+ cd->linenumbertablestartpos = 0;
+ cd->linenumbertab = 0;
- cd->method = m;
+ /* We need to clear the mpc and the branch references from all
+ basic blocks as they will definitely change. */
- cd->maxstack = m->maxstack;
+ for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
+ bptr->mpc = -1;
+ bptr->branchrefs = NULL;
+ }
-#if defined(ENABLE_THREADS)
- cd->threadcritcurrent.next = NULL;
- cd->threadcritcount = 0;
+ /* We need to clear all the patcher references from the codeinfo
+ since they all will be regenerated */
+
+ patcher_list_reset(code);
+
+#if defined(ENABLE_REPLACEMENT)
+ code->rplpoints = NULL;
+ code->rplpointcount = 0;
+ code->regalloc = NULL;
+ code->regalloccount = 0;
+ code->globalcount = 0;
#endif
}
+/* codegen_generate ************************************************************
+
+ Generates the code for the currently compiled method.
+
+*******************************************************************************/
+
+bool codegen_generate(jitdata *jd)
+{
+ codegendata *cd;
+
+ /* get required compiler data */
+
+ cd = jd->cd;
+
+ /* call the machine-dependent code generation function */
+
+ if (!codegen_emit(jd))
+ return false;
+
+ /* check for an error */
+
+ if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
+ /* check for long-branches flag, if it is set we recompile the
+ method */
+
+#if !defined(NDEBUG)
+ if (compileverbose)
+ log_message_method("Re-generating code: ", jd->m);
+#endif
+
+ /* XXX maybe we should tag long-branches-methods for recompilation */
+
+ if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
+ /* we have to reset the codegendata structure first */
+
+ codegen_reset(jd);
+
+ /* and restart the compiler run */
+
+ if (!codegen_emit(jd))
+ return false;
+ }
+ else {
+ vm_abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
+ }
+
+#if !defined(NDEBUG)
+ if (compileverbose)
+ log_message_method("Re-generating code done: ", jd->m);
+#endif
+ }
+
+ /* reallocate the memory and finish the code generation */
+
+ codegen_finish(jd);
+
+ /* everything's ok */
+
+ return true;
+}
+
+
/* codegen_close ***************************************************************
TODO
cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
-#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP)
+#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
+ || defined(__SPARC_64__)
/* adjust the pointer to the last patcher position */
if (cd->lastmcodeptr != NULL)
*******************************************************************************/
-void codegen_add_branch_ref(codegendata *cd, basicblock *target)
+void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
{
- s4 branchmpc;
+ branchref *br;
+ s4 branchmpc;
+
+ STATISTICS(count_branches_unresolved++);
/* calculate the mpc of the branch instruction */
branchmpc = cd->mcodeptr - cd->mcodebase;
-#if defined(ENABLE_JIT)
- /* Check if the target basicblock has already a start pc, so the
- jump is backward and we can resolve it immediately. */
+ br = DNEW(branchref);
- if ((target->mpc >= 0)
-# if defined(ENABLE_INTRP)
- /* The interpreter uses absolute branches, so we do branch
- resolving after the code and data segment move. */
+ br->branchmpc = branchmpc;
+ br->condition = condition;
+ br->reg = reg;
+ br->options = options;
+ br->next = target->branchrefs;
- && !opt_intrp
-# endif
- )
- {
- md_codegen_patch_branch(cd, branchmpc, target->mpc);
- }
- else
-#endif
- {
- branchref *br = DNEW(branchref);
-
- br->branchpos = branchmpc;
- br->next = target->branchrefs;
-
- target->branchrefs = br;
- }
+ target->branchrefs = br;
}
void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
{
branchref *br;
- s4 branchmpc;
- s4 targetmpc;
+ u1 *mcodeptr;
+
+ /* Save the mcodeptr because in the branch emitting functions
+ we generate code somewhere inside already generated code,
+ but we're still in the actual code generation phase. */
+
+ mcodeptr = cd->mcodeptr;
- /* set target */
+ /* just to make sure */
- targetmpc = bptr->mpc;
+ assert(bptr->mpc >= 0);
for (br = bptr->branchrefs; br != NULL; br = br->next) {
- branchmpc = br->branchpos;
+ /* temporary set the mcodeptr */
- md_codegen_patch_branch(cd, branchmpc, targetmpc);
+ cd->mcodeptr = cd->mcodebase + br->branchmpc;
+
+ /* emit_bccz and emit_branch emit the correct code, even if we
+ pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
+
+ emit_bccz(cd, bptr, br->condition, br->reg, br->options);
}
+
+ /* restore mcodeptr */
+
+ cd->mcodeptr = mcodeptr;
}
-/* codegen_add_exception_ref ***************************************************
+/* codegen_branch_label_add ****************************************************
- Prepends an exception branch to the list.
+ Append an branch to the label-branch list.
*******************************************************************************/
-static void codegen_add_exception_ref(codegendata *cd, s4 reg,
- functionptr function)
+void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
{
- s4 branchmpc;
- exceptionref *er;
+ list_t *list;
+ branch_label_ref_t *br;
+ s4 mpc;
- branchmpc = cd->mcodeptr - cd->mcodebase;
+ /* get the label list */
+
+ list = cd->brancheslabel;
+
+ /* calculate the current mpc */
- er = DNEW(exceptionref);
+ mpc = cd->mcodeptr - cd->mcodebase;
- er->branchpos = branchmpc;
- er->reg = reg;
- er->function = function;
+ br = DNEW(branch_label_ref_t);
- er->next = cd->exceptionrefs;
+ br->mpc = mpc;
+ br->label = label;
+ br->condition = condition;
+ br->reg = reg;
+ br->options = options;
- cd->exceptionrefs = er;
+ /* add the branch to the list */
+
+ list_add_last_unsynced(list, br);
}
-/* codegen_add_arithmeticexception_ref *****************************************
+/* codegen_add_patch_ref *******************************************************
- Adds an ArithmeticException branch to the list.
+ Appends a new patcher reference to the list of patching positions.
*******************************************************************************/
-void codegen_add_arithmeticexception_ref(codegendata *cd)
+void codegen_add_patch_ref(codegendata *cd, functionptr patcher, voidptr ref,
+ s4 disp)
{
- codegen_add_exception_ref(cd, -1, STACKTRACE_inline_arithmeticexception);
-}
+ patchref *pr;
+ s4 branchmpc;
+
+ branchmpc = cd->mcodeptr - cd->mcodebase;
+
+ pr = DNEW(patchref);
+
+ pr->branchpos = branchmpc;
+ pr->disp = disp;
+ pr->patcher = patcher;
+ pr->ref = ref;
+/* list_add_first(cd->patchrefs, pr); */
+ pr->next = cd->patchrefs;
+ cd->patchrefs = pr;
-/* codegen_add_arrayindexoutofboundsexception_ref ******************************
+ /* Generate NOPs for opt_shownops. */
- Adds an ArrayIndexOutOfBoundsException branch to the list.
+ if (opt_shownops)
+ PATCHER_NOPS;
-*******************************************************************************/
+#if defined(ENABLE_JIT) && (defined(__I386__) || defined(__M68K__) || defined(__MIPS__) \
+ || defined(__SPARC_64__) || defined(__X86_64__))
-void codegen_add_arrayindexoutofboundsexception_ref(codegendata *cd, s4 reg)
-{
- codegen_add_exception_ref(cd, reg,
- STACKTRACE_inline_arrayindexoutofboundsexception);
+ /* On some architectures the patcher stub call instruction might
+ be longer than the actual instruction generated. On this
+ architectures we store the last patcher call position and after
+ the basic block code generation is completed, we check the
+ range and maybe generate some nop's. */
+ /* The nops are generated in codegen_emit in each codegen */
+
+ cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
+#endif
}
-/* codegen_add_arraystoreexception_ref *****************************************
+/* codegen_critical_section_new ************************************************
- Adds an ArrayStoreException branch to the list.
+ Allocates a new critical-section reference and adds it to the
+ critical-section list.
*******************************************************************************/
-void codegen_add_arraystoreexception_ref(codegendata *cd)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_new(codegendata *cd)
{
- codegen_add_exception_ref(cd, -1, STACKTRACE_inline_arraystoreexception);
-}
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
+
+ /* get the critical section list */
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
-/* codegen_add_classcastexception_ref ******************************************
+ mpc = cd->mcodeptr - cd->mcodebase;
- Adds an ClassCastException branch to the list.
+ csr = DNEW(critical_section_ref_t);
-*******************************************************************************/
+ /* We only can set restart right now, as start and end are set by
+ the following, corresponding functions. */
-void codegen_add_classcastexception_ref(codegendata *cd, s4 reg)
-{
- codegen_add_exception_ref(cd, reg, STACKTRACE_inline_classcastexception);
+ csr->start = -1;
+ csr->end = -1;
+ csr->restart = mpc;
+
+ /* add the branch to the list */
+
+ list_add_last_unsynced(list, csr);
}
+#endif
-/* codegen_add_nullpointerexception_ref ****************************************
+/* codegen_critical_section_start **********************************************
- Adds an NullPointerException branch to the list.
+ Set the start-point of the current critical section (which is the
+ last element of the list).
*******************************************************************************/
-void codegen_add_nullpointerexception_ref(codegendata *cd)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_start(codegendata *cd)
{
- codegen_add_exception_ref(cd, -1, STACKTRACE_inline_nullpointerexception);
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
+
+ /* get the critical section list */
+
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
+
+ mpc = cd->mcodeptr - cd->mcodebase;
+
+ /* get the current critical section */
+
+ csr = list_last_unsynced(list);
+
+ /* set the start point */
+
+ assert(csr->start == -1);
+
+ csr->start = mpc;
}
+#endif
-/* codegen_add_fillinstacktrace_ref ********************************************
+/* codegen_critical_section_end ************************************************
- Adds a fillInStackTrace branch to the list.
+ Set the end-point of the current critical section (which is the
+ last element of the list).
*******************************************************************************/
-void codegen_add_fillinstacktrace_ref(codegendata *cd)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_end(codegendata *cd)
{
- codegen_add_exception_ref(cd, -1, STACKTRACE_inline_fillInStackTrace);
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
+
+ /* get the critical section list */
+
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
+
+ mpc = cd->mcodeptr - cd->mcodebase;
+
+ /* get the current critical section */
+
+ csr = list_last_unsynced(list);
+
+ /* set the end point */
+
+ assert(csr->end == -1);
+
+ csr->end = mpc;
}
+#endif
-/* codegen_add_patch_ref *******************************************************
+/* codegen_critical_section_finish *********************************************
- Appends a new patcher reference to the list of patching positions.
+ Finish the critical sections, create the critical section nodes for
+ the AVL tree and insert them into the tree.
*******************************************************************************/
-void codegen_add_patch_ref(codegendata *cd, functionptr patcher, voidptr ref,
- s4 disp)
+#if defined(ENABLE_THREADS)
+static void codegen_critical_section_finish(jitdata *jd)
{
- patchref *pr;
- s4 branchmpc;
+ codeinfo *code;
+ codegendata *cd;
+ list_t *list;
+ critical_section_ref_t *csr;
+ critical_section_node_t *csn;
- branchmpc = cd->mcodeptr - cd->mcodebase;
+ /* get required compiler data */
- pr = DNEW(patchref);
+ code = jd->code;
+ cd = jd->cd;
- pr->branchpos = branchmpc;
- pr->disp = disp;
- pr->patcher = patcher;
- pr->ref = ref;
+ /* get the critical section list */
-/* list_add_first(cd->patchrefs, pr); */
- pr->next = cd->patchrefs;
- cd->patchrefs = pr;
+ list = cd->listcritical;
-#if defined(ENABLE_JIT) && (defined(__ALPHA__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__SPARC_64__) || defined(__X86_64__) || defined(__S390__))
- /* Generate NOPs for opt_shownops. */
+ /* iterate over all critical sections */
- if (opt_shownops)
- PATCHER_NOPS;
-#endif
+ for (csr = list_first_unsynced(list); csr != NULL;
+ csr = list_next_unsynced(list, csr)) {
+ /* check if all points are set */
-#if defined(ENABLE_JIT) && (defined(__I386__) || defined(__MIPS__) || defined(__X86_64__))
- /* On some architectures the patcher stub call instruction might
- be longer than the actual instruction generated. On this
- architectures we store the last patcher call position and after
- the basic block code generation is completed, we check the
- range and maybe generate some nop's. */
+ assert(csr->start != -1);
+ assert(csr->end != -1);
+ assert(csr->restart != -1);
- cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
-#endif
+ /* allocate tree node */
+
+ csn = NEW(critical_section_node_t);
+
+ csn->start = code->entrypoint + csr->start;
+ csn->end = code->entrypoint + csr->end;
+ csn->restart = code->entrypoint + csr->restart;
+
+ /* insert into the tree */
+
+ critical_section_register(csn);
+ }
}
+#endif
/* methodtree_comparator *******************************************************
Comparator function used for the AVL tree of methods.
+ ARGUMENTS:
+ treenode....the node from the tree
+ node........the node to compare to the tree-node
+
*******************************************************************************/
-static s4 methodtree_comparator(const void *pc, const void *element)
+static s4 methodtree_comparator(const void *treenode, const void *node)
{
methodtree_element *mte;
methodtree_element *mtepc;
- mte = (methodtree_element *) element;
- mtepc = (methodtree_element *) pc;
+ mte = (methodtree_element *) treenode;
+ mtepc = (methodtree_element *) node;
/* compare both startpc and endpc of pc, even if they have the same value,
otherwise the avl_probe sometimes thinks the element is already in the
if (mte == NULL) {
/* No method was found. Let's dump a stacktrace. */
+#if defined(ENABLE_VMLOG)
+ vmlog_cacao_signl("SIGSEGV");
+#endif
+
log_println("We received a SIGSEGV and tried to handle it, but we were");
log_println("unable to find a Java method at:");
log_println("");
log_println("PC=0x%08x", pc);
#endif
log_println("");
+ assert(0);
log_println("Dumping the current stacktrace:");
- stacktrace_dump_trace(THREADOBJECT);
+#if defined(ENABLE_THREADS)
+ /* XXX michi: This should be available even without threads! */
+ threads_print_stacktrace();
+#endif
vm_abort("Exiting...");
}
#endif
s4 alignedmcodelen;
jumpref *jr;
+ patchref_t *pr;
u1 *epoint;
- s4 extralen;
s4 alignedlen;
/* get required compiler data */
mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
-#if defined(ENABLE_THREADS)
- extralen = sizeof(critical_section_node_t) * cd->threadcritcount;
-#else
- extralen = 0;
-#endif
-
#if defined(ENABLE_STATISTICS)
if (opt_stat) {
count_code_len += mcodelen;
/* allocate new memory */
code->mcodelength = mcodelen + cd->dseglen;
- code->mcode = CNEW(u1, alignedlen + extralen);
+ code->mcode = CNEW(u1, alignedlen);
/* set the entrypoint of the method */
*((ptrint *) ((ptrint) epoint + cd->linenumbertablesizepos)) = lrtlen;
}
+ /* patcher resolving */
+
+ pr = list_first_unsynced(code->patchers);
+ while (pr) {
+ pr->mpc += (ptrint) epoint;
+ pr->datap = (ptrint) (pr->disp + epoint);
+ pr = list_next_unsynced(code->patchers, pr);
+ }
+
#if defined(ENABLE_REPLACEMENT)
/* replacement point resolving */
{
int i;
rplpoint *rp;
- code->replacementstubs += (ptrint) epoint;
-
rp = code->rplpoints;
for (i=0; i<code->rplpointcount; ++i, ++rp) {
rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
codegen_insertmethod(code->entrypoint, code->entrypoint + mcodelen);
-#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
+#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
/* resolve data segment references */
dseg_resolve_datareferences(jd);
#endif
#if defined(ENABLE_THREADS)
- {
- critical_section_node_t *n = (critical_section_node_t *) ((ptrint) code->mcode + alignedlen);
- s4 i;
- codegen_critical_section_t *nt = cd->threadcrit;
-
- for (i = 0; i < cd->threadcritcount; i++) {
- n->mcodebegin = (u1 *) (ptrint) code->mcode + nt->mcodebegin;
- n->mcodeend = (u1 *) (ptrint) code->mcode + nt->mcodeend;
- n->mcoderestart = (u1 *) (ptrint) code->mcode + nt->mcoderestart;
- critical_register_critical_section(n);
- n++;
- nt = nt->next;
- }
- }
+ /* create cirtical sections */
+
+ codegen_critical_section_finish(jd);
#endif
/* flush the instruction and data caches */
}
-/* codegen_createnativestub ****************************************************
+/* codegen_generate_stub_compiler **********************************************
+
+ Wrapper for codegen_emit_stub_compiler.
+
+ Returns:
+ pointer to the compiler stub code.
+
+*******************************************************************************/
+
+u1 *codegen_generate_stub_compiler(methodinfo *m)
+{
+ jitdata *jd;
+ codegendata *cd;
+ ptrint *d; /* pointer to data memory */
+ u1 *c; /* pointer to code memory */
+ s4 dumpsize;
+
+ /* mark dump memory */
+
+ dumpsize = dump_size();
+
+ /* allocate required data structures */
+
+ jd = DNEW(jitdata);
+
+ jd->m = m;
+ jd->cd = DNEW(codegendata);
+ jd->flags = 0;
+
+ /* get required compiler data */
+
+ cd = jd->cd;
+
+ /* allocate code memory */
+
+ c = CNEW(u1, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+
+ /* set pointers correctly */
+
+ d = (ptrint *) c;
+
+ cd->mcodebase = c;
+
+ c = c + 3 * SIZEOF_VOID_P;
+ cd->mcodeptr = c;
+
+ /* NOTE: The codeinfo pointer is actually a pointer to the
+ methodinfo (this fakes a codeinfo structure). */
+
+ d[0] = (ptrint) asm_call_jit_compiler;
+ d[1] = (ptrint) m;
+ d[2] = (ptrint) &d[1]; /* fake code->m */
+
+ /* call the emit function */
+
+ codegen_emit_stub_compiler(jd);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ count_cstub_len += 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE;
+#endif
+
+ /* flush caches */
+
+ md_cacheflush(cd->mcodebase, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+
+ /* release dump memory */
+
+ dump_release(dumpsize);
+
+ /* return native stub code */
+
+ return c;
+}
+
+
+/* codegen_generate_stub_builtin ***********************************************
- Wrapper for createnativestub.
+ Wrapper for codegen_emit_stub_builtin.
+
+ Returns:
+ Pointer to the entrypoint of the stub.
+
+*******************************************************************************/
+
+void codegen_generate_stub_builtin(builtintable_entry *bte)
+{
+#if defined(__ARM__) || defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__POWERPC__) || defined(__SPARC64__) || defined(__X86_64__)
+ jitdata *jd;
+ codeinfo *code;
+ s4 dumpsize;
+
+ /* mark dump memory */
+
+ dumpsize = dump_size();
+
+ jd = DNEW(jitdata);
+
+ jd->m = NULL;
+ jd->cd = DNEW(codegendata);
+ jd->rd = NULL;
+ jd->flags = 0;
+
+ /* Allocate codeinfo memory from the heap as we need to keep them. */
+
+ jd->code = code_codeinfo_new(NULL);
+
+ /* get required compiler data */
+
+ code = jd->code;
+
+ /* setup code generation stuff */
+
+ codegen_setup(jd);
+
+ /* generate the code */
+
+#if defined(ENABLE_JIT)
+# if defined(ENABLE_INTRP)
+ if (!opt_intrp)
+# endif
+ codegen_emit_stub_builtin(jd, bte);
+#endif
+
+ /* reallocate the memory and finish the code generation */
+
+ codegen_finish(jd);
+
+ /* set the stub entry point in the builtin table */
+
+ bte->stub = code->entrypoint;
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_stub_native += code->mcodelength;
+#endif
+
+ /* release memory */
+
+ dump_release(dumpsize);
+#endif /* architecture list */
+}
+
+
+/* codegen_generate_stub_native ************************************************
+
+ Wrapper for codegen_emit_stub_native.
Returns:
the codeinfo representing the stub code.
*******************************************************************************/
-codeinfo *codegen_createnativestub(functionptr f, methodinfo *m)
+codeinfo *codegen_generate_stub_native(methodinfo *m, functionptr f)
{
jitdata *jd;
codeinfo *code;
#if defined(ENABLE_JIT)
# if defined(ENABLE_INTRP)
if (opt_intrp)
- code->entrypoint = intrp_createnativestub(f, jd, nmd);
+ intrp_createnativestub(f, jd, nmd);
else
# endif
- code->entrypoint = createnativestub(f, jd, nmd);
+ codegen_emit_stub_native(jd, nmd, f);
#else
- code->entrypoint = intrp_createnativestub(f, jd, nmd);
+ intrp_createnativestub(f, jd, nmd);
#endif
+ /* reallocate the memory and finish the code generation */
+
+ codegen_finish(jd);
+
#if defined(ENABLE_STATISTICS)
+ /* must be done after codegen_finish() */
+
if (opt_stat)
- count_nstub_len += code->mcodelength;
+ size_stub_native += code->mcodelength;
#endif
#if !defined(NDEBUG)
/* disassemble native stub */
if (opt_shownativestub) {
+#if defined(ENABLE_DEBUG_FILTER)
+ if (m->filtermatches & SHOW_FILTER_FLAG_SHOW_METHOD)
+#endif
+ {
#if defined(ENABLE_DISASSEMBLER)
- codegen_disassemble_nativestub(m,
- (u1 *) (ptrint) code->entrypoint,
- (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+ codegen_disassemble_nativestub(m,
+ (u1 *) (ptrint) code->entrypoint,
+ (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
#endif
- /* show data segment */
+ /* show data segment */
- if (opt_showddatasegment)
- dseg_display(jd);
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
}
#endif /* !defined(NDEBUG) */
#endif
-/* codegen_start_native_call ***************************************************
+/* codegen_stub_builtin_enter **************************************************
- Prepares the stuff required for a native (JNI) function call:
+ Prepares the stuff required for a builtin function call:
- adds a stackframe info structure to the chain, for stacktraces
- - prepares the local references table on the stack
- The layout of the native stub stackframe should look like this:
+ The layout of the builtin stub stackframe should look like this:
+---------------------------+ <- SP (of parent Java function)
| return address |
| |
+---------------------------+
| |
- | local references table |
- | |
- +---------------------------+
- | |
| arguments (if any) |
| |
+---------------------------+ <- SP (native stub)
*******************************************************************************/
-void codegen_start_native_call(u1 *datasp, u1 *pv, u1 *sp, u1 *ra)
+void codegen_stub_builtin_enter(u1 *datasp, u1 *pv, u1 *sp, u1 *ra)
{
stackframeinfo *sfi;
- localref_table *lrt;
/* get data structures from stack */
sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
- lrt = (localref_table *) (datasp - sizeof(stackframeinfo) -
- sizeof(localref_table));
/* add a stackframeinfo to the chain */
stacktrace_create_native_stackframeinfo(sfi, pv, sp, ra);
+}
-#if defined(ENABLE_JAVASE)
- /* add current JNI local references table to this thread */
- lrt->capacity = LOCALREFTABLE_CAPACITY;
- lrt->used = 0;
- lrt->localframes = 1;
- lrt->prev = LOCALREFTABLE;
+/* codegen_stub_builtin_exit ***************************************************
- /* clear the references array (memset is faster the a for-loop) */
+ Removes the stuff required for a builtin function call.
- MSET(lrt->refs, 0, java_objectheader*, LOCALREFTABLE_CAPACITY);
+*******************************************************************************/
- LOCALREFTABLE = lrt;
-#endif
+void codegen_stub_builtin_exit(u1 *datasp)
+{
+ stackframeinfo *sfi;
+
+ /* get data structures from stack */
+
+ sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
+
+ /* remove current stackframeinfo from chain */
+
+ stacktrace_remove_stackframeinfo(sfi);
}
-/* codegen_finish_native_call **************************************************
+/* codegen_start_native_call ***************************************************
- Removes the stuff required for a native (JNI) function call.
- Additionally it checks for an exceptions and in case, get the
- exception object and clear the pointer.
+ Prepares the stuff required for a native (JNI) function call:
+
+ - adds a stackframe info structure to the chain, for stacktraces
+ - prepares the local references table on the stack
+
+ The layout of the native stub stackframe should look like this:
+
+ +---------------------------+ <- java SP (of parent Java function)
+ | return address |
+ +---------------------------+ <- data SP
+ | |
+ | stackframe info structure |
+ | |
+ +---------------------------+
+ | |
+ | local references table |
+ | |
+ +---------------------------+
+ | |
+ | saved registers (if any) |
+ | |
+ +---------------------------+
+ | |
+ | arguments (if any) |
+ | |
+ +---------------------------+ <- current SP (native stub)
*******************************************************************************/
-java_objectheader *codegen_finish_native_call(u1 *datasp)
+void codegen_start_native_call(u1 *currentsp, u1 *pv)
{
- stackframeinfo *sfi;
- stackframeinfo **psfi;
- localref_table *lrt;
- localref_table *plrt;
- s4 localframes;
- java_objectheader *e;
+ stackframeinfo *sfi;
+ localref_table *lrt;
+ codeinfo *code;
+ methodinfo *m;
+ int32_t framesize;
+
+ uint8_t *datasp;
+ uint8_t *javasp;
+ uint8_t *javara;
+#if !defined(NDEBUG)
+ uint64_t *args_regs;
+ uint64_t *args_stack;
+#endif
+
+ STATISTICS(count_calls_java_to_native++);
+
+ /* get information from method header */
+
+ code = *((codeinfo **) (pv + CodeinfoPointer));
+ framesize = *((int32_t *) (pv + FrameSize));
+
+ assert(code);
+ assert(framesize > sizeof(stackframeinfo) + sizeof(localref_table));
+
+ /* get the methodinfo */
+
+ m = code->m;
+
+ assert(m);
+
+ /* calculate needed values */
+
+#if defined(__ALPHA__) || defined(__ARM__)
+ datasp = currentsp + framesize - SIZEOF_VOID_P;
+ javasp = currentsp + framesize;
+ javara = *((uint8_t **) datasp);
+#elif defined(__MIPS__) || defined(__S390__)
+ /* MIPS and S390 always uses 8 bytes to store the RA */
+ datasp = currentsp + framesize - 8;
+ javasp = currentsp + framesize;
+ javara = *((uint8_t **) datasp);
+#elif defined(__I386__) || defined (__M68K__) || defined (__X86_64__)
+ datasp = currentsp + framesize;
+ javasp = currentsp + framesize + SIZEOF_VOID_P;
+ javara = *((uint8_t **) datasp);
+#elif defined(__POWERPC__) || defined(__POWERPC64__)
+ datasp = currentsp + framesize;
+ javasp = currentsp + framesize;
+ javara = *((uint8_t **) (datasp + LA_LR_OFFSET));
+#else
+ /* XXX is was unable to do this port for SPARC64, sorry. (-michi) */
+ /* XXX maybe we need to pass the RA as argument there */
+ vm_abort("codegen_start_native_call: unsupported architecture");
+#endif
+
+#if 0
+ printf("NATIVE (framesize=%d): ", framesize);
+ method_print(m);
+ printf("\n");
+ fflush(stdout);
+#endif
+
+#if 0 && !defined(NDEBUG)
+ if (opt_TraceJavaCalls) {
+ args_regs = currentsp;
+ args_stack = javasp;
+ trace_java_call_enter(m, args_regs, args_stack);
+ }
+#endif
/* get data structures from stack */
lrt = (localref_table *) (datasp - sizeof(stackframeinfo) -
sizeof(localref_table));
- /* remove current stackframeinfo from chain */
-
- psfi = STACKFRAMEINFO;
+#if defined(ENABLE_JNI)
+ /* add current JNI local references table to this thread */
- *psfi = sfi->prev;
+ localref_table_add(lrt);
+#endif
-#if defined(ENABLE_JAVASE)
- /* release JNI local references tables for this thread */
+ /* XXX add references to lrt here!!! */
- lrt = LOCALREFTABLE;
+ /* add a stackframeinfo to the chain */
- /* release all current local frames */
+ stacktrace_create_native_stackframeinfo(sfi, pv, javasp, javara);
+}
- for (localframes = lrt->localframes; localframes >= 1; localframes--) {
- /* get previous frame */
- plrt = lrt->prev;
+/* codegen_finish_native_call **************************************************
- /* Clear all reference entries (only for tables allocated on
- the Java heap). */
+ Removes the stuff required for a native (JNI) function call.
+ Additionally it checks for an exceptions and in case, get the
+ exception object and clear the pointer.
- if (localframes > 1)
- MSET(&lrt->refs[0], 0, java_objectheader*, lrt->capacity);
+*******************************************************************************/
- lrt->prev = NULL;
+java_object_t *codegen_finish_native_call(u1 *datasp)
+{
+ stackframeinfo *sfi;
+ java_handle_t *e;
+ java_object_t *o;
- /* set new local references table */
+ /* get data structures from stack */
- lrt = plrt;
- }
+ sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
- /* now store the previous local frames in the thread structure */
+ /* remove current stackframeinfo from chain */
- LOCALREFTABLE = lrt;
-#endif
+ stacktrace_remove_stackframeinfo(sfi);
- /* get the exception and return it */
+ /* get and unwrap the exception */
+ /* ATTENTION: do the this _after_ the stackframeinfo was
+ removed but _before_ the localref_table gets removed! */
e = exceptions_get_and_clear_exception();
+ o = LLNI_UNWRAP(e);
+
+#if defined(ENABLE_JNI)
+ /* release JNI local references table for this thread */
- return e;
+ localref_frame_pop_all();
+ localref_table_remove();
+#endif
+
+ return o;
}
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of v for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
return tempregnum;
#endif
- if (!(v->flags & INMEMORY)) {
-#if defined(__ARM__) && defined(__ARMEL__)
- if (IS_2_WORD_TYPE(v->type) && (GET_HIGH_REG(v->vv.regoff) == REG_SPLIT))
- return PACK_REGS(GET_LOW_REG(v->vv.regoff),
- GET_HIGH_REG(tempregnum));
-#endif
-#if defined(__ARM__) && defined(__ARMEB__)
- if (IS_2_WORD_TYPE(v->type) && (GET_LOW_REG(v->vv.regoff) == REG_SPLIT))
- return PACK_REGS(GET_LOW_REG(tempregnum),
- GET_HIGH_REG(v->vv.regoff));
-#endif
+ if (!(v->flags & INMEMORY))
return v->vv.regoff;
- }
-
-#if defined(ENABLE_STATISTICS)
- if (opt_stat)
- count_spills_read++;
-#endif
return tempregnum;
}
+
/* codegen_reg_of_dst **********************************************************
This function determines a register, to which the result of an
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of dst.var for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
}
-#if defined(ENABLE_THREADS)
-void codegen_threadcritrestart(codegendata *cd, int offset)
-{
- cd->threadcritcurrent.mcoderestart = offset;
-}
+/* codegen_emit_phi_moves ****************************************************
+ Emits phi moves at the end of the basicblock.
-void codegen_threadcritstart(codegendata *cd, int offset)
+*******************************************************************************/
+
+#if defined(ENABLE_SSA)
+void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
{
- cd->threadcritcurrent.mcodebegin = offset;
-}
+ int lt_d,lt_s,i;
+ lsradata *ls;
+ codegendata *cd;
+ varinfo *s, *d;
+ instruction tmp_i;
+ cd = jd->cd;
+ ls = jd->ls;
-void codegen_threadcritstop(codegendata *cd, int offset)
-{
- cd->threadcritcurrent.next = cd->threadcrit;
- cd->threadcritcurrent.mcodeend = offset;
- cd->threadcrit = DNEW(codegen_critical_section_t);
- *(cd->threadcrit) = cd->threadcritcurrent;
- cd->threadcritcount++;
-}
+ MCODECHECK(512);
+
+ /* Moves from phi functions with highest indices have to be */
+ /* inserted first, since this is the order used for */
+ /* conflict resolution */
+
+ for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
+ lt_d = ls->phi_moves[bptr->nr][i][0];
+ lt_s = ls->phi_moves[bptr->nr][i][1];
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
+#endif
+ if (lt_s == UNUSED) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf(" ... not processed \n");
+#endif
+ continue;
+ }
+
+ d = VAR(ls->lifetime[lt_d].v_index);
+ s = VAR(ls->lifetime[lt_s].v_index);
+
+
+ if (d->type == -1) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("...returning - phi lifetimes were joined\n");
#endif
+ return;
+ }
+
+ if (s->type == -1) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("...returning - phi lifetimes were joined\n");
+#endif
+ return;
+ }
+
+ tmp_i.opc = 0;
+ tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
+ tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
+ emit_copy(jd, &tmp_i);
+
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose) {
+ if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
+ /* mem -> mem */
+ printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else if (IS_INMEMORY(s->flags)) {
+ /* mem -> reg */
+ printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else if (IS_INMEMORY(d->flags)) {
+ /* reg -> mem */
+ printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else {
+ /* reg -> reg */
+ printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
+ }
+ printf("\n");
+ }
+#endif /* defined(SSA_DEBUG_VERBOSE) */
+ }
+}
+#endif /* defined(ENABLE_SSA) */
+
/*