memory. All functions writing values into the data area return the offset
relative the begin of the code area (start of procedure).
- $Id: codegen-common.c 7785 2007-04-21 10:55:30Z edwin $
-
*/
#include "vm/types.h"
-#if defined(ENABLE_JIT)
-/* this is required PATCHER_CALL_SIZE */
-# include "codegen.h"
-#endif
-
-#if defined(__ARM__)
-/* this is required for REG_SPLIT */
-# include "md-abi.h"
-#endif
+#include "codegen.h"
+#include "md-abi.h"
#include "mm/memory.h"
#include "toolbox/logging.h"
#include "native/jni.h"
+#include "native/llni.h"
+#include "native/localref.h"
#include "native/native.h"
+#if defined(WITH_CLASSPATH_SUN)
+# include "native/include/java_lang_Object.h"
+# include "native/include/java_lang_String.h"
+# include "native/include/java_nio_ByteBuffer.h" /* required by j.l.CL */
+# include "native/include/java_lang_ClassLoader.h"
+#endif
+
+#include "native/include/java_lang_Class.h"
+
#include "threads/threads-common.h"
+#include "vm/builtin.h"
#include "vm/exceptions.h"
#include "vm/stringlocal.h"
#include "vm/jit/dseg.h"
#include "vm/jit/emit-common.h"
#include "vm/jit/jit.h"
+#include "vm/jit/linenumbertable.h"
#include "vm/jit/md.h"
+#include "vm/jit/methodheader.h"
+#include "vm/jit/patcher-common.h"
#include "vm/jit/replace.h"
+#if defined(ENABLE_SSA)
+# include "vm/jit/optimizing/lsra.h"
+# include "vm/jit/optimizing/ssa.h"
+#endif
#include "vm/jit/stacktrace.h"
+#include "vm/jit/trace.h"
#if defined(ENABLE_INTRP)
#include "vm/jit/intrp/intrp.h"
#include <vmlog_cacao.h>
#endif
+#include "show.h"
/* in this tree we store all method addresses *********************************/
-static avl_tree *methodtree = NULL;
-static s4 methodtree_comparator(const void *pc, const void *element);
+static avl_tree_t *methodtree = NULL;
+static s4 methodtree_comparator(const void *treenode, const void *node);
/* codegen_init ****************************************************************
avl_insert(methodtree, mte);
#endif /* defined(ENABLE_JIT) */
+
}
+
}
cd->datareferences = NULL;
#endif
-/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
- cd->patchrefs = NULL;
cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
-
- cd->linenumberreferences = NULL;
- cd->linenumbertablesizepos = 0;
- cd->linenumbertablestartpos = 0;
- cd->linenumbertab = 0;
-
-#if defined(ENABLE_THREADS)
- cd->threadcritcurrent.next = NULL;
- cd->threadcritcount = 0;
-#endif
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
+ cd->linenumbers = list_create_dump(OFFSET(linenumbertable_list_entry_t, linkage));
}
cd->datareferences = NULL;
#endif
-/* cd->patchrefs = list_create_dump(OFFSET(patchref, linkage)); */
- cd->patchrefs = NULL;
cd->brancheslabel = list_create_dump(OFFSET(branch_label_ref_t, linkage));
-
- cd->linenumberreferences = NULL;
- cd->linenumbertablesizepos = 0;
- cd->linenumbertablestartpos = 0;
- cd->linenumbertab = 0;
+ cd->listcritical = list_create_dump(OFFSET(critical_section_ref_t, linkage));
+ cd->linenumbers = list_create_dump(OFFSET(linenumbertable_list_entry_t, linkage));
-#if defined(ENABLE_THREADS)
- cd->threadcritcurrent.next = NULL;
- cd->threadcritcount = 0;
-#endif
-
/* We need to clear the mpc and the branch references from all
basic blocks as they will definitely change. */
bptr->branchrefs = NULL;
}
+ /* We need to clear all the patcher references from the codeinfo
+ since they all will be regenerated */
+
+ patcher_list_reset(code);
+
#if defined(ENABLE_REPLACEMENT)
code->rplpoints = NULL;
code->rplpointcount = 0;
cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
-#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP)
+#if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
+ || defined(__SPARC_64__)
/* adjust the pointer to the last patcher position */
if (cd->lastmcodeptr != NULL)
}
-/* codegen_add_patch_ref *******************************************************
+/* codegen_critical_section_new ************************************************
- Appends a new patcher reference to the list of patching positions.
+ Allocates a new critical-section reference and adds it to the
+ critical-section list.
*******************************************************************************/
-void codegen_add_patch_ref(codegendata *cd, functionptr patcher, voidptr ref,
- s4 disp)
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_new(codegendata *cd)
{
- patchref *pr;
- s4 branchmpc;
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
- branchmpc = cd->mcodeptr - cd->mcodebase;
+ /* get the critical section list */
- pr = DNEW(patchref);
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
- pr->branchpos = branchmpc;
- pr->disp = disp;
- pr->patcher = patcher;
- pr->ref = ref;
+ mpc = cd->mcodeptr - cd->mcodebase;
-/* list_add_first(cd->patchrefs, pr); */
- pr->next = cd->patchrefs;
- cd->patchrefs = pr;
+ csr = DNEW(critical_section_ref_t);
- /* Generate NOPs for opt_shownops. */
+ /* We only can set restart right now, as start and end are set by
+ the following, corresponding functions. */
- if (opt_shownops)
- PATCHER_NOPS;
+ csr->start = -1;
+ csr->end = -1;
+ csr->restart = mpc;
-#if defined(ENABLE_JIT) && (defined(__I386__) || defined(__MIPS__) || defined(__X86_64__))
- /* On some architectures the patcher stub call instruction might
- be longer than the actual instruction generated. On this
- architectures we store the last patcher call position and after
- the basic block code generation is completed, we check the
- range and maybe generate some nop's. */
+ /* add the branch to the list */
- cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
+ list_add_last_unsynced(list, csr);
+}
#endif
+
+
+/* codegen_critical_section_start **********************************************
+
+ Set the start-point of the current critical section (which is the
+ last element of the list).
+
+*******************************************************************************/
+
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_start(codegendata *cd)
+{
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
+
+ /* get the critical section list */
+
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
+
+ mpc = cd->mcodeptr - cd->mcodebase;
+
+ /* get the current critical section */
+
+ csr = list_last_unsynced(list);
+
+ /* set the start point */
+
+ assert(csr->start == -1);
+
+ csr->start = mpc;
+}
+#endif
+
+
+/* codegen_critical_section_end ************************************************
+
+ Set the end-point of the current critical section (which is the
+ last element of the list).
+
+*******************************************************************************/
+
+#if defined(ENABLE_THREADS)
+void codegen_critical_section_end(codegendata *cd)
+{
+ list_t *list;
+ critical_section_ref_t *csr;
+ s4 mpc;
+
+ /* get the critical section list */
+
+ list = cd->listcritical;
+
+ /* calculate the current mpc */
+
+ mpc = cd->mcodeptr - cd->mcodebase;
+
+ /* get the current critical section */
+
+ csr = list_last_unsynced(list);
+
+ /* set the end point */
+
+ assert(csr->end == -1);
+
+ csr->end = mpc;
}
+#endif
+
+
+/* codegen_critical_section_finish *********************************************
+
+ Finish the critical sections, create the critical section nodes for
+ the AVL tree and insert them into the tree.
+
+*******************************************************************************/
+
+#if defined(ENABLE_THREADS)
+static void codegen_critical_section_finish(jitdata *jd)
+{
+ codeinfo *code;
+ codegendata *cd;
+ list_t *list;
+ critical_section_ref_t *csr;
+ critical_section_node_t *csn;
+
+ /* get required compiler data */
+
+ code = jd->code;
+ cd = jd->cd;
+
+ /* get the critical section list */
+
+ list = cd->listcritical;
+
+ /* iterate over all critical sections */
+
+ for (csr = list_first_unsynced(list); csr != NULL;
+ csr = list_next_unsynced(list, csr)) {
+ /* check if all points are set */
+
+ assert(csr->start != -1);
+ assert(csr->end != -1);
+ assert(csr->restart != -1);
+
+ /* allocate tree node */
+
+ csn = NEW(critical_section_node_t);
+
+ csn->start = code->entrypoint + csr->start;
+ csn->end = code->entrypoint + csr->end;
+ csn->restart = code->entrypoint + csr->restart;
+
+ /* insert into the tree */
+
+ critical_section_register(csn);
+ }
+}
+#endif
/* methodtree_comparator *******************************************************
Comparator function used for the AVL tree of methods.
+ ARGUMENTS:
+ treenode....the node from the tree
+ node........the node to compare to the tree-node
+
*******************************************************************************/
-static s4 methodtree_comparator(const void *pc, const void *element)
+static s4 methodtree_comparator(const void *treenode, const void *node)
{
methodtree_element *mte;
methodtree_element *mtepc;
- mte = (methodtree_element *) element;
- mtepc = (methodtree_element *) pc;
+ mte = (methodtree_element *) treenode;
+ mtepc = (methodtree_element *) node;
/* compare both startpc and endpc of pc, even if they have the same value,
otherwise the avl_probe sometimes thinks the element is already in the
log_println("PC=0x%08x", pc);
#endif
log_println("");
+ assert(0);
log_println("Dumping the current stacktrace:");
#if defined(ENABLE_THREADS)
cd->replacementpoint++;
+#if !defined(NDEBUG)
+ /* XXX actually we should use an own REPLACEMENT_NOPS here! */
+ if (opt_TestReplacement)
+ PATCHER_NOPS;
+#endif
+
/* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
#endif
s4 alignedmcodelen;
jumpref *jr;
+ patchref_t *pr;
u1 *epoint;
- s4 extralen;
s4 alignedlen;
/* get required compiler data */
mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
-#if defined(ENABLE_THREADS)
- extralen = sizeof(critical_section_node_t) * cd->threadcritcount;
-#else
- extralen = 0;
-#endif
-
#if defined(ENABLE_STATISTICS)
if (opt_stat) {
count_code_len += mcodelen;
/* allocate new memory */
code->mcodelength = mcodelen + cd->dseglen;
- code->mcode = CNEW(u1, alignedlen + extralen);
+ code->mcode = CNEW(u1, alignedlen);
/* set the entrypoint of the method */
}
#endif
+ /* Create the exception table. */
+
+ exceptiontable_create(jd);
+
+ /* Create the linenumber table. */
+
+ linenumbertable_create(jd);
+
/* jump table resolving */
for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
*((functionptr *) ((ptrint) epoint + jr->tablepos)) =
(functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
- /* line number table resolving */
- {
- linenumberref *lr;
- ptrint lrtlen = 0;
- ptrint target;
-
- for (lr = cd->linenumberreferences; lr != NULL; lr = lr->next) {
- lrtlen++;
- target = lr->targetmpc;
- /* if the entry contains an mcode pointer (normal case), resolve it */
- /* (see doc/inlining_stacktrace.txt for details) */
- if (lr->linenumber >= -2) {
- target += (ptrint) epoint;
- }
- *((functionptr *) ((ptrint) epoint + (ptrint) lr->tablepos)) =
- (functionptr) target;
- }
-
- *((functionptr *) ((ptrint) epoint + cd->linenumbertablestartpos)) =
- (functionptr) ((ptrint) epoint + cd->linenumbertab);
+ /* patcher resolving */
- *((ptrint *) ((ptrint) epoint + cd->linenumbertablesizepos)) = lrtlen;
+ pr = list_first_unsynced(code->patchers);
+ while (pr) {
+ pr->mpc += (ptrint) epoint;
+ pr->datap = (ptrint) (pr->disp + epoint);
+ pr = list_next_unsynced(code->patchers, pr);
}
#if defined(ENABLE_REPLACEMENT)
int i;
rplpoint *rp;
- code->replacementstubs += (ptrint) epoint;
-
rp = code->rplpoints;
for (i=0; i<code->rplpointcount; ++i, ++rp) {
rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
#endif
#if defined(ENABLE_THREADS)
- {
- critical_section_node_t *n = (critical_section_node_t *) ((ptrint) code->mcode + alignedlen);
- s4 i;
- codegen_critical_section_t *nt = cd->threadcrit;
-
- for (i = 0; i < cd->threadcritcount; i++) {
- n->mcodebegin = (u1 *) (ptrint) code->mcode + nt->mcodebegin;
- n->mcodeend = (u1 *) (ptrint) code->mcode + nt->mcodeend;
- n->mcoderestart = (u1 *) (ptrint) code->mcode + nt->mcoderestart;
- critical_register_critical_section(n);
- n++;
- nt = nt->next;
- }
- }
+	/* create critical sections */
+
+ codegen_critical_section_finish(jd);
#endif
/* flush the instruction and data caches */
cd = jd->cd;
+#if !defined(JIT_COMPILER_VIA_SIGNAL)
/* allocate code memory */
c = CNEW(u1, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
/* flush caches */
md_cacheflush(cd->mcodebase, 3 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+#else
+ /* Allocate code memory. */
+
+ c = CNEW(uint8_t, 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+
+ /* Set pointers correctly. */
+
+ d = (ptrint *) c;
+
+ cd->mcodebase = c;
+
+ c = c + 2 * SIZEOF_VOID_P;
+ cd->mcodeptr = c;
+
+ /* NOTE: The codeinfo pointer is actually a pointer to the
+ methodinfo (this fakes a codeinfo structure). */
+
+ d[0] = (ptrint) m;
+ d[1] = (ptrint) &d[0]; /* fake code->m */
+
+ /* Emit the trap instruction. */
+
+ emit_trap_compiler(cd);
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ count_cstub_len += 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE;
+#endif
+
+ /* Flush caches. */
+
+ md_cacheflush(cd->mcodebase, 2 * SIZEOF_VOID_P + COMPILERSTUB_CODESIZE);
+#endif
/* release dump memory */
}
+/* codegen_generate_stub_builtin ***********************************************
+
+ Wrapper for codegen_emit_stub_native.
+
+*******************************************************************************/
+
+void codegen_generate_stub_builtin(methodinfo *m, builtintable_entry *bte)
+{
+ jitdata *jd;
+ codeinfo *code;
+ int skipparams;
+ s4 dumpsize;
+
+ /* mark dump memory */
+
+ dumpsize = dump_size();
+
+ /* Create JIT data structure. */
+
+ jd = jit_jitdata_new(m);
+
+ /* Get required compiler data. */
+
+ code = jd->code;
+
+ /* setup code generation stuff */
+
+ codegen_setup(jd);
+
+ /* Set the number of native arguments we need to skip. */
+
+ skipparams = 0;
+
+ /* generate the code */
+
+#if defined(ENABLE_JIT)
+# if defined(ENABLE_INTRP)
+ if (!opt_intrp) {
+# endif
+ assert(bte->fp != NULL);
+ codegen_emit_stub_native(jd, bte->md, bte->fp, skipparams);
+# if defined(ENABLE_INTRP)
+ }
+# endif
+#endif
+
+ /* reallocate the memory and finish the code generation */
+
+ codegen_finish(jd);
+
+ /* set the stub entry point in the builtin table */
+
+ bte->stub = code->entrypoint;
+
+#if defined(ENABLE_STATISTICS)
+ if (opt_stat)
+ size_stub_native += code->mcodelength;
+#endif
+
+#if !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER)
+ /* disassemble native stub */
+
+ if (opt_DisassembleStubs) {
+ codegen_disassemble_stub(m,
+ (u1 *) (ptrint) code->entrypoint,
+ (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+
+ /* show data segment */
+
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
+#endif /* !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER) */
+
+ /* release memory */
+
+ dump_release(dumpsize);
+}
+
+
/* codegen_generate_stub_native ************************************************
Wrapper for codegen_emit_stub_native.
s4 dumpsize;
methoddesc *md;
methoddesc *nmd;
- s4 nativeparams;
+ int skipparams;
/* mark dump memory */
dumpsize = dump_size();
- jd = DNEW(jitdata);
+ /* Create JIT data structure. */
- jd->m = m;
- jd->cd = DNEW(codegendata);
- jd->rd = DNEW(registerdata);
- jd->flags = 0;
+ jd = jit_jitdata_new(m);
- /* Allocate codeinfo memory from the heap as we need to keep them. */
-
- jd->code = code_codeinfo_new(m); /* XXX check allocation */
-
- /* get required compiler data */
+ /* Get required compiler data. */
code = jd->code;
/* create new method descriptor with additional native parameters */
md = m->parseddesc;
- nativeparams = (m->flags & ACC_STATIC) ? 2 : 1;
+
+ /* Set the number of native arguments we need to skip. */
+
+ if (m->flags & ACC_STATIC)
+ skipparams = 2;
+ else
+ skipparams = 1;
nmd = (methoddesc *) DMNEW(u1, sizeof(methoddesc) - sizeof(typedesc) +
md->paramcount * sizeof(typedesc) +
- nativeparams * sizeof(typedesc));
+ skipparams * sizeof(typedesc));
- nmd->paramcount = md->paramcount + nativeparams;
+ nmd->paramcount = md->paramcount + skipparams;
nmd->params = DMNEW(paramdesc, nmd->paramcount);
if (m->flags & ACC_STATIC)
nmd->paramtypes[1].type = TYPE_ADR; /* add class pointer */
- MCOPY(nmd->paramtypes + nativeparams, md->paramtypes, typedesc,
+ MCOPY(nmd->paramtypes + skipparams, md->paramtypes, typedesc,
md->paramcount);
#if defined(ENABLE_JIT)
intrp_createnativestub(f, jd, nmd);
else
# endif
- codegen_emit_stub_native(jd, nmd, f);
+ codegen_emit_stub_native(jd, nmd, f, skipparams);
#else
intrp_createnativestub(f, jd, nmd);
#endif
-#if defined(ENABLE_STATISTICS)
- if (opt_stat)
- count_nstub_len += code->mcodelength;
-#endif
-
/* reallocate the memory and finish the code generation */
codegen_finish(jd);
-#if !defined(NDEBUG)
- /* disassemble native stub */
+#if defined(ENABLE_STATISTICS)
+ /* must be done after codegen_finish() */
- if (opt_shownativestub) {
-#if defined(ENABLE_DISASSEMBLER)
- codegen_disassemble_nativestub(m,
- (u1 *) (ptrint) code->entrypoint,
- (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+ if (opt_stat)
+ size_stub_native += code->mcodelength;
#endif
- /* show data segment */
+#if !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER)
+ /* disassemble native stub */
- if (opt_showddatasegment)
- dseg_display(jd);
+ if (opt_DisassembleStubs) {
+# if defined(ENABLE_DEBUG_FILTER)
+ if (m->filtermatches & SHOW_FILTER_FLAG_SHOW_METHOD)
+# endif
+ {
+ codegen_disassemble_stub(m,
+ (u1 *) (ptrint) code->entrypoint,
+ (u1 *) (ptrint) code->entrypoint + (code->mcodelength - jd->cd->dseglen));
+
+ /* show data segment */
+
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
}
-#endif /* !defined(NDEBUG) */
+#endif /* !defined(NDEBUG) && defined(ENABLE_DISASSEMBLER) */
/* release memory */
/* codegen_disassemble_nativestub **********************************************
- Disassembles the generated native stub.
+ Disassembles the generated builtin or native stub.
*******************************************************************************/
#if defined(ENABLE_DISASSEMBLER)
-void codegen_disassemble_nativestub(methodinfo *m, u1 *start, u1 *end)
+void codegen_disassemble_stub(methodinfo *m, u1 *start, u1 *end)
{
- printf("Native stub: ");
- utf_fprint_printable_ascii_classname(stdout, m->class->name);
+ printf("Stub code: ");
+ if (m->class != NULL)
+ utf_fprint_printable_ascii_classname(stdout, m->class->name);
+ else
+ printf("NULL");
printf(".");
utf_fprint_printable_ascii(stdout, m->name);
utf_fprint_printable_ascii(stdout, m->descriptor);
- printf("\n\nLength: %d\n\n", (s4) (end - start));
+ printf("\nLength: %d\n\n", (s4) (end - start));
DISASSEMBLE(start, end);
}
The layout of the native stub stackframe should look like this:
- +---------------------------+ <- SP (of parent Java function)
+ +---------------------------+ <- java SP (of parent Java function)
| return address |
- +---------------------------+
+ +---------------------------+ <- data SP
| |
| stackframe info structure |
| |
| |
+---------------------------+
| |
+ | saved registers (if any) |
+ | |
+ +---------------------------+
+ | |
| arguments (if any) |
| |
- +---------------------------+ <- SP (native stub)
+ +---------------------------+ <- current SP (native stub)
*******************************************************************************/
-void codegen_start_native_call(u1 *datasp, u1 *pv, u1 *sp, u1 *ra)
+java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
{
- stackframeinfo *sfi;
- localref_table *lrt;
+ stackframeinfo_t *sfi;
+ localref_table *lrt;
+ methodinfo *m;
+ int32_t framesize;
+
+ uint8_t *datasp;
+ uint8_t *javasp;
+ uint64_t *arg_regs;
+ uint64_t *arg_stack;
+
+ STATISTICS(count_calls_java_to_native++);
+
+ /* Get the methodinfo. */
+
+ m = code_get_methodinfo_for_pv(pv);
+
+ assert(m);
+
+ framesize = *((int32_t *) (pv + FrameSize));
+
+ assert(framesize >= sizeof(stackframeinfo_t) + sizeof(localref_table));
+
+ /* calculate needed values */
+
+#if defined(__ALPHA__) || defined(__ARM__)
+ datasp = sp + framesize - SIZEOF_VOID_P;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) sp;
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__MIPS__)
+ /* MIPS always uses 8 bytes to store the RA */
+ datasp = sp + framesize - 8;
+ javasp = sp + framesize;
+#elif defined(__S390__)
+ datasp = sp + framesize - 8;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + 96);
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
+ datasp = sp + framesize;
+ javasp = sp + framesize + SIZEOF_VOID_P;
+ arg_regs = (uint64_t *) sp;
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__POWERPC__)
+ datasp = sp + framesize;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
+ arg_stack = (uint64_t *) javasp;
+#elif defined(__POWERPC64__)
+ datasp = sp + framesize;
+ javasp = sp + framesize;
+ arg_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
+ arg_stack = (uint64_t *) javasp;
+#else
+	/* XXX I was unable to do this port for SPARC64, sorry. (-michi) */
+ /* XXX maybe we need to pass the RA as argument there */
+ vm_abort("codegen_start_native_call: unsupported architecture");
+#endif
/* get data structures from stack */
- sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
- lrt = (localref_table *) (datasp - sizeof(stackframeinfo) -
- sizeof(localref_table));
-
- /* add a stackframeinfo to the chain */
-
- stacktrace_create_native_stackframeinfo(sfi, pv, sp, ra);
+ sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
+ lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
+ sizeof(localref_table));
-#if defined(ENABLE_JAVASE)
+#if defined(ENABLE_JNI)
/* add current JNI local references table to this thread */
- lrt->capacity = LOCALREFTABLE_CAPACITY;
- lrt->used = 0;
- lrt->localframes = 1;
- lrt->prev = LOCALREFTABLE;
+ localref_table_add(lrt);
+#endif
- /* clear the references array (memset is faster the a for-loop) */
+#if !defined(NDEBUG)
+# if defined(__ALPHA__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__X86_64__) || defined(__S390__)
+	/* print the call-trace if necessary */
+ /* BEFORE: filling the local reference table */
- MSET(lrt->refs, 0, java_objectheader*, LOCALREFTABLE_CAPACITY);
+ if (opt_TraceJavaCalls)
+ trace_java_call_enter(m, arg_regs, arg_stack);
+# endif
+#endif
+
+#if defined(ENABLE_HANDLES)
+ /* place all references into the local reference table */
+ /* BEFORE: creating stackframeinfo */
- LOCALREFTABLE = lrt;
+ localref_native_enter(m, arg_regs, arg_stack);
#endif
+
+ /* Add a stackframeinfo for this native method. We don't have RA
+ and XPC here. These are determined in
+ stacktrace_stackframeinfo_add. */
+
+ stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
+
+ /* Return a wrapped classinfo for static methods. */
+
+ if (m->flags & ACC_STATIC)
+ return LLNI_classinfo_wrap(m->class);
+ else
+ return NULL;
}
*******************************************************************************/
-java_objectheader *codegen_finish_native_call(u1 *datasp)
+java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
{
- stackframeinfo *sfi;
- stackframeinfo **psfi;
-#if defined(ENABLE_JAVASE)
- localref_table *lrt;
- localref_table *plrt;
- s4 localframes;
+ stackframeinfo_t *sfi;
+ java_handle_t *e;
+ java_object_t *o;
+ codeinfo *code;
+ methodinfo *m;
+ int32_t framesize;
+
+ uint8_t *datasp;
+ uint64_t *ret_regs;
+
+ /* get information from method header */
+
+ code = *((codeinfo **) (pv + CodeinfoPointer));
+ framesize = *((int32_t *) (pv + FrameSize));
+ assert(code);
+
+ /* get the methodinfo */
+
+ m = code->m;
+ assert(m);
+
+ /* calculate needed values */
+
+#if defined(__ALPHA__) || defined(__ARM__)
+ datasp = sp + framesize - SIZEOF_VOID_P;
+ ret_regs = (uint64_t *) sp;
+#elif defined(__MIPS__)
+ /* MIPS always uses 8 bytes to store the RA */
+ datasp = sp + framesize - 8;
+#elif defined(__S390__)
+ datasp = sp + framesize - 8;
+ ret_regs = (uint64_t *) (sp + 96);
+#elif defined(__I386__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + 2 * SIZEOF_VOID_P);
+#elif defined(__M68K__) || defined(__X86_64__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) sp;
+#elif defined(__POWERPC__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
+#elif defined(__POWERPC64__)
+ datasp = sp + framesize;
+ ret_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
+#else
+ vm_abort("codegen_finish_native_call: unsupported architecture");
#endif
- java_objectheader *e;
/* get data structures from stack */
- sfi = (stackframeinfo *) (datasp - sizeof(stackframeinfo));
-
- /* remove current stackframeinfo from chain */
-
- psfi = &STACKFRAMEINFO;
-
- *psfi = sfi->prev;
+ sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
-#if defined(ENABLE_JAVASE)
- /* release JNI local references tables for this thread */
+ /* Remove current stackframeinfo from chain. */
- lrt = LOCALREFTABLE;
+ stacktrace_stackframeinfo_remove(sfi);
- /* release all current local frames */
+#if defined(ENABLE_HANDLES)
+ /* unwrap the return value from the local reference table */
+ /* AFTER: removing the stackframeinfo */
+ /* BEFORE: releasing the local reference table */
- for (localframes = lrt->localframes; localframes >= 1; localframes--) {
- /* get previous frame */
-
- plrt = lrt->prev;
-
- /* Clear all reference entries (only for tables allocated on
- the Java heap). */
-
- if (localframes > 1)
- MSET(&lrt->refs[0], 0, java_objectheader*, lrt->capacity);
-
- lrt->prev = NULL;
+ localref_native_exit(m, ret_regs);
+#endif
- /* set new local references table */
+ /* get and unwrap the exception */
+ /* AFTER: removing the stackframe info */
+ /* BEFORE: releasing the local reference table */
- lrt = plrt;
- }
+ e = exceptions_get_and_clear_exception();
+ o = LLNI_UNWRAP(e);
- /* now store the previous local frames in the thread structure */
+#if defined(ENABLE_JNI)
+ /* release JNI local references table for this thread */
- LOCALREFTABLE = lrt;
+ localref_frame_pop_all();
+ localref_table_remove();
#endif
- /* get the exception and return it */
+#if !defined(NDEBUG)
+# if defined(__ALPHA__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__X86_64__) || defined(__S390__)
+	/* print the call-trace if necessary */
+ /* AFTER: unwrapping the return value */
- e = exceptions_get_and_clear_exception();
+ if (opt_TraceJavaCalls)
+ trace_java_call_exit(m, ret_regs);
+# endif
+#endif
- return e;
+ return o;
}
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of v for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
return tempregnum;
#endif
- if (!(v->flags & INMEMORY)) {
-#if defined(__ARM__) && defined(__ARMEL__)
- if (IS_2_WORD_TYPE(v->type) && (GET_HIGH_REG(v->vv.regoff) == REG_SPLIT))
- return PACK_REGS(GET_LOW_REG(v->vv.regoff),
- GET_HIGH_REG(tempregnum));
-#endif
-#if defined(__ARM__) && defined(__ARMEB__)
- if (IS_2_WORD_TYPE(v->type) && (GET_LOW_REG(v->vv.regoff) == REG_SPLIT))
- return PACK_REGS(GET_LOW_REG(tempregnum),
- GET_HIGH_REG(v->vv.regoff));
-#endif
+ if (!(v->flags & INMEMORY))
return v->vv.regoff;
- }
-
-#if defined(ENABLE_STATISTICS)
- if (opt_stat)
- count_spills_read++;
-#endif
return tempregnum;
}
+
/* codegen_reg_of_dst **********************************************************
This function determines a register, to which the result of an
spilled) this function returns tempregnum. If not already done,
regoff and flags are set in the stack location.
- On ARM we have to check if a long/double variable is splitted
- across reg/stack (HIGH_REG == REG_SPLIT). We return the actual
- register of dst.var for LOW_REG and the tempregnum for HIGH_REG in such
- cases. (michi 2005/07/24)
-
*******************************************************************************/
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
}
-#if defined(ENABLE_THREADS)
-void codegen_threadcritrestart(codegendata *cd, int offset)
-{
- cd->threadcritcurrent.mcoderestart = offset;
-}
+/* codegen_emit_phi_moves ****************************************************
+ Emits phi moves at the end of the basicblock.
-void codegen_threadcritstart(codegendata *cd, int offset)
+*******************************************************************************/
+
+#if defined(ENABLE_SSA)
+void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
{
- cd->threadcritcurrent.mcodebegin = offset;
-}
+ int lt_d,lt_s,i;
+ lsradata *ls;
+ codegendata *cd;
+ varinfo *s, *d;
+ instruction tmp_i;
+ cd = jd->cd;
+ ls = jd->ls;
-void codegen_threadcritstop(codegendata *cd, int offset)
-{
- cd->threadcritcurrent.next = cd->threadcrit;
- cd->threadcritcurrent.mcodeend = offset;
- cd->threadcrit = DNEW(codegen_critical_section_t);
- *(cd->threadcrit) = cd->threadcritcurrent;
- cd->threadcritcount++;
-}
+ MCODECHECK(512);
+
+ /* Moves from phi functions with highest indices have to be */
+ /* inserted first, since this is the order as is used for */
+ /* conflict resolution */
+
+ for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
+ lt_d = ls->phi_moves[bptr->nr][i][0];
+ lt_s = ls->phi_moves[bptr->nr][i][1];
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
#endif
+ if (lt_s == UNUSED) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf(" ... not processed \n");
+#endif
+ continue;
+ }
+
+ d = VAR(ls->lifetime[lt_d].v_index);
+ s = VAR(ls->lifetime[lt_s].v_index);
+
+
+ if (d->type == -1) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("...returning - phi lifetimes where joined\n");
+#endif
+ return;
+ }
+
+ if (s->type == -1) {
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose)
+ printf("...returning - phi lifetimes where joined\n");
+#endif
+ return;
+ }
+
+ tmp_i.opc = 0;
+ tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
+ tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
+ emit_copy(jd, &tmp_i);
+
+#if defined(SSA_DEBUG_VERBOSE)
+ if (compileverbose) {
+ if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
+ /* mem -> mem */
+ printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else if (IS_INMEMORY(s->flags)) {
+ /* mem -> reg */
+ printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else if (IS_INMEMORY(d->flags)) {
+ /* reg -> mem */
+ printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
+ }
+ else {
+ /* reg -> reg */
+ printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
+ }
+ printf("\n");
+ }
+#endif /* defined(SSA_DEBUG_VERBOSE) */
+ }
+}
+#endif /* defined(ENABLE_SSA) */
+
/*