/* src/vm/jit/jit.c - calls the code generation functions
- Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ Copyright (C) 1996-2005, 2006, 2007 R. Grafl, A. Krall, C. Kruegel,
C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
J. Wenninger, Institut f. Computersprachen - TU Wien
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Contact: cacao@cacaojvm.org
-
- Authors: Andreas Krall
- Reinhard Grafl
-
- Changes: Edwin Steiner
- Christian Thalinger
- Christian Ullrich
-
- $Id: jit.c 5780 2006-10-15 12:20:15Z edwin $
-
*/
#include "config.h"
-#include "vm/types.h"
#include <assert.h>
+#include "vm/types.h"
+
#include "mm/memory.h"
+
#include "native/native.h"
+
#include "toolbox/logging.h"
-#if defined(ENABLE_THREADS)
-# include "threads/native/lock.h"
-#else
-# include "threads/none/lock.h"
-#endif
+#include "threads/lock-common.h"
+#include "threads/threads-common.h"
-#include "vm/class.h"
#include "vm/global.h"
#include "vm/initialize.h"
-#include "vm/loader.h"
-#include "vm/method.h"
-#include "vm/options.h"
-#include "vm/statistics.h"
+
#include "vm/jit/asmpart.h"
# include "vm/jit/cfg.h"
#include "vm/jit/disass.h"
#include "vm/jit/dseg.h"
#include "vm/jit/jit.h"
+#include "vm/jit/md.h"
#include "vm/jit/parse.h"
#include "vm/jit/reg.h"
-# include "vm/jit/reorder.h"
-
#include "vm/jit/show.h"
#include "vm/jit/stack.h"
# include "vm/jit/optimizing/ssa.h"
#endif
-
-#if defined(ENABLE_IFCONV)
-# include "vm/jit/ifconv/ifconv.h"
+#if defined(ENABLE_INLINING)
+# include "vm/jit/inline/inline.h"
#endif
#include "vm/jit/loop/analyze.h"
#include "vm/jit/loop/graph.h"
#include "vm/jit/loop/loop.h"
-#include "vm/jit/verify/typecheck.h"
-#include "vm/rt-timing.h"
-#if defined(ENABLE_THREADS)
-# include "threads/native/threads.h"
+#if defined(ENABLE_IFCONV)
+# include "vm/jit/optimizing/ifconv.h"
#endif
+#include "vm/jit/optimizing/reorder.h"
+
+#include "vm/jit/verify/typecheck.h"
+
+#include "vmcore/class.h"
+#include "vmcore/loader.h"
+#include "vmcore/method.h"
+#include "vmcore/options.h"
+#include "vmcore/rt-timing.h"
+#include "vmcore/statistics.h"
+
/* debug macros ***************************************************************/
0, /* JAVA_POP 87 */
0, /* JAVA_POP2 88 */
1, /* JAVA_DUP 89 */
- 3, /* JAVA_DUP_X1 90 */
- 4, /* JAVA_DUP_X2 91 */
+ 1+3, /* JAVA_DUP_X1 90 */
+ 2+4, /* JAVA_DUP_X2 91 */
2, /* JAVA_DUP2 92 */
- 3, /* JAVA_DUP2_X1 93 */
- 4, /* JAVA_DUP2_X2 94 */
- 2, /* JAVA_SWAP 95 */
+ 2+5, /* JAVA_DUP2_X1 93 */
+ 3+6, /* JAVA_DUP2_X2 94 */
+ 1+2, /* JAVA_SWAP 95 */
1, /* JAVA_IADD 96 */
1, /* JAVA_LADD 97 */
1, /* JAVA_FADD 98 */
void jit_init(void)
{
	/* NOTE(review): one-time initialization of the JIT subsystems; the
	   elided hunks presumably call show_init() etc. -- confirm against
	   the full file */
+#if defined(ENABLE_JIT)
	/* initialize stack analysis subsystem */
	(void) stack_init();
+#endif
	/* initialize show subsystem */
	/* initialize codegen subsystem */
	codegen_init();
+
+	/* initialize code subsystem */
+
+	(void) code_init();
}
*******************************************************************************/
-static jitdata *jit_jitdata_new(methodinfo *m)
+jitdata *jit_jitdata_new(methodinfo *m)
{
	jitdata *jd;
	/* initialize variables */
	/* NOTE(review): the allocation of jd is elided in this hunk; the
	   assignments below assume jd points to valid storage */
-	jd->flags = 0;
-	jd->isleafmethod = true;
+	jd->flags = 0;
+	jd->exceptiontable = NULL;
+	jd->exceptiontablelength = 0;
+	jd->returncount = 0;
+	jd->branchtoentry = false;
+	jd->branchtoend = false;
+	/* NOTE(review): duplicate assignment -- returncount is already
+	   zeroed a few lines above; one of the two can be dropped */
+	jd->returncount = 0;
+	jd->returnblock = NULL;
+	jd->maxlocals = m->maxlocals;
+
+#if defined(ENABLE_THREADS)
+	/* synchronized methods need monitorenter/exit and thus a stack
+	   frame, so they can never be leaf methods */
+	if (checksync && (m->flags & ACC_SYNCHRONIZED))
+		jd->isleafmethod = false;
+	else
+#endif
+		jd->isleafmethod = true;
	return jd;
}
jd->flags |= JITDATA_FLAG_VERIFY;
#endif
+#if defined(ENABLE_PROFILING)
if (opt_prof)
jd->flags |= JITDATA_FLAG_INSTRUMENT;
+#endif
#if defined(ENABLE_IFCONV)
if (opt_ifconv)
jd->flags |= JITDATA_FLAG_IFCONV;
#endif
+#if defined(ENABLE_INLINING) && defined(ENABLE_INLINING_DEBUG)
+ if (opt_inlining && opt_inline_debug_all)
+ jd->flags |= JITDATA_FLAG_INLINE;
+#endif
+
if (opt_showintermediate)
jd->flags |= JITDATA_FLAG_SHOWINTERMEDIATE;
if (opt_verbosecall)
jd->flags |= JITDATA_FLAG_VERBOSECALL;
+#if defined(ENABLE_REPLACEMENT) && defined(ENABLE_INLINING)
+ if (opt_inlining)
+ jd->flags |= JITDATA_FLAG_COUNTDOWN;
+#endif
+
#if defined(ENABLE_JIT)
# if defined(ENABLE_INTRP)
if (!opt_intrp)
code_codeinfo_free(jd->code);
+#if defined(ENABLE_PROFILING)
/* Release memory for basic block profiling information. */
if (JITDATA_HAS_FLAG_INSTRUMENT(jd))
if (jd->code->bbfrequency != NULL)
MFREE(jd->code->bbfrequency, u4, jd->code->basicblockcount);
+#endif
}
else {
DEBUG_JIT_COMPILEVERBOSE("Running: ");
/* check for max. optimization level */
- optlevel = m->code->optlevel;
+ optlevel = (m->code) ? m->code->optlevel : 0;
+#if 0
if (optlevel == 1) {
/* log_message_method("not recompiling: ", m); */
return NULL;
}
+#endif
- log_message_method("Recompiling start: ", m);
+ DEBUG_JIT_COMPILEVERBOSE("Recompiling start: ");
STATISTICS(count_jit_calls++);
/* get the optimization flags for the current JIT run */
- jd->flags |= JITDATA_FLAG_REORDER;
- jd->flags |= JITDATA_FLAG_SHOWINTERMEDIATE;
- jd->flags |= JITDATA_FLAG_SHOWDISASSEMBLE;
-/* jd->flags |= JITDATA_FLAG_VERBOSECALL; */
+#if defined(ENABLE_VERIFIER)
+ jd->flags |= JITDATA_FLAG_VERIFY;
+#endif
+
+ /* jd->flags |= JITDATA_FLAG_REORDER; */
+ if (opt_showintermediate)
+ jd->flags |= JITDATA_FLAG_SHOWINTERMEDIATE;
+ if (opt_showdisassemble)
+ jd->flags |= JITDATA_FLAG_SHOWDISASSEMBLE;
+ if (opt_verbosecall)
+ jd->flags |= JITDATA_FLAG_VERBOSECALL;
+
+#if defined(ENABLE_INLINING)
+ if (opt_inlining)
+ jd->flags |= JITDATA_FLAG_INLINE;
+#endif
#if defined(ENABLE_JIT)
# if defined(ENABLE_INTRP)
compilingtime_stop();
#endif
- log_message_method("Recompiling done: ", m);
+ DEBUG_JIT_COMPILEVERBOSE("Recompiling done: ");
/* return pointer to the methods entry point */
#if defined(ENABLE_RT_TIMING)
struct timespec time_start,time_checks,time_parse,time_stack,
time_typecheck,time_loop,time_ifconv,time_alloc,
- time_rplpoints,time_codegen;
+ time_codegen;
#endif
RT_TIMING_GET_TIME(time_start);
DEBUG_JIT_COMPILEVERBOSE("Compiling: ");
+#if defined(ENABLE_DEBUG_FILTER)
+ show_filters_apply(jd->m);
+#endif
+
/* handle native methods and create a native stub */
if (m->flags & ACC_NATIVE) {
f = NULL;
#endif
- code = codegen_createnativestub(f, m);
+ code = codegen_generate_stub_native(m, f);
assert(!m->code); /* native methods are never recompiled */
m->code = code;
#if defined(ENABLE_STATISTICS)
if (opt_stat) {
- count_tryblocks += m->exceptiontablelength;
count_javacodesize += m->jcodelength + 18;
- count_javaexcsize += m->exceptiontablelength * SIZEOF_VOID_P;
+ count_tryblocks += jd->exceptiontablelength;
+ count_javaexcsize += jd->exceptiontablelength * SIZEOF_VOID_P;
}
#endif
return NULL;
#ifdef ENABLE_VERIFIER
- if (jd->flags & JITDATA_FLAG_VERIFY) {
+ if (JITDATA_HAS_FLAG_VERIFY(jd)) {
DEBUG_JIT_COMPILEVERBOSE("Typechecking: ");
/* call typecheck pass */
depthFirst(jd);
analyseGraph(jd);
optimize_loops(jd);
+ jit_renumber_basicblocks(jd);
}
#endif
RT_TIMING_GET_TIME(time_loop);
#if defined(ENABLE_IFCONV)
- if (JITDATA_HAS_FLAG_IFCONV(jd))
+ if (JITDATA_HAS_FLAG_IFCONV(jd)) {
if (!ifconv_static(jd))
return NULL;
+ jit_renumber_basicblocks(jd);
+ }
#endif
RT_TIMING_GET_TIME(time_ifconv);
+ /* inlining */
+
+#if defined(ENABLE_INLINING)
+ if (JITDATA_HAS_FLAG_INLINE(jd)) {
+ if (!inline_inline(jd))
+ return NULL;
+ }
+#endif
+
+#if defined(ENABLE_PROFILING)
/* Basic block reordering. I think this should be done after
if-conversion, as we could lose the ability to do the
if-conversion. */
- if (JITDATA_HAS_FLAG_REORDER(jd))
+ if (JITDATA_HAS_FLAG_REORDER(jd)) {
if (!reorder(jd))
return NULL;
+ jit_renumber_basicblocks(jd);
+ }
+#endif
DEBUG_JIT_COMPILEVERBOSE("Allocating registers: ");
# endif /* defined(ENABLE_LSRA) && !defined(ENABLE_SSA) */
#if defined(ENABLE_SSA)
/* allocate registers */
- if ((opt_lsra) && (cd->exceptiontablelength == 0)) {
+ if ((opt_lsra) && (jd->exceptiontablelength == 0)) {
jd->ls = DNEW(lsradata);
lsra(jd);
} else
# endif /* defined(ENABLE_SSA) */
{
- STATISTICS(count_locals_conflicts += (cd->maxlocals - 1) * (cd->maxlocals));
+ STATISTICS(count_locals_conflicts += (jd->maxlocals - 1) * (jd->maxlocals));
regalloc(jd);
}
#endif /* defined(ENABLE_JIT) */
RT_TIMING_GET_TIME(time_alloc);
+#if defined(ENABLE_PROFILING)
/* Allocate memory for basic block profiling information. This
_must_ be done after loop optimization and register allocation,
since they can change the basic block count. */
if (JITDATA_HAS_FLAG_INSTRUMENT(jd))
code->bbfrequency = MNEW(u4, jd->basicblockcount);
+#endif
DEBUG_JIT_COMPILEVERBOSE("Generating code: ");
- /* create the replacement points */
-#if 0
- if (!replace_create_replacement_points(jd))
- return NULL;
-#endif
- RT_TIMING_GET_TIME(time_rplpoints);
-
/* now generate the machine code */
#if defined(ENABLE_JIT)
} else
# endif
{
- if (!codegen(jd)) {
+ if (!codegen_generate(jd)) {
DEBUG_JIT_COMPILEVERBOSE("Exception while generating code: ");
return NULL;
DEBUG_JIT_COMPILEVERBOSE("Generating code done: ");
#if !defined(NDEBUG)
- /* intermediate and assembly code listings */
+#if defined(ENABLE_DEBUG_FILTER)
+ if (jd->m->filtermatches & SHOW_FILTER_FLAG_SHOW_METHOD)
+#endif
+ {
+ /* intermediate and assembly code listings */
- if (JITDATA_HAS_FLAG_SHOWINTERMEDIATE(jd)) {
- show_method(jd, SHOW_CODE);
- }
- else if (JITDATA_HAS_FLAG_SHOWDISASSEMBLE(jd)) {
+ if (JITDATA_HAS_FLAG_SHOWINTERMEDIATE(jd)) {
+ show_method(jd, SHOW_CODE);
+ }
+ else if (JITDATA_HAS_FLAG_SHOWDISASSEMBLE(jd)) {
# if defined(ENABLE_DISASSEMBLER)
- DISASSEMBLE(code->entrypoint,
- code->entrypoint + (code->mcodelength - cd->dseglen));
+ DISASSEMBLE(code->entrypoint,
+ code->entrypoint + (code->mcodelength - cd->dseglen));
# endif
- }
+ }
- if (opt_showddatasegment)
- dseg_display(jd);
+ if (opt_showddatasegment)
+ dseg_display(jd);
+ }
#endif
DEBUG_JIT_COMPILEVERBOSE("Compiling done: ");
RT_TIMING_TIME_DIFF(time_stack,time_typecheck,RT_TIMING_JIT_TYPECHECK);
RT_TIMING_TIME_DIFF(time_typecheck,time_loop,RT_TIMING_JIT_LOOP);
RT_TIMING_TIME_DIFF(time_loop,time_alloc,RT_TIMING_JIT_ALLOC);
- RT_TIMING_TIME_DIFF(time_alloc,time_rplpoints,RT_TIMING_JIT_RPLPOINTS);
- RT_TIMING_TIME_DIFF(time_rplpoints,time_codegen,RT_TIMING_JIT_CODEGEN);
+ RT_TIMING_TIME_DIFF(time_alloc,time_codegen,RT_TIMING_JIT_CODEGEN);
RT_TIMING_TIME_DIFF(time_start,time_codegen,RT_TIMING_JIT_TOTAL);
/* return pointer to the methods entry point */
}
+/* jit_invalidate_code *********************************************************
+
+   Mark the compiled code of the given method as invalid and take care that
+   it is replaced if necessary.
+
+   XXX Not fully implemented, yet.
+
+*******************************************************************************/
+
+void jit_invalidate_code(methodinfo *m)
+{
+	codeinfo *code;
+
+	code = m->code;
+
+	/* nothing to do if the method was never compiled or its code is
+	   already marked invalid */
+
+	if (code == NULL || CODE_IS_INVALID(code))
+		return;
+
+	CODE_SETFLAG_INVALID(code);
+
+	/* activate mappable replacement points */
+
+#if defined(ENABLE_REPLACEMENT)
+	replace_activate_replacement_points(code, true);
+#else
+	/* without on-stack replacement there is no safe way to retire the
+	   invalidated code, so this configuration aborts the VM */
+	vm_abort("invalidating code only works with ENABLE_REPLACEMENT");
+#endif
+}
+
+
+/* jit_request_optimization ****************************************************
+
+   Request optimization of the given method. If the code of the method is
+   unoptimized, it will be invalidated, so the next jit_get_current_code(m)
+   triggers an optimized recompilation.
+   If the method is already optimized, this function does nothing.
+
+   IN:
+       m................the method
+
+*******************************************************************************/
+
+void jit_request_optimization(methodinfo *m)
+{
+	codeinfo *code;
+
+	code = m->code;
+
+	/* only unoptimized (optlevel 0) code is invalidated; optimized code
+	   and not-yet-compiled methods are left untouched */
+
+	if (code && code->optlevel == 0)
+		jit_invalidate_code(m);
+}
+
+
+/* jit_get_current_code ********************************************************
+
+   Get the currently valid code for the given method. If there is no valid
+   code, (re)compile the method.
+
+   IN:
+       m................the method
+
+   RETURN VALUE:
+       the codeinfo* for the current code, or
+       NULL if an exception has been thrown during recompilation.
+
+*******************************************************************************/
+
+codeinfo *jit_get_current_code(methodinfo *m)
+{
+	assert(m);
+
+	/* if we have valid code, return it */
+
+	if (m->code && CODE_IS_VALID(m->code))
+		return m->code;
+
+	/* otherwise: recompile */
+
+	if (!jit_recompile(m))
+		return NULL;
+
+	/* a successful recompilation must have installed new code */
+
+	assert(m->code);
+
+	return m->code;
+}
+
+
/* jit_asm_compile *************************************************************
This method is called from asm_vm_call_method and does:
*******************************************************************************/
+#if defined(ENABLE_JIT)
u1 *jit_asm_compile(methodinfo *m, u1 *mptr, u1 *sp, u1 *ra)
{
stackframeinfo sfi;
u1 *pa;
ptrint *p;
- /* create the stackframeinfo (XPC is equal to RA) */
+ /* create the stackframeinfo (subtract 1 from RA as it points to the */
+ /* instruction after the call) */
- stacktrace_create_extern_stackframeinfo(&sfi, NULL, sp, ra, ra);
+ stacktrace_create_extern_stackframeinfo(&sfi, NULL, sp, ra, ra-1);
/* actually compile the method */
return entrypoint;
}
+#endif /* defined(ENABLE_JIT) */
/* jit_complement_condition ****************************************************
/* jit_check_basicblock_numbers ************************************************
- Assert that the ->nr of all blocks increases when traversing ->next.
+ Assert that the ->nr of the first block is zero and increases by 1 each
+ time ->next is traversed.
This function should be called before any analysis that relies on
- the basicblock numbers to increase strictly monotonically in the ->next
- sequence.
+ the basicblock numbers.
IN:
jitdata..........the current jitdata
s4 nr;
basicblock *bptr;
- nr = -1;
+ nr = 0;
for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
- assert(bptr->nr > nr);
- nr = bptr->nr;
+ assert(bptr->nr == nr);
+ nr++;
}
+
+ /* we have one block more than jd->basicblockcount (the end marker) */
+
+ assert(nr == jd->basicblockcount + 1);
}
#endif /* !defined(NDEBUG) */