1 /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
3 Copyright (C) 1996-2011
4 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 Copyright (C) 2009 Theobroma Systems Ltd.
7 This file is part of CACAO.
9 This program is free software; you can redistribute it and/or
10 modify it under the terms of the GNU General Public License as
11 published by the Free Software Foundation; either version 2, or (at
12 your option) any later version.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
24 All functions assume the following code area / data area layout:
28 | code area | code area grows to higher addresses
30 +-----------+ <-- start of procedure
32 | data area | data area grows to lower addresses
36 The functions first write into a temporary code/data area allocated by
37 "codegen_init". "codegen_finish" copies the code and data area into permanent
38 memory. All functions writing values into the data area return the offset
39 relative the begin of the code area (start of procedure).
55 #include "mm/memory.hpp"
57 #include "toolbox/avl.h"
58 #include "toolbox/list.hpp"
59 #include "toolbox/logging.hpp"
61 #include "native/llni.h"
62 #include "native/localref.hpp"
63 #include "native/native.hpp"
65 #include "threads/thread.hpp"
67 #include "vm/jit/builtin.hpp"
68 #include "vm/exceptions.hpp"
69 #include "vm/method.hpp"
70 #include "vm/options.h"
71 #include "vm/statistics.h"
72 #include "vm/string.hpp"
74 #include "vm/jit/abi.h"
75 #include "vm/jit/asmpart.h"
76 #include "vm/jit/code.hpp"
77 #include "vm/jit/codegen-common.hpp"
79 #if defined(ENABLE_DISASSEMBLER)
80 # include "vm/jit/disass.h"
83 #include "vm/jit/dseg.h"
84 #include "vm/jit/emit-common.hpp"
85 #include "vm/jit/jit.hpp"
86 #include "vm/jit/linenumbertable.hpp"
87 #include "vm/jit/methodheader.h"
88 #include "vm/jit/methodtree.h"
89 #include "vm/jit/patcher-common.hpp"
90 #include "vm/jit/replace.hpp"
91 #include "vm/jit/show.hpp"
92 #include "vm/jit/stacktrace.hpp"
93 #include "vm/jit/trace.hpp"
95 #include "vm/jit/optimizing/profile.hpp"
97 #if defined(ENABLE_SSA)
98 # include "vm/jit/optimizing/lsra.h"
99 # include "vm/jit/optimizing/ssa.h"
100 #elif defined(ENABLE_LSRA)
101 # include "vm/jit/allocator/lsra.h"
104 #if defined(ENABLE_INTRP)
105 #include "vm/jit/intrp/intrp.h"
108 #if defined(ENABLE_VMLOG)
109 #include <vmlog_cacao.h>
113 /* codegen_init ****************************************************************
/* One-time, VM-global initialization of the code generator subsystem.
   NOTE(review): the body is not visible in this excerpt — confirm what
   state it sets up (e.g. methodtree/avl structures) against the full file. */
117 *******************************************************************************/
119 void codegen_init(void)
124 /* codegen_setup ***************************************************************
126 Allocates and initialises code area, data area and references.
128 *******************************************************************************/
130 void codegen_setup(jitdata *jd)
/* Allocates and initializes the per-compilation codegen data (cd) for jd:
   a temporary machine-code buffer of MCODEINITSIZE bytes from dump memory,
   the (interpreter-only) ncode buffer, and empty jump/data/branch-label/
   linenumber lists.  NOTE(review): some lines of the original body are
   elided in this excerpt (e.g. the fetch of cd from jd). */
135 /* get required compiler data */
140 /* initialize members */
142 // Set flags as requested.
143 if (opt_AlwaysEmitLongBranches) {
/* Force long-branch encoding from the start instead of waiting for a
   CODEGENDATA_FLAG_ERROR-driven recompile (see codegen_generate). */
144 cd->flags = CODEGENDATA_FLAG_LONGBRANCHES;
150 cd->mcodebase = (u1*) DumpMemory::allocate(MCODEINITSIZE);
151 cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
152 cd->mcodesize = MCODEINITSIZE;
154 /* initialize mcode variables */
156 cd->mcodeptr = cd->mcodebase;
157 cd->lastmcodeptr = cd->mcodebase;
159 #if defined(ENABLE_INTRP)
160 /* native dynamic superinstructions variables */
163 cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
164 cd->ncodesize = NCODEINITSIZE;
166 /* initialize ncode variables */
168 cd->ncodeptr = cd->ncodebase;
170 cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
171 cd->superstarts = NULL;
178 cd->jumpreferences = NULL;
/* Data-segment references only exist on architectures that embed data
   addresses in code (and in the interpreter build). */
180 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
181 cd->datareferences = NULL;
184 cd->brancheslabel = new DumpList<branch_label_ref_t*>();
185 cd->linenumbers = new DumpList<Linenumber>();
189 /* codegen_reset ***************************************************************
191 Resets the codegen data structure so we can recompile the method.
193 *******************************************************************************/
195 static void codegen_reset(jitdata *jd)
/* Resets the codegen data so the method can be re-emitted (used by
   codegen_generate for the long-branches retry).  The already allocated
   mcode buffer is reused since its size is close to what the second pass
   needs.  NOTE(review): declarations of cd/code/bptr are elided here. */
201 /* get required compiler data */
206 /* reset error flag */
208 cd->flags &= ~CODEGENDATA_FLAG_ERROR;
210 /* reset some members, we reuse the code memory already allocated
211 as this should have almost the correct size */
213 cd->mcodeptr = cd->mcodebase;
214 cd->lastmcodeptr = cd->mcodebase;
219 cd->jumpreferences = NULL;
221 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
222 cd->datareferences = NULL;
225 cd->brancheslabel = new DumpList<branch_label_ref_t*>();
226 cd->linenumbers = new DumpList<Linenumber>();
228 /* We need to clear the mpc and the branch references from all
229 basic blocks as they will definitely change. */
231 for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
233 bptr->branchrefs = NULL;
236 /* We need to clear all the patcher references from the codeinfo
237 since they all will be regenerated */
239 patcher_list_reset(code);
241 #if defined(ENABLE_REPLACEMENT)
/* Replacement (on-stack-replacement) metadata is also regenerated. */
242 code->rplpoints = NULL;
243 code->rplpointcount = 0;
244 code->regalloc = NULL;
245 code->regalloccount = 0;
246 code->globalcount = 0;
251 /* codegen_generate ************************************************************
253 Generates the code for the currently compiled method.
255 *******************************************************************************/
257 bool codegen_generate(jitdata *jd)
/* Drives code generation for the method currently being compiled:
   calls the machine-dependent codegen_emit, and on a branch-displacement
   overflow (CODEGENDATA_FLAG_ERROR + LONGBRANCHES) resets the codegen
   data and re-emits once with long branches enabled.  Returns false when
   codegen_emit fails.  NOTE(review): the reset call and final
   codegen_finish/return lines are elided in this excerpt. */
261 /* get required compiler data */
265 /* call the machine-dependent code generation function */
267 if (!codegen_emit(jd))
270 /* check for an error */
272 if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
273 /* check for long-branches flag, if it is set we recompile the
278 log_message_method("Re-generating code: ", jd->m);
281 /* XXX maybe we should tag long-branches-methods for recompilation */
283 if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
284 /* we have to reset the codegendata structure first */
288 /* and restart the compiler run */
290 if (!codegen_emit(jd))
/* Error flag set but long-branches already enabled: no further
   recovery strategy exists, so abort the VM with diagnostics. */
294 os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
299 log_message_method("Re-generating code done: ", jd->m);
303 /* reallocate the memory and finish the code generation */
307 /* everything's ok */
313 /* codegen_close ***************************************************************
317 *******************************************************************************/
319 void codegen_close(void)
/* VM-shutdown counterpart of codegen_init; currently only documents the
   outstanding cleanup (see TODO below). */
321 /* TODO: release avl tree on i386 and x86_64 */
325 /* codegen_increase ************************************************************
329 *******************************************************************************/
331 void codegen_increase(codegendata *cd)
/* Grows the temporary machine-code buffer (doubling it, per the comment
   below) and rebases mcodeptr — and, on architectures that patch
   previously emitted code, lastmcodeptr — relative to the new base.
   Callers must not hold raw pointers into the old buffer across this
   call.  NOTE(review): the reallocate size arguments and the
   mcodesize update are elided in this excerpt. */
335 /* save old mcodebase pointer */
337 oldmcodebase = cd->mcodebase;
339 /* reallocate to new, doubled memory */
341 cd->mcodebase = (u1*) DumpMemory::reallocate(cd->mcodebase,
345 cd->mcodeend = cd->mcodebase + cd->mcodesize;
347 /* set new mcodeptr */
349 cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
351 #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
352 || defined(__SPARC_64__)
353 /* adjust the pointer to the last patcher position */
355 if (cd->lastmcodeptr != NULL)
356 cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
361 /* codegen_ncode_increase ******************************************************
365 *******************************************************************************/
367 #if defined(ENABLE_INTRP)
368 u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
/* Interpreter-only: grows the native superinstruction (ncode) buffer and
   returns the caller's ncodeptr translated into the new buffer.
   NOTE(review): the DMREALLOC size arguments and the ncodesize update
   are elided in this excerpt. */
372 /* save old ncodebase pointer */
374 oldncodebase = cd->ncodebase;
376 /* reallocate to new, doubled memory */
378 cd->ncodebase = DMREALLOC(cd->ncodebase,
384 /* return the new ncodeptr */
386 return (cd->ncodebase + (ncodeptr - oldncodebase));
391 /* codegen_add_branch_ref ******************************************************
393 Prepends an branch to the list.
395 *******************************************************************************/
397 void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
/* Records a forward branch to a not-yet-emitted basic block: allocates a
   branchref from dump memory, stamps it with the current code offset
   (branchmpc), condition, register and options, and prepends it to
   target->branchrefs.  The branch is patched later by
   codegen_resolve_branchrefs once target->mpc is known. */
402 STATISTICS(count_branches_unresolved++);
404 /* calculate the mpc of the branch instruction */
406 branchmpc = cd->mcodeptr - cd->mcodebase;
408 br = (branchref*) DumpMemory::allocate(sizeof(branchref));
410 br->branchmpc = branchmpc;
411 br->condition = condition;
413 br->options = options;
414 br->next = target->branchrefs;
416 target->branchrefs = br;
420 /* codegen_resolve_branchrefs **************************************************
422 Resolves and patches the branch references of a given basic block.
424 *******************************************************************************/
426 void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
/* Patches every pending branch recorded against bptr now that its code
   offset (bptr->mpc) is known: temporarily rewinds cd->mcodeptr to each
   branch site, re-emits the branch via emit_bccz, then restores
   cd->mcodeptr so normal emission continues where it left off. */
431 /* Save the mcodeptr because in the branch emitting functions
432 we generate code somewhere inside already generated code,
433 but we're still in the actual code generation phase. */
435 mcodeptr = cd->mcodeptr;
437 /* just to make sure */
439 assert(bptr->mpc >= 0);
441 for (br = bptr->branchrefs; br != NULL; br = br->next) {
442 /* temporary set the mcodeptr */
444 cd->mcodeptr = cd->mcodebase + br->branchmpc;
446 /* emit_bccz and emit_branch emit the correct code, even if we
447 pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
449 emit_bccz(cd, bptr, br->condition, br->reg, br->options);
452 /* restore mcodeptr */
454 cd->mcodeptr = mcodeptr;
458 /* codegen_branch_label_add ****************************************************
460 Append an branch to the label-branch list.
462 *******************************************************************************/
464 void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
/* Records an unresolved branch to a numeric label (rather than a basic
   block): captures the current code offset and branch attributes in a
   dump-allocated branch_label_ref_t and appends it to cd->brancheslabel.
   NOTE(review): the assignments of br->mpc/br->label/br->reg are elided
   in this excerpt. */
466 // Calculate the current mpc.
467 int32_t mpc = cd->mcodeptr - cd->mcodebase;
469 branch_label_ref_t* br = (branch_label_ref_t*) DumpMemory::allocate(sizeof(branch_label_ref_t));
473 br->condition = condition;
475 br->options = options;
477 // Add the branch to the list.
478 cd->brancheslabel->push_back(br);
482 /* codegen_set_replacement_point_notrap ****************************************
484 Record the position of a non-trappable replacement point.
486 *******************************************************************************/
488 #if defined(ENABLE_REPLACEMENT)
/* Two signatures: the type-checking variant (with assert on rp->type) is
   presumably selected by a debug/assertion build — the #if between them
   is elided in this excerpt; confirm against the full file. */
490 void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
492 void codegen_set_replacement_point_notrap(codegendata *cd)
/* Records the current code offset as the pc of the next non-trappable
   replacement point and advances the replacement-point cursor.  The pc
   is stored as an offset (cast to u1*) until codegen_finish rebases it. */
495 assert(cd->replacementpoint);
496 assert(cd->replacementpoint->type == type);
497 assert(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP);
499 cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
501 cd->replacementpoint++;
503 #endif /* defined(ENABLE_REPLACEMENT) */
506 /* codegen_set_replacement_point ***********************************************
508 Record the position of a trappable replacement point.
510 *******************************************************************************/
512 #if defined(ENABLE_REPLACEMENT)
/* Two signatures as above; the selecting #if is elided in this excerpt. */
514 void codegen_set_replacement_point(codegendata *cd, s4 type)
516 void codegen_set_replacement_point(codegendata *cd)
/* Records the current code offset as the pc of the next trappable
   replacement point, advances the cursor, and reserves room for a
   later patcher call by pushing lastmcodeptr past PATCHER_CALL_SIZE. */
519 assert(cd->replacementpoint);
520 assert(cd->replacementpoint->type == type);
521 assert(!(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP));
523 cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
525 cd->replacementpoint++;
528 /* XXX actually we should use an own REPLACEMENT_NOPS here! */
529 if (opt_TestReplacement)
533 /* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
535 cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
537 #endif /* defined(ENABLE_REPLACEMENT) */
540 /* codegen_finish **************************************************************
542 Finishes the code generation. A new memory, large enough for both
543 data and code, is allocated and data and code are copied together
544 to their final layout, unresolved jumps are resolved, ...
546 *******************************************************************************/
548 void codegen_finish(jitdata *jd)
/* Finalizes code generation: allocates one permanent block holding the
   data segment (growing down) followed by the code (growing up), copies
   both from the temporary buffers, resolves jump tables, data-segment
   references and replacement points against the final entrypoint,
   registers the method in the methodtree, and flushes the caches.
   NOTE(review): local declarations (mcodelen, epoint, jr, rp, i, ...)
   and several #else/#endif arms are elided in this excerpt. */
551 #if defined(ENABLE_INTRP)
559 /* Get required compiler data. */
561 codeinfo* code = jd->code;
562 codegendata* cd = jd->cd;
563 registerdata* rd = jd->rd;
565 /* prevent compiler warning */
567 #if defined(ENABLE_INTRP)
571 /* calculate the code length */
573 mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
575 #if defined(ENABLE_STATISTICS)
577 count_code_len += mcodelen;
578 count_data_len += cd->dseglen;
582 alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
584 #if defined(ENABLE_INTRP)
586 ncodelen = cd->ncodeptr - cd->ncodebase;
588 ncodelen = 0; /* avoid compiler warning */
592 cd->dseglen = MEMORY_ALIGN(cd->dseglen, MAX_ALIGN);
593 alignedlen = alignedmcodelen + cd->dseglen;
595 #if defined(ENABLE_INTRP)
597 alignedlen += ncodelen;
601 /* allocate new memory */
603 code->mcodelength = mcodelen + cd->dseglen;
604 code->mcode = CNEW(u1, alignedlen);
606 /* set the entrypoint of the method */
608 assert(code->entrypoint == NULL);
/* Entry point sits after the data segment: data is addressed at negative
   offsets from it (see the layout comment at the top of this file). */
609 code->entrypoint = epoint = (code->mcode + cd->dseglen);
611 /* fill the data segment (code->entrypoint must already be set!) */
615 /* copy code to the new location */
617 MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
619 #if defined(ENABLE_INTRP)
620 /* relocate native dynamic superinstruction code (if any) */
623 cd->mcodebase = code->entrypoint;
626 u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
628 MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
630 /* flush the instruction and data caches */
632 md_cacheflush(ncodebase, ncodelen);
634 /* set some cd variables for dynamic_super_rerwite */
636 cd->ncodebase = ncodebase;
639 cd->ncodebase = NULL;
642 dynamic_super_rewrite(cd);
646 /* Fill runtime information about generated code. */
648 code->stackframesize = cd->stackframesize;
649 code->synchronizedoffset = rd->memuse * 8;
650 code->savedintcount = INT_SAV_CNT - rd->savintreguse;
651 code->savedfltcount = FLT_SAV_CNT - rd->savfltreguse;
652 #if defined(HAS_ADDRESS_REGISTER_FILE)
653 code->savedadrcount = ADR_SAV_CNT - rd->savadrreguse;
656 /* Create the exception table. */
658 exceptiontable_create(jd);
660 /* Create the linenumber table. */
662 code->linenumbertable = new LinenumberTable(jd);
664 /* jump table resolving */
/* Jump-table slots were emitted as offsets; convert them to absolute
   addresses relative to the final entry point. */
666 for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
667 *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
668 (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
670 /* patcher resolving */
674 #if defined(ENABLE_REPLACEMENT)
675 /* replacement point resolving */
/* rp->pc currently holds an offset (stored by
   codegen_set_replacement_point); rebase it to an absolute address. */
680 rp = code->rplpoints;
681 for (i=0; i<code->rplpointcount; ++i, ++rp) {
682 rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
685 #endif /* defined(ENABLE_REPLACEMENT) */
687 /* Insert method into methodtree to find the entrypoint. */
689 methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
691 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
692 /* resolve data segment references */
694 dseg_resolve_datareferences(jd);
697 /* flush the instruction and data caches */
699 md_cacheflush(code->mcode, code->mcodelength);
703 /* codegen_start_native_call ***************************************************
705 Prepares the stuff required for a native (JNI) function call:
707 - adds a stackframe info structure to the chain, for stacktraces
708 - prepares the local references table on the stack
710 The layout of the native stub stackframe should look like this:
712 +---------------------------+ <- java SP (of parent Java function)
714 +---------------------------+ <- data SP
716 | stackframe info structure |
718 +---------------------------+
720 | local references table |
722 +---------------------------+
724 | saved registers (if any) |
726 +---------------------------+
728 | arguments (if any) |
730 +---------------------------+ <- current SP (native stub)
732 *******************************************************************************/
734 java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
/* Called from the native stub prolog before entering a JNI function:
   locates the stackframeinfo and localref table inside the stub frame
   (per-architecture layout below), registers both with the current
   thread, optionally traces the call, and for static methods returns
   the wrapped class handle (passed as the JNI jclass argument).
   NOTE(review): local declarations (code, m, framesize, datasp, javasp,
   arg_regs, arg_stack) and some #else/#endif arms are elided here. */
736 stackframeinfo_t *sfi;
747 STATISTICS(count_calls_java_to_native++);
749 // Get information from method header.
750 code = code_get_codeinfo_for_pv(pv);
751 assert(code != NULL);
753 framesize = md_stacktrace_get_framesize(code);
754 assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));
756 // Get the methodinfo.
757 m = code_get_methodinfo_for_pv(pv);
760 /* calculate needed values */
/* Per-architecture frame layout: where the data SP sits relative to the
   stub SP and where register/stack arguments were spilled by the stub. */
762 #if defined(__ALPHA__) || defined(__ARM__)
763 datasp = sp + framesize - SIZEOF_VOID_P;
764 javasp = sp + framesize;
765 arg_regs = (uint64_t *) sp;
766 arg_stack = (uint64_t *) javasp;
767 #elif defined(__MIPS__)
768 /* MIPS always uses 8 bytes to store the RA */
769 datasp = sp + framesize - 8;
770 javasp = sp + framesize;
771 # if SIZEOF_VOID_P == 8
772 arg_regs = (uint64_t *) sp;
774 arg_regs = (uint64_t *) (sp + 5 * 8);
776 arg_stack = (uint64_t *) javasp;
777 #elif defined(__S390__)
778 datasp = sp + framesize - 8;
779 javasp = sp + framesize;
780 arg_regs = (uint64_t *) (sp + 96);
781 arg_stack = (uint64_t *) javasp;
782 #elif defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
783 datasp = sp + framesize;
784 javasp = sp + framesize + SIZEOF_VOID_P;
785 arg_regs = (uint64_t *) sp;
786 arg_stack = (uint64_t *) javasp;
787 #elif defined(__POWERPC__)
788 datasp = sp + framesize;
789 javasp = sp + framesize;
790 arg_regs = (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
791 arg_stack = (uint64_t *) javasp;
792 #elif defined(__POWERPC64__)
793 datasp = sp + framesize;
794 javasp = sp + framesize;
795 arg_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
796 arg_stack = (uint64_t *) javasp;
798 /* XXX is was unable to do this port for SPARC64, sorry. (-michi) */
799 /* XXX maybe we need to pass the RA as argument there */
800 os::abort("codegen_start_native_call: unsupported architecture");
803 /* get data structures from stack */
805 sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
806 lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
807 sizeof(localref_table));
809 #if defined(ENABLE_JNI)
810 /* add current JNI local references table to this thread */
812 localref_table_add(lrt);
816 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
817 /* print the call-trace if necesarry */
818 /* BEFORE: filling the local reference table */
820 if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
821 trace_java_call_enter(m, arg_regs, arg_stack);
825 #if defined(ENABLE_HANDLES)
826 /* place all references into the local reference table */
827 /* BEFORE: creating stackframeinfo */
829 localref_native_enter(m, arg_regs, arg_stack);
832 /* Add a stackframeinfo for this native method. We don't have RA
833 and XPC here. These are determined in
834 stacktrace_stackframeinfo_add. */
836 stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
838 /* Return a wrapped classinfo for static methods. */
840 if (m->flags & ACC_STATIC)
841 return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
847 /* codegen_finish_native_call **************************************************
849 Removes the stuff required for a native (JNI) function call.
850 Additionally it checks for an exceptions and in case, get the
851 exception object and clear the pointer.
853 *******************************************************************************/
855 java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
/* Called from the native stub epilog after a JNI function returns:
   removes the stackframeinfo from the thread's chain, unwraps the
   return value, fetches and clears any pending exception (returned to
   the stub, which raises it), releases the JNI localref table, and
   optionally traces the call exit.  Ordering constraints are noted on
   each step below.  NOTE(review): local declarations (code, m, e,
   framesize, datasp, ret_regs) and the final return are elided here. */
857 stackframeinfo_t *sfi;
867 // Get information from method header.
868 code = code_get_codeinfo_for_pv(pv);
869 assert(code != NULL);
871 framesize = md_stacktrace_get_framesize(code);
873 // Get the methodinfo.
877 /* calculate needed values */
/* Per-architecture layout mirroring codegen_start_native_call, but only
   the return-value registers are needed here. */
879 #if defined(__ALPHA__) || defined(__ARM__)
880 datasp = sp + framesize - SIZEOF_VOID_P;
881 ret_regs = (uint64_t *) sp;
882 #elif defined(__MIPS__)
883 /* MIPS always uses 8 bytes to store the RA */
884 datasp = sp + framesize - 8;
885 # if SIZEOF_VOID_P == 8
886 ret_regs = (uint64_t *) sp;
888 ret_regs = (uint64_t *) (sp + 1 * 8);
890 #elif defined(__S390__)
891 datasp = sp + framesize - 8;
892 ret_regs = (uint64_t *) (sp + 96);
893 #elif defined(__I386__)
894 datasp = sp + framesize;
895 ret_regs = (uint64_t *) (sp + 2 * SIZEOF_VOID_P);
896 #elif defined(__M68K__)
897 datasp = sp + framesize;
898 ret_regs = (uint64_t *) (sp + 2 * 8);
899 #elif defined(__X86_64__)
900 datasp = sp + framesize;
901 ret_regs = (uint64_t *) sp;
902 #elif defined(__POWERPC__)
903 datasp = sp + framesize;
904 ret_regs = (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
905 #elif defined(__POWERPC64__)
906 datasp = sp + framesize;
907 ret_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
909 os::abort("codegen_finish_native_call: unsupported architecture");
912 /* get data structures from stack */
914 sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
916 /* Remove current stackframeinfo from chain. */
918 stacktrace_stackframeinfo_remove(sfi);
920 #if defined(ENABLE_HANDLES)
921 /* unwrap the return value from the local reference table */
922 /* AFTER: removing the stackframeinfo */
923 /* BEFORE: releasing the local reference table */
925 localref_native_exit(m, ret_regs);
928 /* get and unwrap the exception */
929 /* AFTER: removing the stackframe info */
930 /* BEFORE: releasing the local reference table */
932 e = exceptions_get_and_clear_exception();
935 #if defined(ENABLE_JNI)
936 /* release JNI local references table for this thread */
938 localref_frame_pop_all();
939 localref_table_remove();
943 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
944 /* print the call-trace if necesarry */
945 /* AFTER: unwrapping the return value */
947 if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
948 trace_java_call_exit(m, ret_regs);
956 /* codegen_reg_of_var **********************************************************
958 This function determines a register, to which the result of an
959 operation should go, when it is ultimatively intended to store the
960 result in pseudoregister v. If v is assigned to an actual
961 register, this register will be returned. Otherwise (when v is
962 spilled) this function returns tempregnum. If not already done,
963 regoff and flags are set in the stack location.
965 *******************************************************************************/
967 s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
/* Returns the register holding pseudoregister v: v's own register when
   it is register-allocated, otherwise tempregnum (v is spilled).
   NOTE(review): the body below the INMEMORY test is elided here. */
969 if (!(v->flags & INMEMORY))
976 /* codegen_reg_of_dst **********************************************************
978 This function determines a register, to which the result of an
979 operation should go, when it is ultimatively intended to store the
980 result in iptr->dst.var. If dst.var is assigned to an actual
981 register, this register will be returned. Otherwise (when it is
982 spilled) this function returns tempregnum. If not already done,
983 regoff and flags are set in the stack location.
985 *******************************************************************************/
987 s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
/* Convenience wrapper: register choice for iptr's destination variable,
   delegating to codegen_reg_of_var (see its header comment). */
989 return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
993 * Fix up register locations in the case where control is transferred to an
994 * exception handler block via normal control flow (no exception).
996 static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
/* When control reaches an exception-handler block through normal flow
   (no exception was thrown), the handler's single interface in-slot may
   live anywhere; load it and copy it into REG_ITMP1_XPTR, the register
   the handler code reads the exception object from. */
998 // Exception handlers have exactly 1 in-slot
999 assert(bptr->indepth == 1);
1000 varinfo *var = VAR(bptr->invars[0]);
1001 int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1002 emit_load(jd, NULL, var, d);
1003 // Copy the interface variable to ITMP1 (XPTR) because that's where
1004 // the handler expects it.
1005 emit_imove(jd->cd, d, REG_ITMP1_XPTR);
1009 * Generates machine code.
1011 bool codegen_emit(jitdata *jd)
1014 builtintable_entry* bte = 0;
1016 int32_t s1, s2, /*s3,*/ d;
1021 // Get required compiler data.
1022 //methodinfo* m = jd->m;
1023 codeinfo* code = jd->code;
1024 codegendata* cd = jd->cd;
1025 registerdata* rd = jd->rd;
1026 #if defined(ENABLE_SSA)
1027 lsradata* ls = jd->ls;
1028 bool last_cmd_was_goto = false;
1031 // Space to save used callee saved registers.
1032 int32_t savedregs_num = 0;
1033 savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1034 savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1035 #ifdef HAS_ADDRESS_REGISTER_FILE
1036 savedregs_num += (ADR_SAV_CNT - rd->savadrreguse);
1039 // Calculate size of stackframe.
1040 cd->stackframesize = rd->memuse + savedregs_num;
1042 // Space to save the return address.
1043 #if STACKFRAME_RA_TOP_OF_FRAME
1044 # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1045 if (!code_is_leafmethod(code))
1047 cd->stackframesize += 1;
1050 // Space to save argument of monitor_enter.
1051 #if defined(ENABLE_THREADS)
1052 if (checksync && code_is_synchronized(code))
1053 # if STACKFRAME_SYNC_NEEDS_TWO_SLOTS
1054 /* On some architectures the stack position for the argument can
1055 not be shared with place to save the return register values to
1056 survive monitor_exit since both values reside in the same register. */
1057 cd->stackframesize += 2;
1059 cd->stackframesize += 1;
1063 // Keep stack of non-leaf functions 16-byte aligned for calls into
1065 if (!code_is_leafmethod(code) || JITDATA_HAS_FLAG_VERBOSECALL(jd))
1066 #if STACKFRMAE_RA_BETWEEN_FRAMES
1067 ALIGN_ODD(cd->stackframesize);
1069 ALIGN_EVEN(cd->stackframesize);
1072 #if defined(SPECIALMEMUSE)
1073 // On architectures having a linkage area, we can get rid of the whole
1074 // stackframe in leaf functions without saved registers.
1075 if (code_is_leafmethod(code) && (cd->stackframesize == LA_SIZE_IN_POINTERS))
1076 cd->stackframesize = 0;
1080 * SECTION 1: Method header generation.
1083 // The method header was reduced to the bare minimum of one pointer
1084 // to the codeinfo structure, which in turn contains all runtime
1085 // information. However this section together with the methodheader.h
1086 // file will be kept alive for historical reasons. It might come in
1087 // handy at some point.
1089 (void) dseg_add_unique_address(cd, code); ///< CodeinfoPointer
1091 // XXX, REMOVEME: We still need it for exception handling in assembler.
1092 // XXX ARM, M68K: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1093 #if defined(__I386__)
1094 int align_off = (cd->stackframesize != 0) ? 4 : 0;
1095 (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize */
1097 (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
1099 // XXX M68K: We use the IntSave as a split field for the adr now
1100 // (void) dseg_add_unique_s4(cd, (ADR_SAV_CNT - rd->savadrreguse) << 16 | (INT_SAV_CNT - rd->savintreguse)); /* IntSave */
1101 (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1102 (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1103 (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1106 * SECTION 2: Method prolog generation.
1109 #if defined(ENABLE_PROFILING)
1110 // Generate method profiling code.
1111 if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1113 // Count method frequency.
1114 emit_profile_method(cd, code);
1116 // Start CPU cycle counting.
1117 emit_profile_cycle_start(cd, code);
1121 // Emit code for the method prolog.
1122 codegen_emit_prolog(jd);
1124 #if defined(ENABLE_THREADS)
1125 // Emit code to call monitorenter function.
1126 if (checksync && code_is_synchronized(code))
1127 emit_monitor_enter(jd, rd->memuse * 8);
1130 #if !defined(NDEBUG)
1131 // Call trace function.
1132 if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1133 emit_verbosecall_enter(jd);
1136 #if defined(ENABLE_SSA)
1137 // With SSA the header is basicblock 0, insert phi moves if necessary.
1139 codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1142 // Create replacement points.
1143 REPLACEMENT_POINTS_INIT(cd, jd);
1146 * SECTION 3: ICMD code generation.
1149 // Walk through all basic blocks.
1150 for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1152 bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1154 // Is this basic block reached?
1155 if (bptr->flags < BBREACHED)
1158 // Branch resolving.
1159 codegen_resolve_branchrefs(cd, bptr);
1161 // Handle replacement points.
1162 REPLACEMENT_POINT_BLOCK_START(cd, bptr);
1164 #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1165 // Generate countdown trap code.
1166 methodinfo* m = jd->m;
1167 if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1168 if (cd->replacementpoint[-1].flags & RPLPOINT_FLAG_COUNTDOWN) {
1170 emit_trap_countdown(cd, &(m->hitcountdown));
1175 #if defined(ENABLE_PROFILING)
1176 // Generate basicblock profiling code.
1177 if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1179 // Count basicblock frequency.
1180 emit_profile_basicblock(cd, code, bptr);
1182 // If this is an exception handler, start profiling again.
1183 if (bptr->type == BBTYPE_EXH)
1184 emit_profile_cycle_start(cd, code);
1188 // Copy interface registers to their destination.
1189 int32_t indepth = bptr->indepth;
1190 // XXX Check if this is true for all archs.
1191 MCODECHECK(64+indepth); // All
1192 MCODECHECK(128+indepth); // PPC64
1193 MCODECHECK(512); // I386, X86_64, S390
1194 #if defined(ENABLE_SSA)
1195 // XXX Check if this is correct and add a propper comment!
1197 last_cmd_was_goto = false;
1199 #elif defined(ENABLE_LSRA)
1201 while (indepth > 0) {
1203 var = VAR(bptr->invars[indepth]);
1204 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1205 if (!IS_INMEMORY(src->flags))
1209 // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1210 // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1211 // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1212 emit_imove(cd, REG_ITMP1_XPTR, d);
1213 emit_store(jd, NULL, var, d);
1218 while (indepth > 0) {
1220 var = VAR(bptr->invars[indepth]);
1221 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1222 d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1223 // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1224 // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1225 // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1226 emit_imove(cd, REG_ITMP1_XPTR, d);
1227 emit_store(jd, NULL, var, d);
1230 assert((var->flags & INOUT));
1233 #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1237 // Walk through all instructions.
1238 int32_t len = bptr->icount;
1239 uint16_t currentline = 0;
1240 for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1243 if (iptr->line != currentline) {
1244 linenumbertable_list_entry_add(cd, iptr->line);
1245 currentline = iptr->line;
1248 // An instruction usually needs < 64 words.
1249 // XXX Check if this is true for all archs.
1250 MCODECHECK(64); // All
1251 MCODECHECK(128); // PPC64
1252 MCODECHECK(1024); // I386, X86_64, M68K, S390 /* 1kB should be enough */
1255 switch (iptr->opc) {
1257 case ICMD_NOP: /* ... ==> ... */
1258 case ICMD_POP: /* ..., value ==> ... */
1259 case ICMD_POP2: /* ..., value, value ==> ... */
1262 case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
1264 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1265 emit_nullpointer_check(cd, iptr, s1);
1268 case ICMD_BREAKPOINT: /* ... ==> ... */
1269 /* sx.val.anyptr = Breakpoint */
1271 patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1275 #if defined(ENABLE_SSA)
1276 case ICMD_GETEXCEPTION:
1278 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1279 emit_imove(cd, REG_ITMP1, d);
1280 emit_store_dst(jd, iptr, d);
1284 /* inline operations **********************************************/
1286 case ICMD_INLINE_START:
1288 REPLACEMENT_POINT_INLINE_START(cd, iptr);
1291 case ICMD_INLINE_BODY:
1293 REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
1294 linenumbertable_list_entry_add_inline_start(cd, iptr);
1295 linenumbertable_list_entry_add(cd, iptr->line);
1298 case ICMD_INLINE_END:
1300 linenumbertable_list_entry_add_inline_end(cd, iptr);
1301 linenumbertable_list_entry_add(cd, iptr->line);
1305 /* constant operations ********************************************/
1307 case ICMD_ICONST: /* ... ==> ..., constant */
1309 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1310 ICONST(d, iptr->sx.val.i);
1311 emit_store_dst(jd, iptr, d);
1314 case ICMD_LCONST: /* ... ==> ..., constant */
1316 d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1317 LCONST(d, iptr->sx.val.l);
1318 emit_store_dst(jd, iptr, d);
1322 /* load/store/copy/move operations ********************************/
1326 case ICMD_ILOAD: /* ... ==> ..., content of local variable */
1327 case ICMD_LLOAD: /* s1 = local variable */
1331 case ICMD_ISTORE: /* ..., value ==> ... */
1336 emit_copy(jd, iptr);
1341 if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1342 emit_copy(jd, iptr);
1346 /* integer operations *********************************************/
1348 case ICMD_FCONST: /* ... ==> ..., constant */
1349 case ICMD_DCONST: /* ... ==> ..., constant */
1350 case ICMD_ACONST: /* ... ==> ..., constant */
1351 case ICMD_INEG: /* ..., value ==> ..., - value */
1352 case ICMD_LNEG: /* ..., value ==> ..., - value */
1353 case ICMD_I2L: /* ..., value ==> ..., value */
1354 case ICMD_L2I: /* ..., value ==> ..., value */
1355 case ICMD_INT2BYTE: /* ..., value ==> ..., value */
1356 case ICMD_INT2CHAR: /* ..., value ==> ..., value */
1357 case ICMD_INT2SHORT: /* ..., value ==> ..., value */
1358 case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1360 case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
1361 /* sx.val.i = constant */
1362 case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1363 case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
1364 /* sx.val.l = constant */
1365 case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1366 case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
1367 /* sx.val.i = constant */
1368 case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1369 case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
1370 /* sx.val.l = constant */
1371 case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1372 case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
1373 /* sx.val.i = constant */
1374 case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1375 /* sx.val.i = constant */
1376 case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1377 case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
1378 /* sx.val.l = constant */
1379 case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1380 /* sx.val.l = constant */
1381 case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1382 case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1383 case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
1384 /* sx.val.i = constant */
1385 case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
1386 /* sx.val.i = constant */
1387 case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1388 case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1389 case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
1390 /* sx.val.i = constant */
1391 case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
1392 /* sx.val.l = constant */
1393 case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1394 case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
1395 /* sx.val.i = constant */
1396 case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1397 case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
1398 /* sx.val.i = constant */
1399 case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1400 case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
1401 /* sx.val.i = constant */
1402 case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1403 case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
1404 /* sx.val.i = constant */
1405 case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1406 case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
1407 /* sx.val.i = constant */
1408 case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1409 case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
1410 /* sx.val.l = constant */
1411 case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1412 case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
1413 /* sx.val.i = constant */
1414 case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1415 case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
1416 /* sx.val.l = constant */
1417 case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1418 case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
1419 /* sx.val.i = constant */
1420 case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1421 case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
1422 /* sx.val.l = constant */
1423 case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1424 case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
1425 /* sx.val.i = constant */
1426 case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1427 case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
1428 /* sx.val.l = constant */
1430 // Generate architecture specific instructions.
1431 codegen_emit_instruction(jd, iptr);
1435 /* floating operations ********************************************/
1437 #if !defined(ENABLE_SOFTFLOAT)
1438 case ICMD_FNEG: /* ..., value ==> ..., - value */
1440 case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1442 case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1444 case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1446 case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1448 case ICMD_FREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1450 case ICMD_I2F: /* ..., value ==> ..., (float) value */
1451 case ICMD_I2D: /* ..., value ==> ..., (double) value */
1452 case ICMD_L2F: /* ..., value ==> ..., (float) value */
1453 case ICMD_L2D: /* ..., value ==> ..., (double) value */
1454 case ICMD_F2I: /* ..., value ==> ..., (int) value */
1456 case ICMD_F2L: /* ..., value ==> ..., (long) value */
1458 case ICMD_F2D: /* ..., value ==> ..., (double) value */
1459 case ICMD_D2F: /* ..., value ==> ..., (float) value */
1460 case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1461 case ICMD_DCMPL: /* == => 0, < => 1, > => -1 */
1462 case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1463 case ICMD_DCMPG: /* == => 0, < => 1, > => -1 */
1465 // Generate architecture specific instructions.
1466 codegen_emit_instruction(jd, iptr);
1468 #endif /* !defined(ENABLE_SOFTFLOAT) */
1471 /* memory operations **********************************************/
1473 case ICMD_ARRAYLENGTH:/* ..., arrayref ==> ..., length */
1475 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1476 d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1477 /* implicit null-pointer check */
1478 // XXX PPC64: Here we had an explicit null-pointer check
1479 // which I think was obsolete, please confirm. Otherwise:
1480 // emit_nullpointer_check(cd, iptr, s1);
1481 M_ILD(d, s1, OFFSET(java_array_t, size));
1482 emit_store_dst(jd, iptr, d);
1485 case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1486 case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1487 case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1488 case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1489 case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1490 case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1491 case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1492 case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1493 case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1494 case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1495 case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1496 case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1497 case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1498 case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1499 case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1500 case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1501 case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
1502 case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
1503 case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
1504 case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
1505 case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
1506 case ICMD_FASTORECONST: /* ..., arrayref, index ==> ... */
1507 case ICMD_DASTORECONST: /* ..., arrayref, index ==> ... */
1508 case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
1509 case ICMD_GETFIELD: /* ... ==> ..., value */
1510 case ICMD_PUTFIELD: /* ..., value ==> ... */
1511 case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
1512 /* val = value (in current instruction) */
1513 case ICMD_PUTSTATICCONST: /* ... ==> ... */
1514 /* val = value (in current instruction) */
1516 // Generate architecture specific instructions.
1517 codegen_emit_instruction(jd, iptr);
1520 case ICMD_GETSTATIC: /* ... ==> ..., value */
1522 #if defined(__I386__)
1523 // Generate architecture specific instructions.
1524 codegen_emit_instruction(jd, iptr);
1530 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1531 unresolved_field* uf = iptr->sx.s23.s3.uf;
1532 fieldtype = uf->fieldref->parseddesc.fd->type;
1533 disp = dseg_add_unique_address(cd, 0);
1535 pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1537 fi = NULL; /* Silence compiler warning */
1540 fi = iptr->sx.s23.s3.fmiref->p.field;
1541 fieldtype = fi->type;
1542 disp = dseg_add_address(cd, fi->value);
1544 if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1546 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1547 PROFILE_CYCLE_START;
1550 pr = NULL; /* Silence compiler warning */
1553 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1554 codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1557 // XXX X86_64: Here we had this:
1558 /* This approach is much faster than moving the field
1559 address inline into a register. */
1561 M_ALD_DSEG(REG_ITMP1, disp);
1563 switch (fieldtype) {
1565 d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1566 M_ALD(d, REG_ITMP1, 0);
1569 #if defined(ENABLE_SOFTFLOAT)
1572 d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1573 M_ILD(d, REG_ITMP1, 0);
1576 #if defined(ENABLE_SOFTFLOAT)
1579 d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1580 M_LLD(d, REG_ITMP1, 0);
1582 #if !defined(ENABLE_SOFTFLOAT)
1584 d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1585 M_FLD(d, REG_ITMP1, 0);
1588 d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1589 M_DLD(d, REG_ITMP1, 0);
1593 // Silence compiler warning.
1596 emit_store_dst(jd, iptr, d);
1601 case ICMD_PUTSTATIC: /* ..., value ==> ... */
1603 #if defined(__I386__)
1604 // Generate architecture specific instructions.
1605 codegen_emit_instruction(jd, iptr);
1612 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1613 unresolved_field* uf = iptr->sx.s23.s3.uf;
1614 fieldtype = uf->fieldref->parseddesc.fd->type;
1615 disp = dseg_add_unique_address(cd, 0);
1617 pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1619 fi = NULL; /* Silence compiler warning */
1622 fi = iptr->sx.s23.s3.fmiref->p.field;
1623 fieldtype = fi->type;
1624 disp = dseg_add_address(cd, fi->value);
1626 if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1628 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1629 PROFILE_CYCLE_START;
1632 pr = NULL; /* Silence compiler warning */
1635 // XXX X86_64: Here we had this:
1636 /* This approach is much faster than moving the field
1637 address inline into a register. */
1639 M_ALD_DSEG(REG_ITMP1, disp);
1641 switch (fieldtype) {
1643 s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1644 M_AST(s1, REG_ITMP1, 0);
1647 #if defined(ENABLE_SOFTFLOAT)
1650 s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1651 M_IST(s1, REG_ITMP1, 0);
1654 #if defined(ENABLE_SOFTFLOAT)
1657 s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1658 M_LST(s1, REG_ITMP1, 0);
1660 #if !defined(ENABLE_SOFTFLOAT)
1662 s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1663 M_FST(s1, REG_ITMP1, 0);
1666 s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1667 M_DST(s1, REG_ITMP1, 0);
1671 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1672 codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1678 /* branch operations **********************************************/
1680 case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1682 // We might leave this method, stop profiling.
1685 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1686 // XXX M68K: Actually this is M_ADRMOVE(s1, REG_ATMP1_XPTR);
1687 // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
1688 emit_imove(cd, s1, REG_ITMP1_XPTR);
1690 #ifdef ENABLE_VERIFIER
1691 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1692 unresolved_class *uc = iptr->sx.s23.s2.uc;
1693 patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1695 #endif /* ENABLE_VERIFIER */
1697 // Generate architecture specific instructions.
1698 codegen_emit_instruction(jd, iptr);
1702 case ICMD_GOTO: /* ... ==> ... */
1703 case ICMD_RET: /* ... ==> ... */
1705 #if defined(ENABLE_SSA)
1706 // In case of a goto, phimoves have to be inserted
1709 last_cmd_was_goto = true;
1710 codegen_emit_phi_moves(jd, bptr);
1713 if (iptr->dst.block->type == BBTYPE_EXH)
1714 fixup_exc_handler_interface(jd, iptr->dst.block);
1715 emit_br(cd, iptr->dst.block);
1719 case ICMD_JSR: /* ... ==> ... */
1721 assert(iptr->sx.s23.s3.jsrtarget.block->type != BBTYPE_EXH);
1722 emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1726 case ICMD_IFNULL: /* ..., value ==> ... */
1727 case ICMD_IFNONNULL:
1729 assert(iptr->dst.block->type != BBTYPE_EXH);
1730 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1731 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1732 emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1733 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1735 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1737 # error Unable to generate code for this configuration!
1741 case ICMD_IFEQ: /* ..., value ==> ... */
1748 // XXX Sparc64: int compares must not branch on the
1749 // register directly. The reason is that the register content is
1750 // not 32-bit clean. Fix this!
1752 assert(iptr->dst.block->type != BBTYPE_EXH);
1754 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1755 if (iptr->sx.val.i == 0) {
1756 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1757 emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1759 // Generate architecture specific instructions.
1760 codegen_emit_instruction(jd, iptr);
1762 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1763 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1764 emit_icmp_imm(cd, s1, iptr->sx.val.i);
1765 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1767 # error Unable to generate code for this configuration!
1771 case ICMD_IF_LEQ: /* ..., value ==> ... */
1778 assert(iptr->dst.block->type != BBTYPE_EXH);
1780 // Generate architecture specific instructions.
1781 codegen_emit_instruction(jd, iptr);
1784 case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
1785 case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
1787 assert(iptr->dst.block->type != BBTYPE_EXH);
1789 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1790 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1791 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1792 switch (iptr->opc) {
1793 case ICMD_IF_ACMPEQ:
1794 emit_beq(cd, iptr->dst.block, s1, s2);
1796 case ICMD_IF_ACMPNE:
1797 emit_bne(cd, iptr->dst.block, s1, s2);
1800 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1802 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1803 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1804 M_CMPEQ(s1, s2, REG_ITMP1);
1805 switch (iptr->opc) {
1806 case ICMD_IF_ACMPEQ:
1807 emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1809 case ICMD_IF_ACMPNE:
1810 emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1814 # error Unable to generate code for this configuration!
1818 case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
1819 case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
1821 assert(iptr->dst.block->type != BBTYPE_EXH);
1823 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1824 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1825 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1826 switch (iptr->opc) {
1827 case ICMD_IF_ICMPEQ:
1828 emit_beq(cd, iptr->dst.block, s1, s2);
1830 case ICMD_IF_ICMPNE:
1831 emit_bne(cd, iptr->dst.block, s1, s2);
1839 case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
1840 case ICMD_IF_ICMPGT: /* op1 = target JavaVM pc */
1841 case ICMD_IF_ICMPLE:
1842 case ICMD_IF_ICMPGE:
1844 assert(iptr->dst.block->type != BBTYPE_EXH);
1846 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1847 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1848 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1849 # if defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
1850 // XXX Fix this soon!!!
1855 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1856 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1857 // Generate architecture specific instructions.
1858 codegen_emit_instruction(jd, iptr);
1860 # error Unable to generate code for this configuration!
1864 case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1865 case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1866 case ICMD_IF_LCMPLT:
1867 case ICMD_IF_LCMPGT:
1868 case ICMD_IF_LCMPLE:
1869 case ICMD_IF_LCMPGE:
1871 assert(iptr->dst.block->type != BBTYPE_EXH);
1873 // Generate architecture specific instructions.
1874 codegen_emit_instruction(jd, iptr);
1877 case ICMD_RETURN: /* ... ==> ... */
1879 REPLACEMENT_POINT_RETURN(cd, iptr);
1880 goto nowperformreturn;
1882 case ICMD_ARETURN: /* ..., retvalue ==> ... */
1884 REPLACEMENT_POINT_RETURN(cd, iptr);
1885 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1886 // XXX M68K: This should actually be M_ADR2INTMOVE(s1, REG_RESULT);
1887 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1888 emit_imove(cd, s1, REG_RESULT);
1890 #ifdef ENABLE_VERIFIER
1891 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1893 unresolved_class *uc = iptr->sx.s23.s2.uc;
1894 patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1895 PROFILE_CYCLE_START;
1897 #endif /* ENABLE_VERIFIER */
1898 goto nowperformreturn;
1900 case ICMD_IRETURN: /* ..., retvalue ==> ... */
1901 #if defined(ENABLE_SOFTFLOAT)
1905 REPLACEMENT_POINT_RETURN(cd, iptr);
1906 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1907 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1908 emit_imove(cd, s1, REG_RESULT);
1909 goto nowperformreturn;
1911 case ICMD_LRETURN: /* ..., retvalue ==> ... */
1912 #if defined(ENABLE_SOFTFLOAT)
1916 REPLACEMENT_POINT_RETURN(cd, iptr);
1917 s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1918 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1919 emit_lmove(cd, s1, REG_LRESULT);
1920 goto nowperformreturn;
1922 #if !defined(ENABLE_SOFTFLOAT)
1923 case ICMD_FRETURN: /* ..., retvalue ==> ... */
1925 REPLACEMENT_POINT_RETURN(cd, iptr);
1926 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1927 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1928 emit_fmove(cd, s1, REG_FRESULT);
1930 M_CAST_F2I(s1, REG_RESULT);
1932 goto nowperformreturn;
1934 case ICMD_DRETURN: /* ..., retvalue ==> ... */
1936 REPLACEMENT_POINT_RETURN(cd, iptr);
1937 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1938 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1939 emit_dmove(cd, s1, REG_FRESULT);
1941 M_CAST_D2L(s1, REG_LRESULT);
1943 goto nowperformreturn;
1947 #if !defined(NDEBUG)
1948 // Call trace function.
1949 if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1950 emit_verbosecall_exit(jd);
1953 #if defined(ENABLE_THREADS)
1954 // Emit code to call monitorexit function.
1955 if (checksync && code_is_synchronized(code)) {
1956 emit_monitor_exit(jd, rd->memuse * 8);
1960 // Generate method profiling code.
1963 // Emit code for the method epilog.
1964 codegen_emit_epilog(jd);
1968 case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
1970 REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr);
1972 bte = iptr->sx.s23.s3.bte;
1975 #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
1976 if (bte->fp == BUILTIN_escape_reason_new) {
1977 void set_escape_reasons(void *);
1978 M_ASUB_IMM(8, REG_SP);
1979 M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
1980 M_AST(EDX, REG_SP, 4);
1981 M_AST(REG_ITMP1, REG_SP, 0);
1982 M_MOV_IMM(set_escape_reasons, REG_ITMP1);
1984 M_ALD(EDX, REG_SP, 4);
1985 M_AADD_IMM(8, REG_SP);
1989 // Emit the fast-path if available.
1990 if (bte->emit_fastpath != NULL) {
1991 void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
1992 emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
1994 assert(md->returntype.type == TYPE_VOID);
1997 // Actually call the fast-path emitter.
1998 emit_fastpath(jd, iptr, d);
2000 // If fast-path succeeded, jump to the end of the builtin
2002 // XXX Actually the slow-path block below should be moved
2003 // out of the instruction stream and the jump below should be
2005 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2006 os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2007 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2009 emit_label_bne(cd, BRANCH_LABEL_10);
2011 # error Unable to generate code for this configuration!
2017 case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
2018 case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2019 case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
2020 case ICMD_INVOKEINTERFACE:
2022 REPLACEMENT_POINT_INVOKE(cd, iptr);
2024 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2025 unresolved_method* um = iptr->sx.s23.s3.um;
2026 md = um->methodref->parseddesc.md;
2029 methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2030 md = lm->parseddesc;
2036 // XXX Check this again!
2037 MCODECHECK((i << 1) + 64); // PPC
2039 // Copy arguments to registers or stack location.
2040 for (i = i - 1; i >= 0; i--) {
2041 var = VAR(iptr->sx.s23.s2.args[i]);
2042 d = md->params[i].regoff;
2044 // Already pre-allocated?
2045 if (var->flags & PREALLOC)
2048 if (!md->params[i].inmemory) {
2049 switch (var->type) {
2052 #if defined(ENABLE_SOFTFLOAT)
2055 s1 = emit_load(jd, iptr, var, d);
2056 emit_imove(cd, s1, d);
2060 #if defined(ENABLE_SOFTFLOAT)
2063 s1 = emit_load(jd, iptr, var, d);
2064 emit_lmove(cd, s1, d);
2067 #if !defined(ENABLE_SOFTFLOAT)
2069 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2070 s1 = emit_load(jd, iptr, var, d);
2071 emit_fmove(cd, s1, d);
2073 s1 = emit_load(jd, iptr, var, REG_FTMP1);
2079 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2080 s1 = emit_load(jd, iptr, var, d);
2081 emit_dmove(cd, s1, d);
2083 s1 = emit_load(jd, iptr, var, REG_FTMP1);
2091 switch (var->type) {
2093 s1 = emit_load(jd, iptr, var, REG_ITMP1);
2094 // XXX M68K: This should actually be like this:
2095 // s1 = emit_load(jd, iptr, var, REG_ATMP1);
2096 // XXX Sparc64: Here this actually was:
2097 // M_STX(s1, REG_SP, JITSTACK + d);
2098 M_AST(s1, REG_SP, d);
2102 #if defined(ENABLE_SOFTFLOAT)
2105 #if SIZEOF_VOID_P == 4
2106 s1 = emit_load(jd, iptr, var, REG_ITMP1);
2107 M_IST(s1, REG_SP, d);
2114 #if defined(ENABLE_SOFTFLOAT)
2117 s1 = emit_load(jd, iptr, var, REG_LTMP12);
2118 // XXX Sparc64: Here this actually was:
2119 // M_STX(s1, REG_SP, JITSTACK + d);
2120 M_LST(s1, REG_SP, d);
2123 #if !defined(ENABLE_SOFTFLOAT)
2125 #if SIZEOF_VOID_P == 4
2126 s1 = emit_load(jd, iptr, var, REG_FTMP1);
2127 M_FST(s1, REG_SP, d);
2134 s1 = emit_load(jd, iptr, var, REG_FTMP1);
2135 // XXX Sparc64: Here this actually was:
2136 // M_DST(s1, REG_SP, JITSTACK + d);
2137 M_DST(s1, REG_SP, d);
2144 // Generate method profiling code.
2147 // Generate architecture specific instructions.
2148 codegen_emit_instruction(jd, iptr);
2150 // Generate method profiling code.
2151 PROFILE_CYCLE_START;
2153 // Store size of call code in replacement point.
2154 REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
2155 REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr);
2157 // Recompute the procedure vector (PV).
2158 emit_recompute_pv(cd);
2160 // Store return value.
2161 #if defined(ENABLE_SSA)
2162 if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2163 (ls->lifetime[iptr->dst.varindex].type != UNUSED))
2164 /* a "living" stackslot */
2166 switch (md->returntype.type) {
2169 #if defined(ENABLE_SOFTFLOAT)
2172 s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2173 // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2174 emit_imove(cd, REG_RESULT, s1);
2175 emit_store_dst(jd, iptr, s1);
2179 #if defined(ENABLE_SOFTFLOAT)
2182 s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2183 // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2184 emit_lmove(cd, REG_LRESULT, s1);
2185 emit_store_dst(jd, iptr, s1);
2188 #if !defined(ENABLE_SOFTFLOAT)
2190 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2191 s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2192 emit_fmove(cd, REG_FRESULT, s1);
2194 s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2195 M_CAST_I2F(REG_RESULT, s1);
2197 emit_store_dst(jd, iptr, s1);
2201 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2202 s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2203 emit_dmove(cd, REG_FRESULT, s1);
2205 s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2206 M_CAST_L2D(REG_LRESULT, s1);
2208 emit_store_dst(jd, iptr, s1);
2216 // If we are emitting a fast-path block, this is the label for
2217 // successful fast-path execution.
2218 if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2219 emit_label(cd, BRANCH_LABEL_10);
2224 case ICMD_TABLESWITCH: /* ..., index ==> ... */
2226 // Generate architecture specific instructions.
2227 codegen_emit_instruction(jd, iptr);
2230 case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
2232 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2233 i = iptr->sx.s23.s2.lookupcount;
2235 // XXX Again we need to check this
2236 MCODECHECK((i<<2)+8); // Alpha, ARM, i386, MIPS, M68K, Sparc64
2237 MCODECHECK((i<<3)+8); // PPC64
2238 MCODECHECK(8 + ((7 + 6) * i) + 5); // X86_64, S390
2241 for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2242 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2243 emit_icmp_imm(cd, s1, lookup->value);
2244 emit_beq(cd, lookup->target.block);
2245 #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2246 ICONST(REG_ITMP2, lookup->value);
2247 emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2248 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2249 emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2250 emit_bnez(cd, lookup->target.block, REG_ITMP2);
2252 # error Unable to generate code for this configuration!
2257 emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2261 case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
2262 case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
2263 case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
2265 // Generate architecture specific instructions.
2266 codegen_emit_instruction(jd, iptr);
2270 exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2276 } // for all instructions
2278 #if defined(ENABLE_SSA)
2279 // By edge splitting, in blocks with phi moves there can only
2280 // be a goto as last command, no other jump/branch command.
2282 if (!last_cmd_was_goto)
2283 codegen_emit_phi_moves(jd, bptr);
2287 #if defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2289 /* XXX require a lower number? */
2290 MCODECHECK(64); // I386, MIPS, Sparc64
2291 MCODECHECK(512); // S390, X86_64
2293 /* XXX We can remove that when we don't use UD2 anymore on i386
2296 /* At the end of a basic block we may have to append some nops,
2297 because the patcher stub calling code might be longer than the
2298 actual instruction. So codepatching does not change the
2299 following block unintentionally. */
2301 if (cd->mcodeptr < cd->lastmcodeptr) {
2302 while (cd->mcodeptr < cd->lastmcodeptr) {
2308 if (bptr->next && bptr->next->type == BBTYPE_EXH)
2309 fixup_exc_handler_interface(jd, bptr->next);
2311 } // for all basic blocks
2314 emit_patcher_traps(jd);
2321 /* codegen_emit_phi_moves ****************************************************
2323 Emits phi moves at the end of the basicblock.
2325 *******************************************************************************/
2327 #if defined(ENABLE_SSA)
2328 void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
2341 /* Moves from phi functions with highest indices have to be */
2342 /* inserted first, since this is the order as is used for */
2343 /* conflict resolution */
2345 for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
2346 lt_d = ls->phi_moves[bptr->nr][i][0];
2347 lt_s = ls->phi_moves[bptr->nr][i][1];
2348 #if defined(SSA_DEBUG_VERBOSE)
2350 printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
2352 if (lt_s == UNUSED) {
2353 #if defined(SSA_DEBUG_VERBOSE)
2355 printf(" ... not processed \n");
2360 d = VAR(ls->lifetime[lt_d].v_index);
2361 s = VAR(ls->lifetime[lt_s].v_index);
2364 if (d->type == -1) {
2365 #if defined(SSA_DEBUG_VERBOSE)
2367 printf("...returning - phi lifetimes where joined\n");
2372 if (s->type == -1) {
2373 #if defined(SSA_DEBUG_VERBOSE)
2375 printf("...returning - phi lifetimes where joined\n");
2381 tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
2382 tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
2383 emit_copy(jd, &tmp_i);
2385 #if defined(SSA_DEBUG_VERBOSE)
2386 if (compileverbose) {
2387 if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
2389 printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2391 else if (IS_INMEMORY(s->flags)) {
2393 printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2395 else if (IS_INMEMORY(d->flags)) {
2397 printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2401 printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2405 #endif /* defined(SSA_DEBUG_VERBOSE) */
2408 #endif /* defined(ENABLE_SSA) */
2411 /* REMOVEME When we have exception handling in C. */
2413 void *md_asm_codegen_get_pv_from_pc(void *ra)
2415 return md_codegen_get_pv_from_pc(ra);
2420 * These are local overrides for various environment variables in Emacs.
2421 * Please do not remove this and leave it at the end of the file, where
2422 * Emacs will automagically detect them.
2423 * ---------------------------------------------------------------------
2426 * indent-tabs-mode: t
2430 * vim:noexpandtab:sw=4:ts=4: