1 /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2
3    Copyright (C) 1996-2005, 2006, 2007, 2008, 2009
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5    Copyright (C) 2009 Theobroma Systems Ltd.
6
7    This file is part of CACAO.
8
9    This program is free software; you can redistribute it and/or
10    modify it under the terms of the GNU General Public License as
11    published by the Free Software Foundation; either version 2, or (at
12    your option) any later version.
13
14    This program is distributed in the hope that it will be useful, but
15    WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17    General Public License for more details.
18
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22    02110-1301, USA.
23
24    All functions assume the following code area / data area layout:
25
26    +-----------+
27    |           |
28    | code area | code area grows to higher addresses
29    |           |
30    +-----------+ <-- start of procedure
31    |           |
32    | data area | data area grows to lower addresses
33    |           |
34    +-----------+
35
36    The functions first write into a temporary code/data area allocated by
37    "codegen_init". "codegen_finish" copies the code and data area into permanent
38    memory. All functions writing values into the data area return the offset
39    relative to the beginning of the code area (start of procedure).
40
41 */
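
/* Illustrative sketch (not part of the original file): how the offset
   convention described above is typically used by architecture code
   generators.  dseg_add_s4, M_ILD, REG_PV and REG_ITMP1 stand for whatever
   the concrete target provides; the point is only that data segment entries
   are addressed with non-positive displacements from the procedure start:

       s4 disp = dseg_add_s4(cd, 0x12345678);    // disp <= 0
       M_ILD(REG_ITMP1, REG_PV, disp);           // load the constant at PV + disp

   because the data area grows towards lower addresses while the code area
   grows towards higher addresses. */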
42
43
44 #include "config.h"
45
46 #include <assert.h>
47 #include <string.h>
48
49 #include "vm/types.h"
50
51 #include "codegen.h"
52 #include "md.h"
53 #include "md-abi.h"
54
55 #include "mm/memory.hpp"
56
57 #include "toolbox/avl.h"
58 #include "toolbox/list.hpp"
59 #include "toolbox/logging.hpp"
60
61 #include "native/llni.h"
62 #include "native/localref.hpp"
63 #include "native/native.hpp"
64
65 #include "threads/thread.hpp"
66
67 #include "vm/jit/builtin.hpp"
68 #include "vm/exceptions.hpp"
69 #include "vm/method.hpp"
70 #include "vm/options.h"
71 #include "vm/statistics.h"
72 #include "vm/string.hpp"
73
74 #include "vm/jit/abi.h"
75 #include "vm/jit/asmpart.h"
76 #include "vm/jit/code.hpp"
77 #include "vm/jit/codegen-common.hpp"
78
79 #if defined(ENABLE_DISASSEMBLER)
80 # include "vm/jit/disass.h"
81 #endif
82
83 #include "vm/jit/dseg.h"
84 #include "vm/jit/emit-common.hpp"
85 #include "vm/jit/jit.hpp"
86 #include "vm/jit/linenumbertable.hpp"
87 #include "vm/jit/methodheader.h"
88 #include "vm/jit/methodtree.h"
89 #include "vm/jit/patcher-common.hpp"
90 #include "vm/jit/replace.hpp"
91 #include "vm/jit/show.hpp"
92 #include "vm/jit/stacktrace.hpp"
93 #include "vm/jit/trace.hpp"
94
95 #include "vm/jit/optimizing/profile.hpp"
96
97 #if defined(ENABLE_SSA)
98 # include "vm/jit/optimizing/lsra.h"
99 # include "vm/jit/optimizing/ssa.h"
100 #elif defined(ENABLE_LSRA)
101 # include "vm/jit/allocator/lsra.h"
102 #endif
103
104 #if defined(ENABLE_INTRP)
105 #include "vm/jit/intrp/intrp.h"
106 #endif
107
108 #if defined(ENABLE_VMLOG)
109 #include <vmlog_cacao.h>
110 #endif
111
112
113 /* codegen_init ****************************************************************
114
115    TODO
116
117 *******************************************************************************/
118
119 void codegen_init(void)
120 {
121 }
122
123
124 /* codegen_setup ***************************************************************
125
126    Allocates and initialises code area, data area and references.
127
128 *******************************************************************************/
129
130 void codegen_setup(jitdata *jd)
131 {
132         methodinfo  *m;
133         codegendata *cd;
134
135         /* get required compiler data */
136
137         m  = jd->m;
138         cd = jd->cd;
139
140         /* initialize members */
141
142         // Set flags as requested.
143         if (opt_AlwaysEmitLongBranches) {
144                 cd->flags = CODEGENDATA_FLAG_LONGBRANCHES;
145         }
146         else {
147                 cd->flags = 0;
148         }
149
150         cd->mcodebase    = (u1*) DumpMemory::allocate(MCODEINITSIZE);
151         cd->mcodeend     = cd->mcodebase + MCODEINITSIZE;
152         cd->mcodesize    = MCODEINITSIZE;
153
154         /* initialize mcode variables */
155
156         cd->mcodeptr     = cd->mcodebase;
157         cd->lastmcodeptr = cd->mcodebase;
158
159 #if defined(ENABLE_INTRP)
160         /* native dynamic superinstructions variables */
161
162         if (opt_intrp) {
163                 cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
164                 cd->ncodesize = NCODEINITSIZE;
165
166                 /* initialize ncode variables */
167         
168                 cd->ncodeptr = cd->ncodebase;
169
170                 cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
171                 cd->superstarts = NULL;
172         }
173 #endif
174
175         cd->dseg           = NULL;
176         cd->dseglen        = 0;
177
178         cd->jumpreferences = NULL;
179
180 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
181         cd->datareferences = NULL;
182 #endif
183
184         cd->brancheslabel  = new DumpList<branch_label_ref_t*>();
185         cd->linenumbers    = new DumpList<Linenumber>();
186 }
187
188
189 /* codegen_reset ***************************************************************
190
191    Resets the codegen data structure so we can recompile the method.
192
193 *******************************************************************************/
194
195 static void codegen_reset(jitdata *jd)
196 {
197         codeinfo    *code;
198         codegendata *cd;
199         basicblock  *bptr;
200
201         /* get required compiler data */
202
203         code = jd->code;
204         cd   = jd->cd;
205
206         /* reset error flag */
207
208         cd->flags          &= ~CODEGENDATA_FLAG_ERROR;
209
210         /* reset some members, we reuse the code memory already allocated
211            as this should have almost the correct size */
212
213         cd->mcodeptr        = cd->mcodebase;
214         cd->lastmcodeptr    = cd->mcodebase;
215
216         cd->dseg            = NULL;
217         cd->dseglen         = 0;
218
219         cd->jumpreferences  = NULL;
220
221 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
222         cd->datareferences  = NULL;
223 #endif
224
225         cd->brancheslabel   = new DumpList<branch_label_ref_t*>();
226         cd->linenumbers     = new DumpList<Linenumber>();
227         
228         /* We need to clear the mpc and the branch references from all
229            basic blocks as they will definitely change. */
230
231         for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
232                 bptr->mpc        = -1;
233                 bptr->branchrefs = NULL;
234         }
235
236         /* We need to clear all the patcher references from the codeinfo
237            since they all will be regenerated */
238
239         patcher_list_reset(code);
240
241 #if defined(ENABLE_REPLACEMENT)
242         code->rplpoints     = NULL;
243         code->rplpointcount = 0;
244         code->regalloc      = NULL;
245         code->regalloccount = 0;
246         code->globalcount   = 0;
247 #endif
248 }
249
250
251 /* codegen_generate ************************************************************
252
253    Generates the code for the currently compiled method.
254
255 *******************************************************************************/
256
257 bool codegen_generate(jitdata *jd)
258 {
259         codegendata *cd;
260
261         /* get required compiler data */
262
263         cd = jd->cd;
264
265         /* call the machine-dependent code generation function */
266
267         if (!codegen_emit(jd))
268                 return false;
269
270         /* check for an error */
271
272         if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
273                 /* check for long-branches flag, if it is set we recompile the
274                    method */
275
276 #if !defined(NDEBUG)
277                 if (compileverbose)
278                         log_message_method("Re-generating code: ", jd->m);
279 #endif
280
281                 /* XXX maybe we should tag long-branches-methods for recompilation */
282
283                 if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
284                         /* we have to reset the codegendata structure first */
285
286                         codegen_reset(jd);
287
288                         /* and restart the compiler run */
289
290                         if (!codegen_emit(jd))
291                                 return false;
292                 }
293                 else {
294                         os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
295                 }
296
297 #if !defined(NDEBUG)
298                 if (compileverbose)
299                         log_message_method("Re-generating code done: ", jd->m);
300 #endif
301         }
302
303         /* reallocate the memory and finish the code generation */
304
305         codegen_finish(jd);
306
307         /* everything's ok */
308
309         return true;
310 }
311
312
313 /* codegen_close ***************************************************************
314
315    TODO
316
317 *******************************************************************************/
318
319 void codegen_close(void)
320 {
321         /* TODO: release avl tree on i386 and x86_64 */
322 }
323
324
325 /* codegen_increase ************************************************************
326
327    Doubles code area.
328
329 *******************************************************************************/
330
331 void codegen_increase(codegendata *cd)
332 {
333         u1 *oldmcodebase;
334
335         /* save old mcodebase pointer */
336
337         oldmcodebase = cd->mcodebase;
338
339         /* reallocate to new, doubled memory */
340
341         cd->mcodebase = (u1*) DumpMemory::reallocate(cd->mcodebase,
342                                                                                                  cd->mcodesize,
343                                                                                                  cd->mcodesize * 2);
344         cd->mcodesize *= 2;
345         cd->mcodeend   = cd->mcodebase + cd->mcodesize;
346
347         /* set new mcodeptr */
348
349         cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
350
351 #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
352  || defined(__SPARC_64__)
353         /* adjust the pointer to the last patcher position */
354
355         if (cd->lastmcodeptr != NULL)
356                 cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
357 #endif
358 }
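
/* Illustrative sketch (assumption, not part of the original file): code
   emitters are expected to reserve space before writing, roughly like this:

       MCODECHECK(64);                  // calls codegen_increase() when fewer
                                        // than ~64 code units are left
       emit_some_instruction(cd, ...);  // hypothetical emitter writing at
                                        // cd->mcodeptr

   Since codegen_increase() may move cd->mcodebase, any raw pointer into the
   code buffer taken before the check becomes stale; only cd->mcodeptr and
   cd->lastmcodeptr are relocated above. */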
359
360
361 /* codegen_ncode_increase ******************************************************
362
363    Doubles the native code (ncode) area.
364
365 *******************************************************************************/
366
367 #if defined(ENABLE_INTRP)
368 u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
369 {
370         u1 *oldncodebase;
371
372         /* save old ncodebase pointer */
373
374         oldncodebase = cd->ncodebase;
375
376         /* reallocate to new, doubled memory */
377
378         cd->ncodebase = DMREALLOC(cd->ncodebase,
379                                                           u1,
380                                                           cd->ncodesize,
381                                                           cd->ncodesize * 2);
382         cd->ncodesize *= 2;
383
384         /* return the new ncodeptr */
385
386         return (cd->ncodebase + (ncodeptr - oldncodebase));
387 }
388 #endif
389
390
391 /* codegen_add_branch_ref ******************************************************
392
393    Prepends a branch to the list.
394
395 *******************************************************************************/
396
397 void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
398 {
399         branchref *br;
400         s4         branchmpc;
401
402         STATISTICS(count_branches_unresolved++);
403
404         /* calculate the mpc of the branch instruction */
405
406         branchmpc = cd->mcodeptr - cd->mcodebase;
407
408         br = (branchref*) DumpMemory::allocate(sizeof(branchref));
409
410         br->branchmpc = branchmpc;
411         br->condition = condition;
412         br->reg       = reg;
413         br->options   = options;
414         br->next      = target->branchrefs;
415
416         target->branchrefs = br;
417 }
418
419
420 /* codegen_resolve_branchrefs **************************************************
421
422    Resolves and patches the branch references of a given basic block.
423
424 *******************************************************************************/
425
426 void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
427 {
428         branchref *br;
429         u1        *mcodeptr;
430
431         /* Save the mcodeptr because in the branch emitting functions
432            we generate code somewhere inside already generated code,
433            but we're still in the actual code generation phase. */
434
435         mcodeptr = cd->mcodeptr;
436
437         /* just to make sure */
438
439         assert(bptr->mpc >= 0);
440
441         for (br = bptr->branchrefs; br != NULL; br = br->next) {
442                 /* temporarily set the mcodeptr */
443
444                 cd->mcodeptr = cd->mcodebase + br->branchmpc;
445
446                 /* emit_bccz and emit_branch emit the correct code, even if we
447                    pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
448
449                 emit_bccz(cd, bptr, br->condition, br->reg, br->options);
450         }
451
452         /* restore mcodeptr */
453
454         cd->mcodeptr = mcodeptr;
455 }
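
/* Illustrative sketch (assumption, not part of the original file) of the
   forward-branch protocol implemented by the two functions above:

       // while emitting block A, the target block B has no mpc yet; the
       // branch emitter records a branchref via codegen_add_branch_ref():
       emit_bcc(cd, B, BRANCH_EQ, BRANCH_OPT_NONE);   // hypothetical call

       // later, when codegen_emit() reaches block B and B->mpc is known:
       codegen_resolve_branchrefs(cd, B);             // rewrites each recorded
                                                      // branch in place

   The placeholder emitted at the recorded branchmpc must be large enough to
   hold the final branch instruction, which is what the long-branches flag
   and the recompilation pass in codegen_generate() guarantee. */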
456
457
458 /* codegen_branch_label_add ****************************************************
459
460    Appends a branch to the label-branch list.
461
462 *******************************************************************************/
463
464 void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
465 {
466         // Calculate the current mpc.
467         int32_t mpc = cd->mcodeptr - cd->mcodebase;
468
469         branch_label_ref_t* br = (branch_label_ref_t*) DumpMemory::allocate(sizeof(branch_label_ref_t));
470
471         br->mpc       = mpc;
472         br->label     = label;
473         br->condition = condition;
474         br->reg       = reg;
475         br->options   = options;
476
477         // Add the branch to the list.
478         cd->brancheslabel->push_back(br);
479 }
480
481
482 /* codegen_set_replacement_point_notrap ****************************************
483
484    Record the position of a non-trappable replacement point.
485
486 *******************************************************************************/
487
488 #if defined(ENABLE_REPLACEMENT)
489 #if !defined(NDEBUG)
490 void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
491 #else
492 void codegen_set_replacement_point_notrap(codegendata *cd)
493 #endif
494 {
495         assert(cd->replacementpoint);
496         assert(cd->replacementpoint->type == type);
497         assert(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP);
498
499         cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
500
501         cd->replacementpoint++;
502 }
503 #endif /* defined(ENABLE_REPLACEMENT) */
504
505
506 /* codegen_set_replacement_point ***********************************************
507
508    Record the position of a trappable replacement point.
509
510 *******************************************************************************/
511
512 #if defined(ENABLE_REPLACEMENT)
513 #if !defined(NDEBUG)
514 void codegen_set_replacement_point(codegendata *cd, s4 type)
515 #else
516 void codegen_set_replacement_point(codegendata *cd)
517 #endif
518 {
519         assert(cd->replacementpoint);
520         assert(cd->replacementpoint->type == type);
521         assert(!(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP));
522
523         cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
524
525         cd->replacementpoint++;
526
527 #if !defined(NDEBUG)
528         /* XXX actually we should use an own REPLACEMENT_NOPS here! */
529         if (opt_TestReplacement)
530                 PATCHER_NOPS;
531 #endif
532
533         /* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
534
535         cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
536 }
537 #endif /* defined(ENABLE_REPLACEMENT) */
538
539
540 /* codegen_finish **************************************************************
541
542    Finishes the code generation. A new memory area, large enough for both
543    data and code, is allocated and data and code are copied together
544    to their final layout, unresolved jumps are resolved, ...
545
546 *******************************************************************************/
547
548 void codegen_finish(jitdata *jd)
549 {
550         s4       mcodelen;
551 #if defined(ENABLE_INTRP)
552         s4       ncodelen;
553 #endif
554         s4       alignedmcodelen;
555         jumpref *jr;
556         u1      *epoint;
557         s4       alignedlen;
558
559         /* Get required compiler data. */
560
561         codeinfo*     code = jd->code;
562         codegendata*  cd   = jd->cd;
563         registerdata* rd   = jd->rd;
564
565         /* prevent compiler warning */
566
567 #if defined(ENABLE_INTRP)
568         ncodelen = 0;
569 #endif
570
571         /* calculate the code length */
572
573         mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
574
575 #if defined(ENABLE_STATISTICS)
576         if (opt_stat) {
577                 count_code_len += mcodelen;
578                 count_data_len += cd->dseglen;
579         }
580 #endif
581
582         alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
583
584 #if defined(ENABLE_INTRP)
585         if (opt_intrp)
586                 ncodelen = cd->ncodeptr - cd->ncodebase;
587         else {
588                 ncodelen = 0; /* avoid compiler warning */
589         }
590 #endif
591
592         cd->dseglen = MEMORY_ALIGN(cd->dseglen, MAX_ALIGN);
593         alignedlen = alignedmcodelen + cd->dseglen;
594
595 #if defined(ENABLE_INTRP)
596         if (opt_intrp) {
597                 alignedlen += ncodelen;
598         }
599 #endif
600
601         /* allocate new memory */
602
603         code->mcodelength = mcodelen + cd->dseglen;
604         code->mcode       = CNEW(u1, alignedlen);
605
606         /* set the entrypoint of the method */
607         
608         assert(code->entrypoint == NULL);
609         code->entrypoint = epoint = (code->mcode + cd->dseglen);
610
611         /* fill the data segment (code->entrypoint must already be set!) */
612
613         dseg_finish(jd);
614
615         /* copy code to the new location */
616
617         MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
618
619 #if defined(ENABLE_INTRP)
620         /* relocate native dynamic superinstruction code (if any) */
621
622         if (opt_intrp) {
623                 cd->mcodebase = code->entrypoint;
624
625                 if (ncodelen > 0) {
626                         u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
627
628                         MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
629
630                         /* flush the instruction and data caches */
631
632                         md_cacheflush(ncodebase, ncodelen);
633
634                         /* set some cd variables for dynamic_super_rewrite */
635
636                         cd->ncodebase = ncodebase;
637
638                 } else {
639                         cd->ncodebase = NULL;
640                 }
641
642                 dynamic_super_rewrite(cd);
643         }
644 #endif
645
646         /* Fill runtime information about generated code. */
647
648         code->stackframesize     = cd->stackframesize;
649         code->synchronizedoffset = rd->memuse * 8;
650         code->savedintcount      = INT_SAV_CNT - rd->savintreguse;
651         code->savedfltcount      = FLT_SAV_CNT - rd->savfltreguse;
652 #if defined(HAS_ADDRESS_REGISTER_FILE)
653         code->savedadrcount      = ADR_SAV_CNT - rd->savadrreguse;
654 #endif
655
656         /* Create the exception table. */
657
658         exceptiontable_create(jd);
659
660         /* Create the linenumber table. */
661
662         code->linenumbertable = new LinenumberTable(jd);
663
664         /* jump table resolving */
665
666         for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
667                 *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
668                         (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
669
670         /* patcher resolving */
671
672         patcher_resolve(jd);
673
674 #if defined(ENABLE_REPLACEMENT)
675         /* replacement point resolving */
676         {
677                 int i;
678                 rplpoint *rp;
679
680                 rp = code->rplpoints;
681                 for (i=0; i<code->rplpointcount; ++i, ++rp) {
682                         rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
683                 }
684         }
685 #endif /* defined(ENABLE_REPLACEMENT) */
686
687         /* Insert method into methodtree to find the entrypoint. */
688
689         methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
690
691 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
692         /* resolve data segment references */
693
694         dseg_resolve_datareferences(jd);
695 #endif
696
697         /* flush the instruction and data caches */
698
699         md_cacheflush(code->mcode, code->mcodelength);
700 }
701
702
703 /* codegen_start_native_call ***************************************************
704
705    Prepares the stuff required for a native (JNI) function call:
706
707    - adds a stackframe info structure to the chain, for stacktraces
708    - prepares the local references table on the stack
709
710    The layout of the native stub stackframe should look like this:
711
712    +---------------------------+ <- java SP (of parent Java function)
713    | return address            |
714    +---------------------------+ <- data SP
715    |                           |
716    | stackframe info structure |
717    |                           |
718    +---------------------------+
719    |                           |
720    | local references table    |
721    |                           |
722    +---------------------------+
723    |                           |
724    | saved registers (if any)  |
725    |                           |
726    +---------------------------+
727    |                           |
728    | arguments (if any)        |
729    |                           |
730    +---------------------------+ <- current SP (native stub)
731
732 *******************************************************************************/
733
734 java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
735 {
736         stackframeinfo_t *sfi;
737         localref_table   *lrt;
738         codeinfo         *code;
739         methodinfo       *m;
740         int32_t           framesize;
741
742         uint8_t  *datasp;
743         uint8_t  *javasp;
744         uint64_t *arg_regs;
745         uint64_t *arg_stack;
746
747         STATISTICS(count_calls_java_to_native++);
748
749         // Get information from method header.
750         code = code_get_codeinfo_for_pv(pv);
751         assert(code != NULL);
752
753         framesize = md_stacktrace_get_framesize(code);
754         assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));
755
756         // Get the methodinfo.
757         m = code_get_methodinfo_for_pv(pv);
758         assert(m);
759
760         /* calculate needed values */
761
762 #if defined(__ALPHA__) || defined(__ARM__)
763         datasp    = sp + framesize - SIZEOF_VOID_P;
764         javasp    = sp + framesize;
765         arg_regs  = (uint64_t *) sp;
766         arg_stack = (uint64_t *) javasp;
767 #elif defined(__MIPS__)
768         /* MIPS always uses 8 bytes to store the RA */
769         datasp    = sp + framesize - 8;
770         javasp    = sp + framesize;
771 # if SIZEOF_VOID_P == 8
772         arg_regs  = (uint64_t *) sp;
773 # else
774         arg_regs  = (uint64_t *) (sp + 5 * 8);
775 # endif
776         arg_stack = (uint64_t *) javasp;
777 #elif defined(__S390__)
778         datasp    = sp + framesize - 8;
779         javasp    = sp + framesize;
780         arg_regs  = (uint64_t *) (sp + 96);
781         arg_stack = (uint64_t *) javasp;
782 #elif defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
783         datasp    = sp + framesize;
784         javasp    = sp + framesize + SIZEOF_VOID_P;
785         arg_regs  = (uint64_t *) sp;
786         arg_stack = (uint64_t *) javasp;
787 #elif defined(__POWERPC__)
788         datasp    = sp + framesize;
789         javasp    = sp + framesize;
790         arg_regs  = (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
791         arg_stack = (uint64_t *) javasp;
792 #elif defined(__POWERPC64__)
793         datasp    = sp + framesize;
794         javasp    = sp + framesize;
795         arg_regs  = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
796         arg_stack = (uint64_t *) javasp;
797 #else
798         /* XXX I was unable to do this port for SPARC64, sorry. (-michi) */
799         /* XXX maybe we need to pass the RA as argument there */
800         os::abort("codegen_start_native_call: unsupported architecture");
801 #endif
802
803         /* get data structures from stack */
804
805         sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
806         lrt = (localref_table *)   (datasp - sizeof(stackframeinfo_t) - 
807                                                                 sizeof(localref_table));
808
809 #if defined(ENABLE_JNI)
810         /* add current JNI local references table to this thread */
811
812         localref_table_add(lrt);
813 #endif
814
815 #if !defined(NDEBUG)
816 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
817         /* print the call-trace if necessary */
818         /* BEFORE: filling the local reference table */
819
820         if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
821                 trace_java_call_enter(m, arg_regs, arg_stack);
822 # endif
823 #endif
824
825 #if defined(ENABLE_HANDLES)
826         /* place all references into the local reference table */
827         /* BEFORE: creating stackframeinfo */
828
829         localref_native_enter(m, arg_regs, arg_stack);
830 #endif
831
832         /* Add a stackframeinfo for this native method.  We don't have RA
833            and XPC here.  These are determined in
834            stacktrace_stackframeinfo_add. */
835
836         stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
837
838         /* Return a wrapped classinfo for static methods. */
839
840         if (m->flags & ACC_STATIC)
841                 return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
842         else
843                 return NULL;
844 }
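
/* Illustrative sketch (assumption, not part of the original file): the
   generated native stub is expected to bracket the actual native call
   roughly like this (pseudo-C, register/stack handling omitted):

       java_handle_t *h = codegen_start_native_call(sp, pv);   // builds sfi + lrt;
                                                               // wrapped class for
                                                               // static methods
       // ... call the native function, passing h as the class argument
       //     for static methods ...
       java_object_t *e = codegen_finish_native_call(sp, pv);  // see below: pops
                                                               // sfi/lrt, fetches a
                                                               // pending exception
       // if e != NULL, the stub branches to its exception handling path
 */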
845
846
847 /* codegen_finish_native_call **************************************************
848
849    Removes the stuff required for a native (JNI) function call.
850    Additionally it checks for a pending exception and, if one is set,
851    gets the exception object and clears the pointer.
852
853 *******************************************************************************/
854
855 java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
856 {
857         stackframeinfo_t *sfi;
858         java_handle_t    *e;
859         java_object_t    *o;
860         codeinfo         *code;
861         methodinfo       *m;
862         int32_t           framesize;
863
864         uint8_t  *datasp;
865         uint64_t *ret_regs;
866
867         // Get information from method header.
868         code = code_get_codeinfo_for_pv(pv);
869         assert(code != NULL);
870
871         framesize = md_stacktrace_get_framesize(code);
872
873         // Get the methodinfo.
874         m = code->m;
875         assert(m != NULL);
876
877         /* calculate needed values */
878
879 #if defined(__ALPHA__) || defined(__ARM__)
880         datasp   = sp + framesize - SIZEOF_VOID_P;
881         ret_regs = (uint64_t *) sp;
882 #elif defined(__MIPS__)
883         /* MIPS always uses 8 bytes to store the RA */
884         datasp   = sp + framesize - 8;
885 # if SIZEOF_VOID_P == 8
886         ret_regs = (uint64_t *) sp;
887 # else
888         ret_regs = (uint64_t *) (sp + 1 * 8);
889 # endif
890 #elif defined(__S390__)
891         datasp   = sp + framesize - 8;
892         ret_regs = (uint64_t *) (sp + 96);
893 #elif defined(__I386__)
894         datasp   = sp + framesize;
895         ret_regs = (uint64_t *) (sp + 2 * SIZEOF_VOID_P);
896 #elif defined(__M68K__)
897         datasp   = sp + framesize;
898         ret_regs = (uint64_t *) (sp + 2 * 8);
899 #elif defined(__X86_64__)
900         datasp   = sp + framesize;
901         ret_regs = (uint64_t *) sp;
902 #elif defined(__POWERPC__)
903         datasp   = sp + framesize;
904         ret_regs = (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
905 #elif defined(__POWERPC64__)
906         datasp   = sp + framesize;
907         ret_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
908 #else
909         os::abort("codegen_finish_native_call: unsupported architecture");
910 #endif
911
912         /* get data structures from stack */
913
914         sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
915
916         /* Remove current stackframeinfo from chain. */
917
918         stacktrace_stackframeinfo_remove(sfi);
919
920 #if defined(ENABLE_HANDLES)
921         /* unwrap the return value from the local reference table */
922         /* AFTER: removing the stackframeinfo */
923         /* BEFORE: releasing the local reference table */
924
925         localref_native_exit(m, ret_regs);
926 #endif
927
928         /* get and unwrap the exception */
929         /* AFTER: removing the stackframe info */
930         /* BEFORE: releasing the local reference table */
931
932         e = exceptions_get_and_clear_exception();
933         o = LLNI_UNWRAP(e);
934
935 #if defined(ENABLE_JNI)
936         /* release JNI local references table for this thread */
937
938         localref_frame_pop_all();
939         localref_table_remove();
940 #endif
941
942 #if !defined(NDEBUG)
943 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
944         /* print the call-trace if necessary */
945         /* AFTER: unwrapping the return value */
946
947         if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
948                 trace_java_call_exit(m, ret_regs);
949 # endif
950 #endif
951
952         return o;
953 }
954
955
956 /* codegen_reg_of_var **********************************************************
957
958    This function determines the register to which the result of an
959    operation should go when it is ultimately intended to store the
960    result in pseudoregister v.  If v is assigned to an actual
961    register, this register will be returned.  Otherwise (when v is
962    spilled) this function returns tempregnum.  If not already done,
963    regoff and flags are set in the stack location.
964
965 *******************************************************************************/
966
967 s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
968 {
969         if (!(v->flags & INMEMORY))
970                 return v->vv.regoff;
971
972         return tempregnum;
973 }
974
975
976 /* codegen_reg_of_dst **********************************************************
977
978    This function determines the register to which the result of an
979    operation should go when it is ultimately intended to store the
980    result in iptr->dst.var.  If dst.var is assigned to an actual
981    register, this register will be returned.  Otherwise (when it is
982    spilled) this function returns tempregnum.  If not already done,
983    regoff and flags are set in the stack location.
984
985 *******************************************************************************/
986
987 s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
988 {
989         return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
990 }
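
/* Illustrative sketch (not part of the original file): the typical ICMD
   emission pattern built on the two helpers above, as used throughout
   codegen_emit() below.  M_SOME_OP stands for a hypothetical
   architecture-specific instruction macro:

       s1 = emit_load_s1(jd, iptr, REG_ITMP1);        // source register or temp
       d  = codegen_reg_of_dst(jd, iptr, REG_ITMP2);  // destination or temp
       M_SOME_OP(d, s1);
       emit_store_dst(jd, iptr, d);                   // spills d only if the
                                                      // destination is INMEMORY
 */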
991
992
993 /**
994  * Generates machine code.
995  */
996 bool codegen_emit(jitdata *jd)
997 {
998         varinfo*            var;
999         builtintable_entry* bte;
1000         methoddesc*         md;
1001         int32_t             s1, s2, /*s3,*/ d;
1002         int32_t             fieldtype;
1003         int32_t             disp;
1004         int                 i;
1005
1006         // Get required compiler data.
1007         //methodinfo*   m    = jd->m;
1008         codeinfo*     code = jd->code;
1009         codegendata*  cd   = jd->cd;
1010         registerdata* rd   = jd->rd;
1011 #if defined(ENABLE_SSA)
1012         lsradata*     ls   = jd->ls;
1013         bool last_cmd_was_goto = false;
1014 #endif
1015
1016         // Space to save used callee saved registers.
1017         int32_t savedregs_num = 0;
1018         savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1019         savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1020 #ifdef HAS_ADDRESS_REGISTER_FILE
1021         savedregs_num += (ADR_SAV_CNT - rd->savadrreguse);
1022 #endif
1023
1024         // Calculate size of stackframe.
1025         cd->stackframesize = rd->memuse + savedregs_num;
1026
1027         // Space to save the return address.
1028 #if STACKFRAME_RA_TOP_OF_FRAME
1029 # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1030         if (!code_is_leafmethod(code))
1031 # endif
1032                 cd->stackframesize += 1;
1033 #endif
1034
1035         // Space to save argument of monitor_enter.
1036 #if defined(ENABLE_THREADS)
1037         if (checksync && code_is_synchronized(code))
1038 # if STACKFRAME_SYNC_NEEDS_TWO_SLOTS
1039                 /* On some architectures the stack slot for the monitor argument
1040                    cannot be shared with the slot that preserves the return value
1041                    across monitor_exit, since both values reside in the same register. */
1042                 cd->stackframesize += 2;
1043 # else
1044                 cd->stackframesize += 1;
1045 # endif
1046 #endif
1047
1048         // Keep stack of non-leaf functions 16-byte aligned for calls into
1049         // native code.
1050         if (!code_is_leafmethod(code) || JITDATA_HAS_FLAG_VERBOSECALL(jd))
1051 #if STACKFRMAE_RA_BETWEEN_FRAMES
1052                 ALIGN_ODD(cd->stackframesize);
1053 #else
1054                 ALIGN_EVEN(cd->stackframesize);
1055 #endif
1056
1057 #if defined(SPECIALMEMUSE)
1058         // On architectures having a linkage area, we can get rid of the whole
1059         // stackframe in leaf functions without saved registers.
1060         if (code_is_leafmethod(code) && (cd->stackframesize == LA_SIZE_IN_POINTERS))
1061                 cd->stackframesize = 0;
1062 #endif
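
        /* Worked example (illustration only, values assumed): for a synchronized,
           non-leaf method with rd->memuse == 3, two saved integer registers and
           one saved float register, the frame so far holds 3 + 3 + 1 (return
           address, where STACKFRAME_RA_TOP_OF_FRAME applies) + 1 (monitor
           argument) = 8 slots; the alignment step above may then round this up
           so that the frame stays 16-byte aligned across calls into native code. */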
1063
1064         /*
1065          * SECTION 1: Method header generation.
1066          */
1067
1068         // The method header was reduced to the bare minimum of one pointer
1069         // to the codeinfo structure, which in turn contains all runtime
1070         // information. However this section together with the methodheader.h
1071         // file will be kept alive for historical reasons. It might come in
1072         // handy at some point.
1073
1074         (void) dseg_add_unique_address(cd, code);   ///< CodeinfoPointer
1075
1076         // XXX, REMOVEME: We still need it for exception handling in assembler.
1077         // XXX ARM, M68K: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1078 #if defined(__I386__)
1079         int align_off = (cd->stackframesize != 0) ? 4 : 0;
1080         (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize       */
1081 #else
1082         (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize       */
1083 #endif
1084         // XXX M68K: We use the IntSave as a split field for the adr now
1085         //           (void) dseg_add_unique_s4(cd, (ADR_SAV_CNT - rd->savadrreguse) << 16 | (INT_SAV_CNT - rd->savintreguse)); /* IntSave */
1086         (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1087         (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1088         (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1089
1090         /*
1091          * SECTION 2: Method prolog generation.
1092          */
1093
1094 #if defined(ENABLE_PROFILING)
1095         // Generate method profiling code.
1096         if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1097
1098                 // Count method frequency.
1099                 emit_profile_method(cd, code);
1100
1101                 // Start CPU cycle counting.
1102                 emit_profile_cycle_start(cd, code);
1103         }
1104 #endif
1105
1106         // Emit code for the method prolog.
1107         codegen_emit_prolog(jd);
1108
1109 #if defined(ENABLE_THREADS)
1110         // Emit code to call monitorenter function.
1111         if (checksync && code_is_synchronized(code))
1112                 emit_monitor_enter(jd, rd->memuse * 8);
1113 #endif
1114
1115 #if !defined(NDEBUG)
1116         // Call trace function.
1117         if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1118                 emit_verbosecall_enter(jd);
1119 #endif
1120
1121 #if defined(ENABLE_SSA)
1122         // With SSA the header is basicblock 0, insert phi moves if necessary.
1123         if (ls != NULL)
1124                 codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1125 #endif
1126
1127         // Create replacement points.
1128         REPLACEMENT_POINTS_INIT(cd, jd);
1129
1130         /*
1131          * SECTION 3: ICMD code generation.
1132          */
1133
1134         // Walk through all basic blocks.
1135         for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1136
1137                 bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1138
1139                 // Is this basic block reached?
1140                 if (bptr->flags < BBREACHED)
1141                         continue;
1142
1143                 // Branch resolving.
1144                 codegen_resolve_branchrefs(cd, bptr);
1145
1146                 // Handle replacement points.
1147                 REPLACEMENT_POINT_BLOCK_START(cd, bptr);
1148
1149 #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1150                 // Generate countdown trap code.
1151                 methodinfo* m = jd->m;
1152                 if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1153                         if (cd->replacementpoint[-1].flags & RPLPOINT_FLAG_COUNTDOWN) {
1154                                 MCODECHECK(32);
1155                                 emit_trap_countdown(cd, &(m->hitcountdown));
1156                         }
1157                 }
1158 #endif
1159
1160 #if defined(ENABLE_PROFILING)
1161                 // Generate basicblock profiling code.
1162                 if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1163
1164                         // Count basicblock frequency.
1165                         emit_profile_basicblock(cd, code, bptr);
1166
1167                         // If this is an exception handler, start profiling again.
1168                         if (bptr->type == BBTYPE_EXH)
1169                                 emit_profile_cycle_start(cd, code);
1170                 }
1171 #endif
1172
1173                 // Copy interface registers to their destination.
1174                 int32_t indepth = bptr->indepth;
1175                 // XXX Check if this is true for all archs.
1176                 MCODECHECK(64+indepth);   // All
1177                 MCODECHECK(128+indepth);  // PPC64
1178                 MCODECHECK(512);          // I386, X86_64, S390
1179 #if defined(ENABLE_SSA)
1180                 // XXX Check if this is correct and add a proper comment!
1181                 if (ls != NULL) {
1182                         last_cmd_was_goto = false;
1183                 } else {
1184 #elif defined(ENABLE_LSRA)
1185                 if (opt_lsra) {
1186                         while (indepth > 0) {
1187                                 indepth--;
1188                                 var = VAR(bptr->invars[indepth]);
1189                                 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1190                                         if (!IS_INMEMORY(var->flags))
1191                                                 d = var->vv.regoff;
1192                                         else
1193                                                 d = REG_ITMP1_XPTR;
1194                                         // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1195                                         // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1196                                         // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1197                                         emit_imove(cd, REG_ITMP1_XPTR, d);
1198                                         emit_store(jd, NULL, var, d);
1199                                 }
1200                         }
1201                 } else {
1202 #endif
1203                         while (indepth > 0) {
1204                                 indepth--;
1205                                 var = VAR(bptr->invars[indepth]);
1206                                 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1207                                         d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1208                                         // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1209                                         // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1210                                         // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1211                                         emit_imove(cd, REG_ITMP1_XPTR, d);
1212                                         emit_store(jd, NULL, var, d);
1213                                 }
1214                                 else {
1215                                         assert((var->flags & INOUT));
1216                                 }
1217                         }
1218 #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1219                 }
1220 #endif
1221
1222                 // Walk through all instructions.
1223                 int32_t len = bptr->icount;
1224                 uint16_t currentline = 0;
1225                 for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1226
1227                         // Add line number.
1228                         if (iptr->line != currentline) {
1229                                 linenumbertable_list_entry_add(cd, iptr->line);
1230                                 currentline = iptr->line;
1231                         }
1232
1233                         // An instruction usually needs < 64 words.
1234                         // XXX Check if this is true for all archs.
1235                         MCODECHECK(64);    // All
1236                         MCODECHECK(128);   // PPC64
1237                         MCODECHECK(1024);  // I386, X86_64, M68K, S390      /* 1kB should be enough */
1238
1239                         // The big switch.
1240                         switch (iptr->opc) {
1241
1242                         case ICMD_NOP:        /* ...  ==> ...                             */
1243                         case ICMD_POP:        /* ..., value  ==> ...                      */
1244                         case ICMD_POP2:       /* ..., value, value  ==> ...               */
1245                                 break;
1246
1247                         case ICMD_CHECKNULL:  /* ..., objectref  ==> ..., objectref       */
1248
1249                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1250                                 emit_nullpointer_check(cd, iptr, s1);
1251                                 break;
1252
1253                         case ICMD_BREAKPOINT: /* ...  ==> ...                             */
1254                                               /* sx.val.anyptr = Breakpoint               */
1255
1256                                 patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1257                                 PATCHER_NOPS;
1258                                 break;
1259
1260 #if defined(ENABLE_SSA)
1261                         case ICMD_GETEXCEPTION:
1262
1263                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1264                                 emit_imove(cd, REG_ITMP1, d);
1265                                 emit_store_dst(jd, iptr, d);
1266                                 break;
1267 #endif
1268
1269                         /* inline operations **********************************************/
1270
1271                         case ICMD_INLINE_START:
1272
1273                                 REPLACEMENT_POINT_INLINE_START(cd, iptr);
1274                                 break;
1275
1276                         case ICMD_INLINE_BODY:
1277
1278                                 REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
1279                                 linenumbertable_list_entry_add_inline_start(cd, iptr);
1280                                 linenumbertable_list_entry_add(cd, iptr->line);
1281                                 break;
1282
1283                         case ICMD_INLINE_END:
1284
1285                                 linenumbertable_list_entry_add_inline_end(cd, iptr);
1286                                 linenumbertable_list_entry_add(cd, iptr->line);
1287                                 break;
1288
1289
1290                         /* constant operations ********************************************/
1291
1292                         case ICMD_ICONST:     /* ...  ==> ..., constant                   */
1293
1294                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1295                                 ICONST(d, iptr->sx.val.i);
1296                                 emit_store_dst(jd, iptr, d);
1297                                 break;
1298
1299                         case ICMD_LCONST:     /* ...  ==> ..., constant                   */
1300
1301                                 d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1302                                 LCONST(d, iptr->sx.val.l);
1303                                 emit_store_dst(jd, iptr, d);
1304                                 break;
1305
1306
1307                         /* load/store/copy/move operations ********************************/
1308
1309                         case ICMD_COPY:
1310                         case ICMD_MOVE:
1311                         case ICMD_ILOAD:      /* ...  ==> ..., content of local variable  */
1312                         case ICMD_LLOAD:      /* s1 = local variable                      */
1313                         case ICMD_FLOAD:
1314                         case ICMD_DLOAD:
1315                         case ICMD_ALOAD:
1316                         case ICMD_ISTORE:     /* ..., value  ==> ...                      */
1317                         case ICMD_LSTORE:
1318                         case ICMD_FSTORE:
1319                         case ICMD_DSTORE:
1320
1321                                 emit_copy(jd, iptr);
1322                                 break;
1323
1324                         case ICMD_ASTORE:
1325
1326                                 if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1327                                         emit_copy(jd, iptr);
1328                                 break;
1329
1330
1331                         /* integer operations *********************************************/
1332
1333                         case ICMD_FCONST:     /* ...  ==> ..., constant                   */
1334                         case ICMD_DCONST:     /* ...  ==> ..., constant                   */
1335                         case ICMD_ACONST:     /* ...  ==> ..., constant                   */
1336                         case ICMD_INEG:       /* ..., value  ==> ..., - value             */
1337                         case ICMD_LNEG:       /* ..., value  ==> ..., - value             */
1338                         case ICMD_I2L:        /* ..., value  ==> ..., value               */
1339                         case ICMD_L2I:        /* ..., value  ==> ..., value               */
1340                         case ICMD_INT2BYTE:   /* ..., value  ==> ..., value               */
1341                         case ICMD_INT2CHAR:   /* ..., value  ==> ..., value               */
1342                         case ICMD_INT2SHORT:  /* ..., value  ==> ..., value               */
1343                         case ICMD_IADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1344                         case ICMD_IINC:
1345                         case ICMD_IADDCONST:  /* ..., value  ==> ..., value + constant    */
1346                                               /* sx.val.i = constant                      */
1347                         case ICMD_LADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1348                         case ICMD_LADDCONST:  /* ..., value  ==> ..., value + constant    */
1349                                               /* sx.val.l = constant                      */
1350                         case ICMD_ISUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1351                         case ICMD_ISUBCONST:  /* ..., value  ==> ..., value + constant    */
1352                                               /* sx.val.i = constant                      */
1353                         case ICMD_LSUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1354                         case ICMD_LSUBCONST:  /* ..., value  ==> ..., value - constant    */
1355                                               /* sx.val.l = constant                      */
1356                         case ICMD_IMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1357                         case ICMD_IMULCONST:  /* ..., value  ==> ..., value * constant    */
1358                                               /* sx.val.i = constant                      */
1359                         case ICMD_IMULPOW2:   /* ..., value  ==> ..., value * (2 ^ constant) */
1360                                               /* sx.val.i = constant                      */
1361                         case ICMD_LMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1362                         case ICMD_LMULCONST:  /* ..., value  ==> ..., value * constant    */
1363                                               /* sx.val.l = constant                      */
1364                         case ICMD_LMULPOW2:   /* ..., value  ==> ..., value * (2 ^ constant) */
1365                                               /* sx.val.l = constant                      */
1366                         case ICMD_IDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1367                         case ICMD_IREM:       /* ..., val1, val2  ==> ..., val1 % val2    */
1368                         case ICMD_IDIVPOW2:   /* ..., value  ==> ..., value >> constant   */
1369                                               /* sx.val.i = constant                      */
1370                         case ICMD_IREMPOW2:   /* ..., value  ==> ..., value % constant    */
1371                                               /* sx.val.i = constant                      */
1372                         case ICMD_LDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1373                         case ICMD_LREM:       /* ..., val1, val2  ==> ..., val1 % val2    */
1374                         case ICMD_LDIVPOW2:   /* ..., value  ==> ..., value >> constant   */
1375                                               /* sx.val.i = constant                      */
1376                         case ICMD_LREMPOW2:   /* ..., value  ==> ..., value % constant    */
1377                                               /* sx.val.l = constant                      */
1378                         case ICMD_ISHL:       /* ..., val1, val2  ==> ..., val1 << val2   */
1379                         case ICMD_ISHLCONST:  /* ..., value  ==> ..., value << constant   */
1380                                               /* sx.val.i = constant                      */
1381                         case ICMD_ISHR:       /* ..., val1, val2  ==> ..., val1 >> val2   */
1382                         case ICMD_ISHRCONST:  /* ..., value  ==> ..., value >> constant   */
1383                                               /* sx.val.i = constant                      */
1384                         case ICMD_IUSHR:      /* ..., val1, val2  ==> ..., val1 >>> val2  */
1385                         case ICMD_IUSHRCONST: /* ..., value  ==> ..., value >>> constant  */
1386                                               /* sx.val.i = constant                      */
1387                         case ICMD_LSHL:       /* ..., val1, val2  ==> ..., val1 << val2   */
1388                         case ICMD_LSHLCONST:  /* ..., value  ==> ..., value << constant   */
1389                                               /* sx.val.i = constant                      */
1390                         case ICMD_LSHR:       /* ..., val1, val2  ==> ..., val1 >> val2   */
1391                         case ICMD_LSHRCONST:  /* ..., value  ==> ..., value >> constant   */
1392                                               /* sx.val.i = constant                      */
1393                         case ICMD_LUSHR:      /* ..., val1, val2  ==> ..., val1 >>> val2  */
1394                         case ICMD_LUSHRCONST: /* ..., value  ==> ..., value >>> constant  */
1395                                               /* sx.val.l = constant                      */
1396                         case ICMD_IAND:       /* ..., val1, val2  ==> ..., val1 & val2    */
1397                         case ICMD_IANDCONST:  /* ..., value  ==> ..., value & constant    */
1398                                               /* sx.val.i = constant                      */
1399                         case ICMD_LAND:       /* ..., val1, val2  ==> ..., val1 & val2    */
1400                         case ICMD_LANDCONST:  /* ..., value  ==> ..., value & constant    */
1401                                               /* sx.val.l = constant                      */
1402                         case ICMD_IOR:        /* ..., val1, val2  ==> ..., val1 | val2    */
1403                         case ICMD_IORCONST:   /* ..., value  ==> ..., value | constant    */
1404                                               /* sx.val.i = constant                      */
1405                         case ICMD_LOR:        /* ..., val1, val2  ==> ..., val1 | val2    */
1406                         case ICMD_LORCONST:   /* ..., value  ==> ..., value | constant    */
1407                                               /* sx.val.l = constant                      */
1408                         case ICMD_IXOR:       /* ..., val1, val2  ==> ..., val1 ^ val2    */
1409                         case ICMD_IXORCONST:  /* ..., value  ==> ..., value ^ constant    */
1410                                               /* sx.val.i = constant                      */
1411                         case ICMD_LXOR:       /* ..., val1, val2  ==> ..., val1 ^ val2    */
1412                         case ICMD_LXORCONST:  /* ..., value  ==> ..., value ^ constant    */
1413                                               /* sx.val.l = constant                      */
1414
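                                     // All of the integer arithmetic, shift and logical ICMDs
                                     // above are handled entirely by the architecture backend;
                                     // this file only dispatches them.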
1415                                 // Generate architecture specific instructions.
1416                                 codegen_emit_instruction(jd, iptr);
1417                                 break;
1418
1419
1420                         /* floating operations ********************************************/
1421
1422 #if !defined(ENABLE_SOFTFLOAT)
1423                         case ICMD_FNEG:       /* ..., value  ==> ..., - value             */
1424                         case ICMD_DNEG:
1425                         case ICMD_FADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1426                         case ICMD_DADD:
1427                         case ICMD_FSUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1428                         case ICMD_DSUB:
1429                         case ICMD_FMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1430                         case ICMD_DMUL:
1431                         case ICMD_FDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1432                         case ICMD_DDIV:
1433                         case ICMD_FREM:       /* ..., val1, val2  ==> ..., val1 % val2        */
1434                         case ICMD_DREM:
1435                         case ICMD_I2F:        /* ..., value  ==> ..., (float) value       */
1436                         case ICMD_I2D:        /* ..., value  ==> ..., (double) value      */
1437                         case ICMD_L2F:        /* ..., value  ==> ..., (float) value       */
1438                         case ICMD_L2D:        /* ..., value  ==> ..., (double) value      */
1439                         case ICMD_F2I:        /* ..., value  ==> ..., (int) value         */
1440                         case ICMD_D2I:
1441                         case ICMD_F2L:        /* ..., value  ==> ..., (long) value        */
1442                         case ICMD_D2L:
1443                         case ICMD_F2D:        /* ..., value  ==> ..., (double) value      */
1444                         case ICMD_D2F:        /* ..., value  ==> ..., (float) value       */
1445                         case ICMD_FCMPL:      /* ..., val1, val2  ==> ..., val1 fcmpl val2 */
1446                         case ICMD_DCMPL:      /* == => 0, < => -1, > => 1, NaN => -1      */
1447                         case ICMD_FCMPG:      /* ..., val1, val2  ==> ..., val1 fcmpg val2 */
1448                         case ICMD_DCMPG:      /* == => 0, < => -1, > => 1, NaN => +1      */
1449
1450                                 // Generate architecture specific instructions.
1451                                 codegen_emit_instruction(jd, iptr);
1452                                 break;
1453 #endif /* !defined(ENABLE_SOFTFLOAT) */
1454
1455
1456                         /* memory operations **********************************************/
1457
1458                         case ICMD_ARRAYLENGTH:/* ..., arrayref  ==> ..., length           */
1459
1460                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1461                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1462                                 /* implicit null-pointer check */
1463                                 // XXX PPC64: Here we had an explicit null-pointer check
1464                                 //     which I think was obsolete; please confirm. Otherwise:
1465                                 // emit_nullpointer_check(cd, iptr, s1);
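                                     // Note: the load of the size field below also performs the
                                     // null check; on most targets a null arrayref makes this
                                     // load fault, and the signal handler turns the fault into a
                                     // NullPointerException (hence "implicit null-pointer check").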
1466                                 M_ILD(d, s1, OFFSET(java_array_t, size));
1467                                 emit_store_dst(jd, iptr, d);
1468                                 break;
1469
1470                         case ICMD_BALOAD:     /* ..., arrayref, index  ==> ..., value     */
1471                         case ICMD_CALOAD:     /* ..., arrayref, index  ==> ..., value     */
1472                         case ICMD_SALOAD:     /* ..., arrayref, index  ==> ..., value     */
1473                         case ICMD_IALOAD:     /* ..., arrayref, index  ==> ..., value     */
1474                         case ICMD_LALOAD:     /* ..., arrayref, index  ==> ..., value     */
1475                         case ICMD_FALOAD:     /* ..., arrayref, index  ==> ..., value     */
1476                         case ICMD_DALOAD:     /* ..., arrayref, index  ==> ..., value     */
1477                         case ICMD_AALOAD:     /* ..., arrayref, index  ==> ..., value     */
1478                         case ICMD_BASTORE:    /* ..., arrayref, index, value  ==> ...     */
1479                         case ICMD_CASTORE:    /* ..., arrayref, index, value  ==> ...     */
1480                         case ICMD_SASTORE:    /* ..., arrayref, index, value  ==> ...     */
1481                         case ICMD_IASTORE:    /* ..., arrayref, index, value  ==> ...     */
1482                         case ICMD_LASTORE:    /* ..., arrayref, index, value  ==> ...     */
1483                         case ICMD_FASTORE:    /* ..., arrayref, index, value  ==> ...     */
1484                         case ICMD_DASTORE:    /* ..., arrayref, index, value  ==> ...     */
1485                         case ICMD_AASTORE:    /* ..., arrayref, index, value  ==> ...     */
1486                         case ICMD_BASTORECONST:   /* ..., arrayref, index  ==> ...        */
1487                         case ICMD_CASTORECONST:   /* ..., arrayref, index  ==> ...        */
1488                         case ICMD_SASTORECONST:   /* ..., arrayref, index  ==> ...        */
1489                         case ICMD_IASTORECONST:   /* ..., arrayref, index  ==> ...        */
1490                         case ICMD_LASTORECONST:   /* ..., arrayref, index  ==> ...        */
1491                         case ICMD_FASTORECONST:   /* ..., arrayref, index  ==> ...        */
1492                         case ICMD_DASTORECONST:   /* ..., arrayref, index  ==> ...        */
1493                         case ICMD_AASTORECONST:   /* ..., arrayref, index  ==> ...        */
1494                         case ICMD_GETFIELD:   /* ...  ==> ..., value                      */
1495                         case ICMD_PUTFIELD:   /* ..., value  ==> ...                      */
1496                         case ICMD_PUTFIELDCONST:  /* ..., objectref  ==> ...              */
1497                                                   /* val = value (in current instruction) */
1498                         case ICMD_PUTSTATICCONST: /* ...  ==> ...                         */
1499                                                   /* val = value (in current instruction) */
1500
1501                                 // Generate architecture specific instructions.
1502                                 codegen_emit_instruction(jd, iptr);
1503                                 break;
1504
1505                         case ICMD_GETSTATIC:  /* ...  ==> ..., value                      */
1506
1507 #if defined(__I386__)
1508                                 // Generate architecture specific instructions.
1509                                 codegen_emit_instruction(jd, iptr);
1510 #else
1511                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1512                                         unresolved_field* uf = iptr->sx.s23.s3.uf;
1513                                         fieldtype = uf->fieldref->parseddesc.fd->type;
1514                                         disp      = dseg_add_unique_address(cd, 0);
1515
1516                                         patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1517                                 }
1518                                 else {
1519                                         fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
1520                                         fieldtype = fi->type;
1521                                         disp      = dseg_add_address(cd, fi->value);
1522
1523                                         if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1524                                                 PROFILE_CYCLE_STOP;
1525                                                 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1526                                                 PROFILE_CYCLE_START;
1527                                         }
1528                                 }
1529
1530                                 // XXX X86_64: Here we had this:
1531                                 /* This approach is much faster than moving the field
1532                                    address inline into a register. */
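                                     /* The field address lives in a data-segment slot (disp) and
                                        is loaded from there below; for unresolved fields the
                                        patcher can later write the resolved address into that
                                        single slot instead of rewriting inline code. */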
1533
1534                                 M_ALD_DSEG(REG_ITMP1, disp);
1535
1536                                 switch (fieldtype) {
1537                                 case TYPE_ADR:
1538                                         d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1539                                         M_ALD(d, REG_ITMP1, 0);
1540                                         break;
1541                                 case TYPE_INT:
1542                                         d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1543                                         M_ILD(d, REG_ITMP1, 0);
1544                                         break;
1545                                 case TYPE_LNG:
1546                                         d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1547                                         M_LLD(d, REG_ITMP1, 0);
1548                                         break;
1549                                 case TYPE_FLT:
1550                                         d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1551                                         M_FLD(d, REG_ITMP1, 0);
1552                                         break;
1553                                 case TYPE_DBL:
1554                                         d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1555                                         M_DLD(d, REG_ITMP1, 0);
1556                                         break;
1557                                 }
1558                                 emit_store_dst(jd, iptr, d);
1559 #endif
1560                                 break;
1561
1562                         case ICMD_PUTSTATIC:  /* ..., value  ==> ...                      */
1563
1564 #if defined(__I386__)
1565                                 // Generate architecture specific instructions.
1566                                 codegen_emit_instruction(jd, iptr);
1567 #else
1568                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1569                                         unresolved_field* uf = iptr->sx.s23.s3.uf;
1570                                         fieldtype = uf->fieldref->parseddesc.fd->type;
1571                                         disp      = dseg_add_unique_address(cd, 0);
1572
1573                                         patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1574                                 }
1575                                 else {
1576                                         fieldinfo* fi = iptr->sx.s23.s3.fmiref->p.field;
1577                                         fieldtype = fi->type;
1578                                         disp      = dseg_add_address(cd, fi->value);
1579
1580                                         if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1581                                                 PROFILE_CYCLE_STOP;
1582                                                 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1583                                                 PROFILE_CYCLE_START;
1584                                         }
1585                                 }
1586
1587                                 // XXX X86_64: Here we had this:
1588                                 /* This approach is much faster than moving the field
1589                                    address inline into a register. */
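                                     /* Same data-segment indirection as in the ICMD_GETSTATIC
                                        case above; only the direction of the access differs. */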
1590
1591                                 M_ALD_DSEG(REG_ITMP1, disp);
1592
1593                                 switch (fieldtype) {
1594                                 case TYPE_ADR:
1595                                         s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1596                                         M_AST(s1, REG_ITMP1, 0);
1597                                         break;
1598                                 case TYPE_INT:
1599                                         s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1600                                         M_IST(s1, REG_ITMP1, 0);
1601                                         break;
1602                                 case TYPE_LNG:
1603                                         s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1604                                         M_LST(s1, REG_ITMP1, 0);
1605                                         break;
1606                                 case TYPE_FLT:
1607                                         s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1608                                         M_FST(s1, REG_ITMP1, 0);
1609                                         break;
1610                                 case TYPE_DBL:
1611                                         s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1612                                         M_DST(s1, REG_ITMP1, 0);
1613                                         break;
1614                                 }
1615 #endif
1616                                 break;
1617
1618                         /* branch operations **********************************************/
1619
1620                         case ICMD_ATHROW:     /* ..., objectref ==> ... (, objectref)     */
1621
1622                                 // We might leave this method, stop profiling.
1623                                 PROFILE_CYCLE_STOP;
1624
1625                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1626                                 // XXX M68K: Actually this is M_ADRMOVE(s1, REG_ATMP1_XPTR);
1627                                 // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
1628                                 emit_imove(cd, s1, REG_ITMP1_XPTR);
1629
1630 #ifdef ENABLE_VERIFIER
1631                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1632                                         unresolved_class *uc = iptr->sx.s23.s2.uc;
1633                                         patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1634                                 }
1635 #endif /* ENABLE_VERIFIER */
1636
1637                                 // Generate architecture specific instructions.
1638                                 codegen_emit_instruction(jd, iptr);
1639                                 ALIGNCODENOP;
1640                                 break;
1641
1642                         case ICMD_GOTO:       /* ... ==> ...                              */
1643                         case ICMD_RET:        /* ... ==> ...                              */
1644
1645 #if defined(ENABLE_SSA)
1646                                 // In case of a goto, phimoves have to be inserted
1647                                 // before the jump.
1648                                 if (ls != NULL) {
1649                                         last_cmd_was_goto = true;
1650                                         codegen_emit_phi_moves(jd, bptr);
1651                                 }
1652 #endif
1653                                 emit_br(cd, iptr->dst.block);
1654                                 ALIGNCODENOP;
1655                                 break;
1656
1657                         case ICMD_JSR:        /* ... ==> ...                              */
1658
1659                                 emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1660                                 ALIGNCODENOP;
1661                                 break;
1662
1663                         case ICMD_IFNULL:     /* ..., value ==> ...                       */
1664                         case ICMD_IFNONNULL:
1665
1666                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
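                                     // The SUPPORT_BRANCH_CONDITIONAL_* macros describe the
                                     // target's branch style: branch on a single register
                                     // (implicit compare against zero), branch on a condition
                                     // register set by a preceding compare, or a fused
                                     // compare-and-branch on two registers.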
1667 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1668                                 emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1669 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1670                                 M_TEST(s1);
1671                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1672 #else
1673 # error Unable to generate code for this configuration!
1674 #endif
1675                                 break;
1676
1677                         case ICMD_IFEQ:       /* ..., value ==> ...                       */
1678                         case ICMD_IFNE:
1679                         case ICMD_IFLT:
1680                         case ICMD_IFLE:
1681                         case ICMD_IFGT:
1682                         case ICMD_IFGE:
1683
1684                                 // XXX Sparc64: int compares must not branch on the
1685                                 // register directly. The reason is that the register
1686                                 // content is not 32-bit clean. Fix this!
1687
1688 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1689                                 if (iptr->sx.val.i == 0) {
1690                                         s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1691                                         emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1692                                 } else {
1693                                         // Generate architecture specific instructions.
1694                                         codegen_emit_instruction(jd, iptr);
1695                                 }
1696 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1697                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1698                                 emit_icmp_imm(cd, s1, iptr->sx.val.i);
1699                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1700 #else
1701 # error Unable to generate code for this configuration!
1702 #endif
1703                                 break;
1704
1705                         case ICMD_IF_LEQ:     /* ..., value ==> ...                       */
1706                         case ICMD_IF_LNE:
1707                         case ICMD_IF_LLT:
1708                         case ICMD_IF_LGE:
1709                         case ICMD_IF_LGT:
1710                         case ICMD_IF_LLE:
1711
1712                                 // Generate architecture specific instructions.
1713                                 codegen_emit_instruction(jd, iptr);
1714                                 break;
1715
1716                         case ICMD_IF_ACMPEQ:  /* ..., value, value ==> ...                */
1717                         case ICMD_IF_ACMPNE:  /* op1 = target JavaVM pc                   */
1718
1719                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1720                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1721 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1722                                 switch (iptr->opc) {
1723                                         case ICMD_IF_ACMPEQ:
1724                                                 emit_beq(cd, iptr->dst.block, s1, s2);
1725                                                 break;
1726                                         case ICMD_IF_ACMPNE:
1727                                                 emit_bne(cd, iptr->dst.block, s1, s2);
1728                                                 break;
1729                                 }
1730 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1731                                 M_ACMP(s1, s2);
1732                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1733 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1734                                 M_CMPEQ(s1, s2, REG_ITMP1);
1735                                 switch (iptr->opc) {
1736                                         case ICMD_IF_ACMPEQ:
1737                                                 emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1738                                                 break;
1739                                         case ICMD_IF_ACMPNE:
1740                                                 emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1741                                                 break;
1742                                 }
1743 #else
1744 # error Unable to generate code for this configuration!
1745 #endif
1746                                 break;
1747
1748                         case ICMD_IF_ICMPEQ:  /* ..., value, value ==> ...                */
1749                         case ICMD_IF_ICMPNE:  /* op1 = target JavaVM pc                   */
1750
1751 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1752                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1753                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1754                                 switch (iptr->opc) {
1755                                         case ICMD_IF_ICMPEQ:
1756                                                 emit_beq(cd, iptr->dst.block, s1, s2);
1757                                                 break;
1758                                         case ICMD_IF_ICMPNE:
1759                                                 emit_bne(cd, iptr->dst.block, s1, s2);
1760                                                 break;
1761                                 }
1762                                 break;
1763 #else
1764                                 /* fall-through */
1765 #endif
1766
1767                         case ICMD_IF_ICMPLT:  /* ..., value, value ==> ...                */
1768                         case ICMD_IF_ICMPGT:  /* op1 = target JavaVM pc                   */
1769                         case ICMD_IF_ICMPLE:
1770                         case ICMD_IF_ICMPGE:
1771
1772                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1773                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1774 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1775 # if defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
1776                                 // XXX Fix this soon!!!
1777                                 M_ICMP(s2, s1);
1778 # else
1779                                 M_ICMP(s1, s2);
1780 # endif
1781                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1782 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1783                                 // Generate architecture specific instructions.
1784                                 codegen_emit_instruction(jd, iptr);
1785 #else
1786 # error Unable to generate code for this configuration!
1787 #endif
1788                                 break;
1789
1790                         case ICMD_IF_LCMPEQ:  /* ..., value, value ==> ...                */
1791                         case ICMD_IF_LCMPNE:  /* op1 = target JavaVM pc                   */
1792                         case ICMD_IF_LCMPLT:
1793                         case ICMD_IF_LCMPGT:
1794                         case ICMD_IF_LCMPLE:
1795                         case ICMD_IF_LCMPGE:
1796
1797                                 // Generate architecture specific instructions.
1798                                 codegen_emit_instruction(jd, iptr);
1799                                 break;
1800
1801                         case ICMD_RETURN:     /* ...  ==> ...                             */
1802
1803                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1804                                 goto nowperformreturn;
1805
1806                         case ICMD_ARETURN:    /* ..., retvalue ==> ...                    */
1807
1808                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1809                                 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1810                                 // XXX M68K: This should actually be M_ADR2INTMOVE(s1, REG_RESULT);
1811                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1812                                 emit_imove(cd, s1, REG_RESULT);
1813
1814 #ifdef ENABLE_VERIFIER
1815                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1816                                         PROFILE_CYCLE_STOP;
1817                                         unresolved_class *uc = iptr->sx.s23.s2.uc;
1818                                         patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1819                                         PROFILE_CYCLE_START;
1820                                 }
1821 #endif /* ENABLE_VERIFIER */
1822                                 goto nowperformreturn;
1823
1824                         case ICMD_IRETURN:    /* ..., retvalue ==> ...                    */
1825
1826                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1827                                 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1828                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1829                                 emit_imove(cd, s1, REG_RESULT);
1830                                 goto nowperformreturn;
1831
1832                         case ICMD_LRETURN:    /* ..., retvalue ==> ...                    */
1833
1834                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1835                                 s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1836                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1837                                 emit_lmove(cd, s1, REG_LRESULT);
1838                                 goto nowperformreturn;
1839
1840                         case ICMD_FRETURN:    /* ..., retvalue ==> ...                    */
1841
1842                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1843                                 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1844 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1845                                 emit_fmove(cd, s1, REG_FRESULT);
1846 #else
1847                                 M_CAST_F2I(s1, REG_RESULT);
1848 #endif
1849                                 goto nowperformreturn;
1850
1851                         case ICMD_DRETURN:    /* ..., retvalue ==> ...                    */
1852
1853                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1854                                 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1855 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1856                                 emit_dmove(cd, s1, REG_FRESULT);
1857 #else
1858                                 M_CAST_D2L(s1, REG_LRESULT);
1859 #endif
1860                                 goto nowperformreturn;
1861
1862 nowperformreturn:
1863 #if !defined(NDEBUG)
1864                                 // Call trace function.
1865                                 if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1866                                         emit_verbosecall_exit(jd);
1867 #endif
1868
1869 #if defined(ENABLE_THREADS)
1870                                 // Emit code to call monitorexit function.
1871                                 if (checksync && code_is_synchronized(code)) {
1872                                         emit_monitor_exit(jd, rd->memuse * 8);
1873                                 }
1874 #endif
1875
1876                                 // Generate method profiling code.
1877                                 PROFILE_CYCLE_STOP;
1878
1879                                 // Emit code for the method epilog.
1880                                 codegen_emit_epilog(jd);
1881                                 ALIGNCODENOP;
1882                                 break;
1883
1884                         case ICMD_BUILTIN:      /* ..., [arg1, [arg2 ...]] ==> ...        */
1885
1886                                 REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr);
1887
1888                                 bte = iptr->sx.s23.s3.bte;
1889                                 md  = bte->md;
1890
1891 #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
1892                                 if (bte->fp == BUILTIN_escape_reason_new) {
1893                                         void set_escape_reasons(void *);
1894                                         M_ASUB_IMM(8, REG_SP);
1895                                         M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
1896                                         M_AST(EDX, REG_SP, 4);
1897                                         M_AST(REG_ITMP1, REG_SP, 0);
1898                                         M_MOV_IMM(set_escape_reasons, REG_ITMP1);
1899                                         M_CALL(REG_ITMP1);
1900                                         M_ALD(EDX, REG_SP, 4);
1901                                         M_AADD_IMM(8, REG_SP);
1902                                 }
1903 #endif
1904
1905                                 // Emit the fast-path if available.
1906                                 if (bte->emit_fastpath != NULL) {
1907                                         void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
1908                                         emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
1909
1910                                         assert(md->returntype.type == TYPE_VOID);
1911                                         d = REG_ITMP1;
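                                             // The fast-path emitter signals success by leaving a
                                             // non-zero value in d, which is tested right below.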
1912
1913                                         // Actually call the fast-path emitter.
1914                                         emit_fastpath(jd, iptr, d);
1915
1916                                         // If fast-path succeeded, jump to the end of the builtin
1917                                         // invocation.
1918                                         // XXX Actually the slow-path block below should be moved
1919                                         // out of the instruction stream and the jump below should be
1920                                         // inverted.
1921 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1922                                         os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
1923 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1924                                         M_TEST(d);
1925                                         emit_label_bne(cd, BRANCH_LABEL_10);
1926 #else
1927 # error Unable to generate code for this configuration!
1928 #endif
1929                                 }
1930
1931                                 goto gen_method;
1932
1933                         case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ...        */
1934                         case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
1935                         case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer    */
1936                         case ICMD_INVOKEINTERFACE:
1937
1938                                 REPLACEMENT_POINT_INVOKE(cd, iptr);
1939
1940                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1941                                         unresolved_method* um = iptr->sx.s23.s3.um;
1942                                         md = um->methodref->parseddesc.md;
1943                                 }
1944                                 else {
1945                                         methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
1946                                         md = lm->parseddesc;
1947                                 }
1948
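                                     // Both ICMD_BUILTIN and the INVOKE* opcodes share the code
                                     // below; md describes the callee's signature and parameter
                                     // layout in either case.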
1949 gen_method:
1950                                 i = md->paramcount;
1951
1952                                 // XXX Check this again!
1953                                 MCODECHECK((i << 1) + 64);   // PPC
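                                     // MCODECHECK makes sure there is enough room left in the
                                     // code buffer for the instructions emitted below; the exact
                                     // bound is architecture dependent (hence the XXX above).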
1954
1955                                 // Copy arguments to registers or stack location.
1956                                 for (i = i - 1; i >= 0; i--) {
1957                                         var = VAR(iptr->sx.s23.s2.args[i]);
1958                                         d   = md->params[i].regoff;
1959
1960                                         // Already pre-allocated?
1961                                         if (var->flags & PREALLOC)
1962                                                 continue;
1963
1964                                         if (!md->params[i].inmemory) {
1965                                                 switch (var->type) {
1966                                                 case TYPE_ADR:
1967                                                 case TYPE_INT:
1968                                                         s1 = emit_load(jd, iptr, var, d);
1969                                                         emit_imove(cd, s1, d);
1970                                                         break;
1971
1972                                                 case TYPE_LNG:
1973                                                         s1 = emit_load(jd, iptr, var, d);
1974                                                         emit_lmove(cd, s1, d);
1975                                                         break;
1976
1977                                                 case TYPE_FLT:
1978 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1979                                                         s1 = emit_load(jd, iptr, var, d);
1980                                                         emit_fmove(cd, s1, d);
1981 #else
1982                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
1983                                                         M_CAST_F2I(s1, d);
1984 #endif
1985                                                         break;
1986
1987                                                 case TYPE_DBL:
1988 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1989                                                         s1 = emit_load(jd, iptr, var, d);
1990                                                         emit_dmove(cd, s1, d);
1991 #else
1992                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
1993                                                         M_CAST_D2L(s1, d);
1994 #endif
1995                                                         break;
1996                                                 }
1997                                         }
1998                                         else {
1999                                                 switch (var->type) {
2000                                                 case TYPE_ADR:
2001                                                         s1 = emit_load(jd, iptr, var, REG_ITMP1);
2002                                                         // XXX M68K: This should actually be like this:
2003                                                         //     s1 = emit_load(jd, iptr, var, REG_ATMP1);
2004                                                         // XXX Sparc64: Here this actually was:
2005                                                         //     M_STX(s1, REG_SP, JITSTACK + d);
2006                                                         M_AST(s1, REG_SP, d);
2007                                                         break;
2008
2009                                                 case TYPE_INT:
2010 #if SIZEOF_VOID_P == 4
2011                                                         s1 = emit_load(jd, iptr, var, REG_ITMP1);
2012                                                         M_IST(s1, REG_SP, d);
2013                                                         break;
2014 #else
2015                                                         /* fall-through */
2016 #endif
2017
2018                                                 case TYPE_LNG:
2019                                                         s1 = emit_load(jd, iptr, var, REG_LTMP12);
2020                                                         // XXX Sparc64: Here this actually was:
2021                                                         //     M_STX(s1, REG_SP, JITSTACK + d);
2022                                                         M_LST(s1, REG_SP, d);
2023                                                         break;
2024
2025                                                 case TYPE_FLT:
2026 #if SIZEOF_VOID_P == 4
2027                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2028                                                         M_FST(s1, REG_SP, d);
2029                                                         break;
2030 #else
2031                                                         /* fall-through */
2032 #endif
2033
2034                                                 case TYPE_DBL:
2035                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2036                                                         // XXX Sparc64: Here this actually was:
2037                                                         //     M_DST(s1, REG_SP, JITSTACK + d);
2038                                                         M_DST(s1, REG_SP, d);
2039                                                         break;
2040                                                 }
2041                                         }
2042                                 }
2043
2044                                 // Generate method profiling code.
2045                                 PROFILE_CYCLE_STOP;
2046
2047                                 // Generate architecture specific instructions.
2048                                 codegen_emit_instruction(jd, iptr);
2049
2050                                 // Generate method profiling code.
2051                                 PROFILE_CYCLE_START;
2052
2053                                 // Store size of call code in replacement point.
2054                                 REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
2055                                 REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr);
2056
2057                                 // Recompute the procedure vector (PV).
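                                     // The callee sets up its own PV, so ours has to be
                                     // re-established after the call so that subsequent
                                     // PV-relative addressing (data-segment loads) keeps working.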
2058                                 emit_recompute_pv(cd);
2059
2060                                 // Store return value.
2061 #if defined(ENABLE_SSA)
2062                                 if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2063                                         (ls->lifetime[iptr->dst.varindex].type != UNUSED))
2064                                         /* a "living" stackslot */
2065 #endif
2066                                 switch (md->returntype.type) {
2067                                 case TYPE_INT:
2068                                 case TYPE_ADR:
2069                                         s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2070                                         // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2071                                         emit_imove(cd, REG_RESULT, s1);
2072                                         emit_store_dst(jd, iptr, s1);
2073                                         break;
2074
2075                                 case TYPE_LNG:
2076                                         s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2077                                         // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2078                                         emit_lmove(cd, REG_LRESULT, s1);
2079                                         emit_store_dst(jd, iptr, s1);
2080                                         break;
2081
2082                                 case TYPE_FLT:
2083 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2084                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2085                                         emit_fmove(cd, REG_FRESULT, s1);
2086 #else
2087                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2088                                         M_CAST_I2F(REG_RESULT, s1);
2089 #endif
2090                                         emit_store_dst(jd, iptr, s1);
2091                                         break;
2092
2093                                 case TYPE_DBL:
2094 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2095                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2096                                         emit_dmove(cd, REG_FRESULT, s1);
2097 #else
2098                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2099                                         M_CAST_L2D(REG_LRESULT, s1);
2100 #endif
2101                                         emit_store_dst(jd, iptr, s1);
2102                                         break;
2103
2104                                 case TYPE_VOID:
2105                                         break;
2106                                 }
2107
2108                                 // If we are emitting a fast-path block, this is the label for
2109                                 // successful fast-path execution.
2110                                 if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2111                                         emit_label(cd, BRANCH_LABEL_10);
2112                                 }
2113
2114                                 break;
2115
2116                         case ICMD_TABLESWITCH:  /* ..., index ==> ...                     */
2117
2118                                 // Generate architecture specific instructions.
2119                                 codegen_emit_instruction(jd, iptr);
2120                                 break;
2121
2122                         case ICMD_LOOKUPSWITCH: /* ..., key ==> ...                       */
2123
2124                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2125                                 i = iptr->sx.s23.s2.lookupcount;
2126
2127                                 // XXX Again we need to check this
2128                                 MCODECHECK((i<<2)+8);   // Alpha, ARM, i386, MIPS, M68K, Sparc64
2129                                 MCODECHECK((i<<3)+8);   // PPC64
2130                                 MCODECHECK(8 + ((7 + 6) * i) + 5);   // X86_64, S390
2131
2132                                 // Compare keys.
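                                     // Emitted pattern (sketch; the exact instructions depend on
                                     // the SUPPORT_BRANCH_CONDITIONAL_* configuration):
                                     //     compare s1, key_i; branch-if-equal target_i   (per key)
                                     //     branch default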
2133                                 for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2134 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2135                                         emit_icmp_imm(cd, s1, lookup->value);
2136                                         emit_beq(cd, lookup->target.block);
2137 #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2138                                         ICONST(REG_ITMP2, lookup->value);
2139                                         emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2140 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2141                                         emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2142                                         emit_bnez(cd, lookup->target.block, REG_ITMP2);
2143 #else
2144 # error Unable to generate code for this configuration!
2145 #endif
2146                                 }
2147
2148                                 // Default branch.
2149                                 emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2150                                 ALIGNCODENOP;
2151                                 break;
2152
2153                         case ICMD_CHECKCAST:  /* ..., objectref ==> ..., objectref        */
2154                         case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult        */
2155                         case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref  */
2156
2157                                 // Generate architecture specific instructions.
2158                                 codegen_emit_instruction(jd, iptr);
2159                                 break;
2160
2161                         default:
2162                                 exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2163                                                                                            iptr->opc);
2164                                 return false;
2165
2166                         } // the big switch
2167
2168                 } // for all instructions
2169
2170 #if defined(ENABLE_SSA)
2171                 // Because of edge splitting, a block containing phi moves can
2172                 // only end in a goto; no other jump or branch command is possible.
2173                 if (ls != NULL) {
2174                         if (!last_cmd_was_goto)
2175                                 codegen_emit_phi_moves(jd, bptr);
2176                 }
2177 #endif
2178
2179 #if defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2180                 // XXX Again!!!
2181                 /* XXX require a lower number? */
2182                 MCODECHECK(64);  // I386, MIPS, Sparc64
2183                 MCODECHECK(512); // S390, X86_64
2184
2185                 /* XXX We can remove that when we don't use UD2 anymore on i386
2186                    and x86_64. */
2187
2188                 /* At the end of a basic block we may have to append some nops,
2189                    because the patcher stub calling code might be longer than the
2190                    actual instruction, so that code patching does not change the
2191                    following block unintentionally. */
2192
2193                 if (cd->mcodeptr < cd->lastmcodeptr) {
2194                         while (cd->mcodeptr < cd->lastmcodeptr) {
2195                                 M_NOP;
2196                         }
2197                 }
2198 #endif
2199
2200         } // for all basic blocks
2201
2202         // Generate traps.
2203         emit_patcher_traps(jd);
2204
2205         // Everything's ok.
2206         return true;
2207 }
2208
2209
2210 /* codegen_emit_phi_moves ****************************************************
2211
2212    Emits phi moves at the end of the basicblock.
2213
2214 *******************************************************************************/
2215
2216 #if defined(ENABLE_SSA)
2217 void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
2218 {
2219         int lt_d, lt_s, i;
2220         lsradata *ls;
2221         codegendata *cd;
2222         varinfo *s, *d;
2223         instruction tmp_i;
2224
2225         cd = jd->cd;
2226         ls = jd->ls;
2227
2228         MCODECHECK(512);
2229
2230         /* Moves from phi functions with the highest indices have to be */
2231         /* inserted first, since this is the order used for conflict    */
2232         /* resolution.                                                  */
2233
2234         for (i = ls->num_phi_moves[bptr->nr] - 1; i >= 0; i--) {
2235                 lt_d = ls->phi_moves[bptr->nr][i][0];
2236                 lt_s = ls->phi_moves[bptr->nr][i][1];
2237 #if defined(SSA_DEBUG_VERBOSE)
2238                 if (compileverbose)
2239                         printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
2240 #endif
2241                 if (lt_s == UNUSED) {
2242 #if defined(SSA_DEBUG_VERBOSE)
2243                         if (compileverbose)
2244                                 printf(" ... not processed\n");
2245 #endif
2246                         continue;
2247                 }
2248                         
2249                 d = VAR(ls->lifetime[lt_d].v_index);
2250                 s = VAR(ls->lifetime[lt_s].v_index);
2251                 
2252
2253                 if (d->type == -1) {
2254 #if defined(SSA_DEBUG_VERBOSE)
2255                         if (compileverbose)
2256                                 printf("...returning - phi lifetimes were joined\n");
2257 #endif
2258                         continue;
2259                 }
2260
2261                 if (s->type == -1) {
2262 #if defined(SSA_DEBUG_VERBOSE)
2263                         if (compileverbose)
2264                                 printf("...returning - phi lifetimes were joined\n");
2265 #endif
2266                         continue;
2267                 }
2268
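                     // Build a small synthetic instruction so that the generic
                     // emit_copy() machinery can be reused to move the value from
                     // the source lifetime to the destination one.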
2269                 tmp_i.opc = 0;
2270                 tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
2271                 tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
2272                 emit_copy(jd, &tmp_i);
2273
2274 #if defined(SSA_DEBUG_VERBOSE)
2275                 if (compileverbose) {
2276                         if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
2277                                 /* mem -> mem */
2278                                 printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2279                         }
2280                         else if (IS_INMEMORY(s->flags)) {
2281                                 /* mem -> reg */
2282                                 printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2283                         }
2284                         else if (IS_INMEMORY(d->flags)) {
2285                                 /* reg -> mem */
2286                                 printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2287                         }
2288                         else {
2289                                 /* reg -> reg */
2290                                 printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2291                         }
2292                         printf("\n");
2293                 }
2294 #endif /* defined(SSA_DEBUG_VERBOSE) */
2295         }
2296 }
2297 #endif /* defined(ENABLE_SSA) */
2298
2299
2300 /* REMOVEME When we have exception handling in C. */
2301
2302 void *md_asm_codegen_get_pv_from_pc(void *ra)
2303 {
2304         return md_codegen_get_pv_from_pc(ra);
2305 }
2306
2307
2308 /*
2309  * These are local overrides for various environment variables in Emacs.
2310  * Please do not remove this and leave it at the end of the file, where
2311  * Emacs will automagically detect them.
2312  * ---------------------------------------------------------------------
2313  * Local variables:
2314  * mode: c++
2315  * indent-tabs-mode: t
2316  * c-basic-offset: 4
2317  * tab-width: 4
2318  * End:
2319  * vim:noexpandtab:sw=4:ts=4:
2320  */