PR159: Exception handler blocks / register mixup
1 /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2
3    Copyright (C) 1996-2011
4    CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5    Copyright (C) 2009 Theobroma Systems Ltd.
6
7    This file is part of CACAO.
8
9    This program is free software; you can redistribute it and/or
10    modify it under the terms of the GNU General Public License as
11    published by the Free Software Foundation; either version 2, or (at
12    your option) any later version.
13
14    This program is distributed in the hope that it will be useful, but
15    WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17    General Public License for more details.
18
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22    02110-1301, USA.
23
24    All functions assume the following code area / data area layout:
25
26    +-----------+
27    |           |
28    | code area | code area grows to higher addresses
29    |           |
30    +-----------+ <-- start of procedure
31    |           |
32    | data area | data area grows to lower addresses
33    |           |
34    +-----------+
35
36    The functions first write into a temporary code/data area allocated by
37    "codegen_init". "codegen_finish" copies the code and data area into permanent
38    memory. All functions writing values into the data area return the offset
39    relative the begin of the code area (start of procedure).    
40
41 */
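/* Illustrative sketch (not part of the original comment): with this layout a
   value placed in the data area is addressed at run time via an offset from
   the start of the procedure, e.g.

       s4  disp  = dseg_add_unique_s4(cd, 0x1234);   // offset relative to the
                                                      // procedure start
       s4 *value = (s4 *) (code->entrypoint + disp);  // data area lies below

   using the dseg_* helpers that appear further down in this file. */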
42
43
44 #include "config.h"
45
46 #include <assert.h>
47 #include <string.h>
48
49 #include "vm/types.h"
50
51 #include "codegen.h"
52 #include "md.h"
53 #include "md-abi.h"
54
55 #include "mm/memory.hpp"
56
57 #include "toolbox/avl.h"
58 #include "toolbox/list.hpp"
59 #include "toolbox/logging.hpp"
60
61 #include "native/llni.h"
62 #include "native/localref.hpp"
63 #include "native/native.hpp"
64
65 #include "threads/thread.hpp"
66
67 #include "vm/jit/builtin.hpp"
68 #include "vm/exceptions.hpp"
69 #include "vm/method.hpp"
70 #include "vm/options.h"
71 #include "vm/statistics.h"
72 #include "vm/string.hpp"
73
74 #include "vm/jit/abi.h"
75 #include "vm/jit/asmpart.h"
76 #include "vm/jit/code.hpp"
77 #include "vm/jit/codegen-common.hpp"
78
79 #if defined(ENABLE_DISASSEMBLER)
80 # include "vm/jit/disass.h"
81 #endif
82
83 #include "vm/jit/dseg.h"
84 #include "vm/jit/emit-common.hpp"
85 #include "vm/jit/jit.hpp"
86 #include "vm/jit/linenumbertable.hpp"
87 #include "vm/jit/methodheader.h"
88 #include "vm/jit/methodtree.h"
89 #include "vm/jit/patcher-common.hpp"
90 #include "vm/jit/replace.hpp"
91 #include "vm/jit/show.hpp"
92 #include "vm/jit/stacktrace.hpp"
93 #include "vm/jit/trace.hpp"
94
95 #include "vm/jit/optimizing/profile.hpp"
96
97 #if defined(ENABLE_SSA)
98 # include "vm/jit/optimizing/lsra.h"
99 # include "vm/jit/optimizing/ssa.h"
100 #elif defined(ENABLE_LSRA)
101 # include "vm/jit/allocator/lsra.h"
102 #endif
103
104 #if defined(ENABLE_INTRP)
105 #include "vm/jit/intrp/intrp.h"
106 #endif
107
108 #if defined(ENABLE_VMLOG)
109 #include <vmlog_cacao.h>
110 #endif
111
112
113 /* codegen_init ****************************************************************
114
115    TODO
116
117 *******************************************************************************/
118
119 void codegen_init(void)
120 {
121 }
122
123
124 /* codegen_setup ***************************************************************
125
126    Allocates and initialises code area, data area and references.
127
128 *******************************************************************************/
129
130 void codegen_setup(jitdata *jd)
131 {
132         methodinfo  *m;
133         codegendata *cd;
134
135         /* get required compiler data */
136
137         m  = jd->m;
138         cd = jd->cd;
139
140         /* initialize members */
141
142         // Set flags as requested.
143         if (opt_AlwaysEmitLongBranches) {
144                 cd->flags = CODEGENDATA_FLAG_LONGBRANCHES;
145         }
146         else {
147                 cd->flags = 0;
148         }
149
150         cd->mcodebase    = (u1*) DumpMemory::allocate(MCODEINITSIZE);
151         cd->mcodeend     = cd->mcodebase + MCODEINITSIZE;
152         cd->mcodesize    = MCODEINITSIZE;
153
154         /* initialize mcode variables */
155
156         cd->mcodeptr     = cd->mcodebase;
157         cd->lastmcodeptr = cd->mcodebase;
158
159 #if defined(ENABLE_INTRP)
160         /* native dynamic superinstructions variables */
161
162         if (opt_intrp) {
163                 cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
164                 cd->ncodesize = NCODEINITSIZE;
165
166                 /* initialize ncode variables */
167         
168                 cd->ncodeptr = cd->ncodebase;
169
170                 cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
171                 cd->superstarts = NULL;
172         }
173 #endif
174
175         cd->dseg           = NULL;
176         cd->dseglen        = 0;
177
178         cd->jumpreferences = NULL;
179
180 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
181         cd->datareferences = NULL;
182 #endif
183
184         cd->brancheslabel  = new DumpList<branch_label_ref_t*>();
185         cd->linenumbers    = new DumpList<Linenumber>();
186 }
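/* Lifecycle note (assumption about the caller): codegen_setup is run once per
   compilation pass; the areas allocated here live in dump memory and are only
   copied into permanent memory by codegen_finish below, so nothing allocated
   in this function survives the compilation run by itself. */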
187
188
189 /* codegen_reset ***************************************************************
190
191    Resets the codegen data structure so we can recompile the method.
192
193 *******************************************************************************/
194
195 static void codegen_reset(jitdata *jd)
196 {
197         codeinfo    *code;
198         codegendata *cd;
199         basicblock  *bptr;
200
201         /* get required compiler data */
202
203         code = jd->code;
204         cd   = jd->cd;
205
206         /* reset error flag */
207
208         cd->flags          &= ~CODEGENDATA_FLAG_ERROR;
209
210         /* reset some members; we reuse the already allocated code memory
211            as it should have roughly the correct size */
212
213         cd->mcodeptr        = cd->mcodebase;
214         cd->lastmcodeptr    = cd->mcodebase;
215
216         cd->dseg            = NULL;
217         cd->dseglen         = 0;
218
219         cd->jumpreferences  = NULL;
220
221 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
222         cd->datareferences  = NULL;
223 #endif
224
225         cd->brancheslabel   = new DumpList<branch_label_ref_t*>();
226         cd->linenumbers     = new DumpList<Linenumber>();
227         
228         /* We need to clear the mpc and the branch references from all
229            basic blocks as they will definitely change. */
230
231         for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
232                 bptr->mpc        = -1;
233                 bptr->branchrefs = NULL;
234         }
235
236         /* We need to clear all the patcher references from the codeinfo
237            since they all will be regenerated */
238
239         patcher_list_reset(code);
240
241 #if defined(ENABLE_REPLACEMENT)
242         code->rplpoints     = NULL;
243         code->rplpointcount = 0;
244         code->regalloc      = NULL;
245         code->regalloccount = 0;
246         code->globalcount   = 0;
247 #endif
248 }
249
250
251 /* codegen_generate ************************************************************
252
253    Generates the code for the currently compiled method.
254
255 *******************************************************************************/
256
257 bool codegen_generate(jitdata *jd)
258 {
259         codegendata *cd;
260
261         /* get required compiler data */
262
263         cd = jd->cd;
264
265         /* call the machine-dependent code generation function */
266
267         if (!codegen_emit(jd))
268                 return false;
269
270         /* check for an error */
271
272         if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
272                 /* check for the long-branches flag; if it is set we recompile the
273                    method */
275
276 #if !defined(NDEBUG)
277         if (compileverbose)
278             log_message_method("Re-generating code: ", jd->m);
279 #endif
280
281                 /* XXX maybe we should tag long-branches-methods for recompilation */
282
283                 if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
284                         /* we have to reset the codegendata structure first */
285
286                         codegen_reset(jd);
287
288                         /* and restart the compiler run */
289
290                         if (!codegen_emit(jd))
291                                 return false;
292                 }
293                 else {
294                         os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
295                 }
296
297 #if !defined(NDEBUG)
298         if (compileverbose)
299             log_message_method("Re-generating code done: ", jd->m);
300 #endif
301         }
302
303         /* reallocate the memory and finish the code generation */
304
305         codegen_finish(jd);
306
307         /* everything's ok */
308
309         return true;
310 }
311
312
313 /* codegen_close ***************************************************************
314
315    TODO
316
317 *******************************************************************************/
318
319 void codegen_close(void)
320 {
321         /* TODO: release avl tree on i386 and x86_64 */
322 }
323
324
325 /* codegen_increase ************************************************************
326
327    Doubles code area.
328
329 *******************************************************************************/
330
331 void codegen_increase(codegendata *cd)
332 {
333         u1 *oldmcodebase;
334
335         /* save old mcodebase pointer */
336
337         oldmcodebase = cd->mcodebase;
338
339         /* reallocate to new, doubled memory */
340
341         cd->mcodebase = (u1*) DumpMemory::reallocate(cd->mcodebase,
342                                                                                                  cd->mcodesize,
343                                                                                                  cd->mcodesize * 2);
344         cd->mcodesize *= 2;
345         cd->mcodeend   = cd->mcodebase + cd->mcodesize;
346
347         /* set new mcodeptr */
348
349         cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
350
351 #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(__M68K__) || defined(ENABLE_INTRP) \
352  || defined(__SPARC_64__)
353         /* adjust the pointer to the last patcher position */
354
355         if (cd->lastmcodeptr != NULL)
356                 cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
357 #endif
358 }
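/* Usage sketch (an assumption mirroring the MCODECHECK pattern used in
   codegen_emit below): emitters reserve space before writing machine code and
   grow the area on demand, e.g.

       MCODECHECK(64);                   // grows the area via codegen_increase
                                         // if not enough room remains
       ...write instructions through cd->mcodeptr...

   Because DumpMemory::reallocate may move the block, cd->mcodeptr and
   cd->lastmcodeptr are rebased against the new cd->mcodebase above. */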
359
360
361 /* codegen_ncode_increase ******************************************************
362
363    Doubles the ncode area used for native dynamic superinstructions.
364
365 *******************************************************************************/
366
367 #if defined(ENABLE_INTRP)
368 u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
369 {
370         u1 *oldncodebase;
371
372         /* save old ncodebase pointer */
373
374         oldncodebase = cd->ncodebase;
375
376         /* reallocate to new, doubled memory */
377
378         cd->ncodebase = DMREALLOC(cd->ncodebase,
379                                                           u1,
380                                                           cd->ncodesize,
381                                                           cd->ncodesize * 2);
382         cd->ncodesize *= 2;
383
384         /* return the new ncodeptr */
385
386         return (cd->ncodebase + (ncodeptr - oldncodebase));
387 }
388 #endif
389
390
391 /* codegen_add_branch_ref ******************************************************
392
393    Prepends a branch to the target block's branch reference list.
394
395 *******************************************************************************/
396
397 void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
398 {
399         branchref *br;
400         s4         branchmpc;
401
402         STATISTICS(count_branches_unresolved++);
403
404         /* calculate the mpc of the branch instruction */
405
406         branchmpc = cd->mcodeptr - cd->mcodebase;
407
408         br = (branchref*) DumpMemory::allocate(sizeof(branchref));
409
410         br->branchmpc = branchmpc;
411         br->condition = condition;
412         br->reg       = reg;
413         br->options   = options;
414         br->next      = target->branchrefs;
415
416         target->branchrefs = br;
417 }
418
419
420 /* codegen_resolve_branchrefs **************************************************
421
422    Resolves and patches the branch references of a given basic block.
423
424 *******************************************************************************/
425
426 void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
427 {
428         branchref *br;
429         u1        *mcodeptr;
430
431         /* Save the mcodeptr because in the branch emitting functions
432            we generate code somewhere inside already generated code,
433            but we're still in the actual code generation phase. */
434
435         mcodeptr = cd->mcodeptr;
436
437         /* just to make sure */
438
439         assert(bptr->mpc >= 0);
440
441         for (br = bptr->branchrefs; br != NULL; br = br->next) {
442                 /* temporarily set the mcodeptr */
443
444                 cd->mcodeptr = cd->mcodebase + br->branchmpc;
445
446                 /* emit_bccz and emit_branch emit the correct code, even if we
447                    pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
448
449                 emit_bccz(cd, bptr, br->condition, br->reg, br->options);
450         }
451
452         /* restore mcodeptr */
453
454         cd->mcodeptr = mcodeptr;
455 }
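/* Sketch of the two-phase forward-branch scheme (illustrative only):

       // Phase 1, while emitting the branching block: the target's mpc is
       // still unknown, so only the branch site is recorded on the target.
       codegen_add_branch_ref(cd, target_bptr, condition, reg, options);

       // Phase 2, when the target block is emitted and target_bptr->mpc is
       // known: codegen_resolve_branchrefs() temporarily rewinds cd->mcodeptr
       // to each recorded branchmpc and re-emits the branch with the real
       // displacement.
*/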
456
457
458 /* codegen_branch_label_add ****************************************************
459
460    Appends a branch to the label-branch list.
461
462 *******************************************************************************/
463
464 void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
465 {
466         // Calculate the current mpc.
467         int32_t mpc = cd->mcodeptr - cd->mcodebase;
468
469         branch_label_ref_t* br = (branch_label_ref_t*) DumpMemory::allocate(sizeof(branch_label_ref_t));
470
471         br->mpc       = mpc;
472         br->label     = label;
473         br->condition = condition;
474         br->reg       = reg;
475         br->options   = options;
476
477         // Add the branch to the list.
478         cd->brancheslabel->push_back(br);
479 }
480
481
482 /* codegen_set_replacement_point_notrap ****************************************
483
484    Record the position of a non-trappable replacement point.
485
486 *******************************************************************************/
487
488 #if defined(ENABLE_REPLACEMENT)
489 #if !defined(NDEBUG)
490 void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
491 #else
492 void codegen_set_replacement_point_notrap(codegendata *cd)
493 #endif
494 {
495         assert(cd->replacementpoint);
496         assert(cd->replacementpoint->type == type);
497         assert(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP);
498
499         cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
500
501         cd->replacementpoint++;
502 }
503 #endif /* defined(ENABLE_REPLACEMENT) */
504
505
506 /* codegen_set_replacement_point ***********************************************
507
508    Record the position of a trappable replacement point.
509
510 *******************************************************************************/
511
512 #if defined(ENABLE_REPLACEMENT)
513 #if !defined(NDEBUG)
514 void codegen_set_replacement_point(codegendata *cd, s4 type)
515 #else
516 void codegen_set_replacement_point(codegendata *cd)
517 #endif
518 {
519         assert(cd->replacementpoint);
520         assert(cd->replacementpoint->type == type);
521         assert(!(cd->replacementpoint->flags & RPLPOINT_FLAG_NOTRAP));
522
523         cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
524
525         cd->replacementpoint++;
526
527 #if !defined(NDEBUG)
528         /* XXX actually we should use a dedicated REPLACEMENT_NOPS here! */
529         if (opt_TestReplacement)
530                 PATCHER_NOPS;
531 #endif
532
533         /* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
534
535         cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
536 }
537 #endif /* defined(ENABLE_REPLACEMENT) */
538
539
540 /* codegen_finish **************************************************************
541
542    Finishes the code generation. A new memory area, large enough for both
543    data and code, is allocated; data and code are copied together into
544    their final layout, unresolved jumps are resolved, ...
545
546 *******************************************************************************/
547
548 void codegen_finish(jitdata *jd)
549 {
550         s4       mcodelen;
551 #if defined(ENABLE_INTRP)
552         s4       ncodelen;
553 #endif
554         s4       alignedmcodelen;
555         jumpref *jr;
556         u1      *epoint;
557         s4       alignedlen;
558
559         /* Get required compiler data. */
560
561         codeinfo*     code = jd->code;
562         codegendata*  cd   = jd->cd;
563         registerdata* rd   = jd->rd;
564
565         /* prevent compiler warning */
566
567 #if defined(ENABLE_INTRP)
568         ncodelen = 0;
569 #endif
570
571         /* calculate the code length */
572
573         mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
574
575 #if defined(ENABLE_STATISTICS)
576         if (opt_stat) {
577                 count_code_len += mcodelen;
578                 count_data_len += cd->dseglen;
579         }
580 #endif
581
582         alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
583
584 #if defined(ENABLE_INTRP)
585         if (opt_intrp)
586                 ncodelen = cd->ncodeptr - cd->ncodebase;
587         else {
588                 ncodelen = 0; /* avoid compiler warning */
589         }
590 #endif
591
592         cd->dseglen = MEMORY_ALIGN(cd->dseglen, MAX_ALIGN);
593         alignedlen = alignedmcodelen + cd->dseglen;
594
595 #if defined(ENABLE_INTRP)
596         if (opt_intrp) {
597                 alignedlen += ncodelen;
598         }
599 #endif
600
601         /* allocate new memory */
602
603         code->mcodelength = mcodelen + cd->dseglen;
604         code->mcode       = CNEW(u1, alignedlen);
605
606         /* set the entrypoint of the method */
607         
608         assert(code->entrypoint == NULL);
609         code->entrypoint = epoint = (code->mcode + cd->dseglen);
610
611         /* fill the data segment (code->entrypoint must already be set!) */
612
613         dseg_finish(jd);
614
615         /* copy code to the new location */
616
617         MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
618
619 #if defined(ENABLE_INTRP)
620         /* relocate native dynamic superinstruction code (if any) */
621
622         if (opt_intrp) {
623                 cd->mcodebase = code->entrypoint;
624
625                 if (ncodelen > 0) {
626                         u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
627
628                         MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
629
630                         /* flush the instruction and data caches */
631
632                         md_cacheflush(ncodebase, ncodelen);
633
634                         /* set some cd variables for dynamic_super_rewrite */
635
636                         cd->ncodebase = ncodebase;
637
638                 } else {
639                         cd->ncodebase = NULL;
640                 }
641
642                 dynamic_super_rewrite(cd);
643         }
644 #endif
645
646         /* Fill runtime information about generated code. */
647
648         code->stackframesize     = cd->stackframesize;
649         code->synchronizedoffset = rd->memuse * 8;
650         code->savedintcount      = INT_SAV_CNT - rd->savintreguse;
651         code->savedfltcount      = FLT_SAV_CNT - rd->savfltreguse;
652 #if defined(HAS_ADDRESS_REGISTER_FILE)
653         code->savedadrcount      = ADR_SAV_CNT - rd->savadrreguse;
654 #endif
655
656         /* Create the exception table. */
657
658         exceptiontable_create(jd);
659
660         /* Create the linenumber table. */
661
662         code->linenumbertable = new LinenumberTable(jd);
663
664         /* jump table resolving */
665
666         for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
667                 *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
668                         (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
669
670         /* patcher resolving */
671
672         patcher_resolve(jd);
673
674 #if defined(ENABLE_REPLACEMENT)
675         /* replacement point resolving */
676         {
677                 int i;
678                 rplpoint *rp;
679
680                 rp = code->rplpoints;
681                 for (i=0; i<code->rplpointcount; ++i, ++rp) {
682                         rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
683                 }
684         }
685 #endif /* defined(ENABLE_REPLACEMENT) */
686
687         /* Insert method into methodtree to find the entrypoint. */
688
689         methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
690
691 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(__M68K__) || defined(ENABLE_INTRP)
692         /* resolve data segment references */
693
694         dseg_resolve_datareferences(jd);
695 #endif
696
697         /* flush the instruction and data caches */
698
699         md_cacheflush(code->mcode, code->mcodelength);
700 }
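/* Resulting layout (illustrative, matching the diagram at the top of this
   file): code->mcode points to the start of the new allocation, the data
   segment occupies the first cd->dseglen bytes and the entry point follows:

       code->mcode                            code->entrypoint
       |<----------- dseglen ------------>|<-------- mcodelen -------->|
       | data segment (negative offsets)  | machine code               |

   so a data segment entry is typically read as *(entrypoint + disp) with a
   negative disp. */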
701
702
703 /* codegen_start_native_call ***************************************************
704
705    Prepares the stuff required for a native (JNI) function call:
706
707    - adds a stackframe info structure to the chain, for stacktraces
708    - prepares the local references table on the stack
709
710    The layout of the native stub stackframe should look like this:
711
712    +---------------------------+ <- java SP (of parent Java function)
713    | return address            |
714    +---------------------------+ <- data SP
715    |                           |
716    | stackframe info structure |
717    |                           |
718    +---------------------------+
719    |                           |
720    | local references table    |
721    |                           |
722    +---------------------------+
723    |                           |
724    | saved registers (if any)  |
725    |                           |
726    +---------------------------+
727    |                           |
728    | arguments (if any)        |
729    |                           |
730    +---------------------------+ <- current SP (native stub)
731
732 *******************************************************************************/
733
734 java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
735 {
736         stackframeinfo_t *sfi;
737         localref_table   *lrt;
738         codeinfo         *code;
739         methodinfo       *m;
740         int32_t           framesize;
741
742         uint8_t  *datasp;
743         uint8_t  *javasp;
744         uint64_t *arg_regs;
745         uint64_t *arg_stack;
746
747         STATISTICS(count_calls_java_to_native++);
748
749         // Get information from method header.
750         code = code_get_codeinfo_for_pv(pv);
751         assert(code != NULL);
752
753         framesize = md_stacktrace_get_framesize(code);
754         assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));
755
756         // Get the methodinfo.
757         m = code_get_methodinfo_for_pv(pv);
758         assert(m);
759
760         /* calculate needed values */
761
762 #if defined(__ALPHA__) || defined(__ARM__)
763         datasp    = sp + framesize - SIZEOF_VOID_P;
764         javasp    = sp + framesize;
765         arg_regs  = (uint64_t *) sp;
766         arg_stack = (uint64_t *) javasp;
767 #elif defined(__MIPS__)
768         /* MIPS always uses 8 bytes to store the RA */
769         datasp    = sp + framesize - 8;
770         javasp    = sp + framesize;
771 # if SIZEOF_VOID_P == 8
772         arg_regs  = (uint64_t *) sp;
773 # else
774         arg_regs  = (uint64_t *) (sp + 5 * 8);
775 # endif
776         arg_stack = (uint64_t *) javasp;
777 #elif defined(__S390__)
778         datasp    = sp + framesize - 8;
779         javasp    = sp + framesize;
780         arg_regs  = (uint64_t *) (sp + 96);
781         arg_stack = (uint64_t *) javasp;
782 #elif defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
783         datasp    = sp + framesize;
784         javasp    = sp + framesize + SIZEOF_VOID_P;
785         arg_regs  = (uint64_t *) sp;
786         arg_stack = (uint64_t *) javasp;
787 #elif defined(__POWERPC__)
788         datasp    = sp + framesize;
789         javasp    = sp + framesize;
790         arg_regs  = (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
791         arg_stack = (uint64_t *) javasp;
792 #elif defined(__POWERPC64__)
793         datasp    = sp + framesize;
794         javasp    = sp + framesize;
795         arg_regs  = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
796         arg_stack = (uint64_t *) javasp;
797 #else
798         /* XXX I was unable to do this port for SPARC64, sorry. (-michi) */
799         /* XXX maybe we need to pass the RA as argument there */
800         os::abort("codegen_start_native_call: unsupported architecture");
801 #endif
802
803         /* get data structures from stack */
804
805         sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
806         lrt = (localref_table *)   (datasp - sizeof(stackframeinfo_t) - 
807                                                                 sizeof(localref_table));
808
809 #if defined(ENABLE_JNI)
810         /* add current JNI local references table to this thread */
811
812         localref_table_add(lrt);
813 #endif
814
815 #if !defined(NDEBUG)
816 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
817         /* print the call-trace if necessary */
818         /* BEFORE: filling the local reference table */
819
820         if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
821                 trace_java_call_enter(m, arg_regs, arg_stack);
822 # endif
823 #endif
824
825 #if defined(ENABLE_HANDLES)
826         /* place all references into the local reference table */
827         /* BEFORE: creating stackframeinfo */
828
829         localref_native_enter(m, arg_regs, arg_stack);
830 #endif
831
832         /* Add a stackframeinfo for this native method.  We don't have RA
833            and XPC here.  These are determined in
834            stacktrace_stackframeinfo_add. */
835
836         stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
837
838         /* Return a wrapped classinfo for static methods. */
839
840         if (m->flags & ACC_STATIC)
841                 return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
842         else
843                 return NULL;
844 }
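/* Note (assumption based on the JNI calling convention): the wrapped
   classinfo returned for static methods is what the native stub passes as the
   jclass argument to the native function; instance methods receive their
   'this' reference instead, hence the NULL return for them here. */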
845
846
847 /* codegen_finish_native_call **************************************************
848
849    Removes the stuff required for a native (JNI) function call.
850    Additionally it checks for an exception and, if one occurred, gets the
851    exception object and clears the pointer.
852
853 *******************************************************************************/
854
855 java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
856 {
857         stackframeinfo_t *sfi;
858         java_handle_t    *e;
859         java_object_t    *o;
860         codeinfo         *code;
861         methodinfo       *m;
862         int32_t           framesize;
863
864         uint8_t  *datasp;
865         uint64_t *ret_regs;
866
867         // Get information from method header.
868         code = code_get_codeinfo_for_pv(pv);
869         assert(code != NULL);
870
871         framesize = md_stacktrace_get_framesize(code);
872
873         // Get the methodinfo.
874         m = code->m;
875         assert(m != NULL);
876
877         /* calculate needed values */
878
879 #if defined(__ALPHA__) || defined(__ARM__)
880         datasp   = sp + framesize - SIZEOF_VOID_P;
881         ret_regs = (uint64_t *) sp;
882 #elif defined(__MIPS__)
883         /* MIPS always uses 8 bytes to store the RA */
884         datasp   = sp + framesize - 8;
885 # if SIZEOF_VOID_P == 8
886         ret_regs = (uint64_t *) sp;
887 # else
888         ret_regs = (uint64_t *) (sp + 1 * 8);
889 # endif
890 #elif defined(__S390__)
891         datasp   = sp + framesize - 8;
892         ret_regs = (uint64_t *) (sp + 96);
893 #elif defined(__I386__)
894         datasp   = sp + framesize;
895         ret_regs = (uint64_t *) (sp + 2 * SIZEOF_VOID_P);
896 #elif defined(__M68K__)
897         datasp   = sp + framesize;
898         ret_regs = (uint64_t *) (sp + 2 * 8);
899 #elif defined(__X86_64__)
900         datasp   = sp + framesize;
901         ret_regs = (uint64_t *) sp;
902 #elif defined(__POWERPC__)
903         datasp   = sp + framesize;
904         ret_regs = (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
905 #elif defined(__POWERPC64__)
906         datasp   = sp + framesize;
907         ret_regs = (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
908 #else
909         os::abort("codegen_finish_native_call: unsupported architecture");
910 #endif
911
912         /* get data structures from stack */
913
914         sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
915
916         /* Remove current stackframeinfo from chain. */
917
918         stacktrace_stackframeinfo_remove(sfi);
919
920 #if defined(ENABLE_HANDLES)
921         /* unwrap the return value from the local reference table */
922         /* AFTER: removing the stackframeinfo */
923         /* BEFORE: releasing the local reference table */
924
925         localref_native_exit(m, ret_regs);
926 #endif
927
928         /* get and unwrap the exception */
929         /* AFTER: removing the stackframe info */
930         /* BEFORE: releasing the local reference table */
931
932         e = exceptions_get_and_clear_exception();
933         o = LLNI_UNWRAP(e);
934
935 #if defined(ENABLE_JNI)
936         /* release JNI local references table for this thread */
937
938         localref_frame_pop_all();
939         localref_table_remove();
940 #endif
941
942 #if !defined(NDEBUG)
943 # if defined(__ALPHA__) || defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
944         /* print the call-trace if necessary */
945         /* AFTER: unwrapping the return value */
946
947         if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
948                 trace_java_call_exit(m, ret_regs);
949 # endif
950 #endif
951
952         return o;
953 }
954
955
956 /* codegen_reg_of_var **********************************************************
957
958    This function determines the register to which the result of an
959    operation should go, when it is ultimately intended to store the
960    result in pseudoregister v.  If v is assigned to an actual
961    register, this register will be returned.  Otherwise (when v is
962    spilled) this function returns tempregnum.  If not already done,
963    regoff and flags are set in the stack location.
964
965 *******************************************************************************/
966
967 s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
968 {
969         if (!(v->flags & INMEMORY))
970                 return v->vv.regoff;
971
972         return tempregnum;
973 }
974
975
976 /* codegen_reg_of_dst **********************************************************
977
978    This function determines the register to which the result of an
979    operation should go, when it is ultimately intended to store the
980    result in iptr->dst.var.  If dst.var is assigned to an actual
981    register, this register will be returned.  Otherwise (when it is
982    spilled) this function returns tempregnum.  If not already done,
983    regoff and flags are set in the stack location.
984
985 *******************************************************************************/
986
987 s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
988 {
989         return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
990 }
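/* Typical usage sketch (mirrors the ICMD cases in codegen_emit below):

       s1 = emit_load_s1(jd, iptr, REG_ITMP1);        // load first operand
       d  = codegen_reg_of_dst(jd, iptr, REG_ITMP2);  // real reg or temp reg
       ...emit the operation with its result in d...
       emit_store_dst(jd, iptr, d);                   // spills only if needed
*/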
991
992 /**
993  * Fix up register locations in the case where control is transferred to an
994  * exception handler block via normal control flow (no exception).
995  */
996 static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
997 {
998         // Exception handlers have exactly 1 in-slot
999         assert(bptr->indepth == 1);
1000         varinfo *var = VAR(bptr->invars[0]);
1001         int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1002         emit_load(jd, NULL, var, d);
1003         // Copy the interface variable to ITMP1 (XPTR) because that's where
1004         // the handler expects it.
1005         emit_imove(jd->cd, d, REG_ITMP1_XPTR);
1006 }
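/* Illustrative call site (an assumption based on the purpose of this fixup):
   when a block branches or falls through into an exception handler block
   (BBTYPE_EXH) without an exception being thrown, codegen_emit is expected to
   call

       fixup_exc_handler_interface(jd, handler_block);

   before emitting the transfer, so the handler finds the expected value in
   REG_ITMP1_XPTR just as it would after a real exception. */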
1007
1008 /**
1009  * Generates machine code.
1010  */
1011 bool codegen_emit(jitdata *jd)
1012 {
1013         varinfo*            var;
1014         builtintable_entry* bte = 0;
1015         methoddesc*         md;
1016         int32_t             s1, s2, /*s3,*/ d;
1017         int32_t             fieldtype;
1018         int32_t             disp;
1019         int                 i;
1020
1021         // Get required compiler data.
1022         //methodinfo*   m    = jd->m;
1023         codeinfo*     code = jd->code;
1024         codegendata*  cd   = jd->cd;
1025         registerdata* rd   = jd->rd;
1026 #if defined(ENABLE_SSA)
1027         lsradata*     ls   = jd->ls;
1028         bool last_cmd_was_goto = false;
1029 #endif
1030
1031         // Space to save used callee saved registers.
1032         int32_t savedregs_num = 0;
1033         savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1034         savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1035 #ifdef HAS_ADDRESS_REGISTER_FILE
1036         savedregs_num += (ADR_SAV_CNT - rd->savadrreguse);
1037 #endif
1038
1039         // Calculate size of stackframe.
1040         cd->stackframesize = rd->memuse + savedregs_num;
1041
1042         // Space to save the return address.
1043 #if STACKFRAME_RA_TOP_OF_FRAME
1044 # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1045         if (!code_is_leafmethod(code))
1046 # endif
1047                 cd->stackframesize += 1;
1048 #endif
1049
1050         // Space to save argument of monitor_enter.
1051 #if defined(ENABLE_THREADS)
1052         if (checksync && code_is_synchronized(code))
1053 # if STACKFRAME_SYNC_NEEDS_TWO_SLOTS
1054                 /* On some architectures the stack position for the argument cannot
1055                    be shared with the place used to save the return register value
1056                    across monitor_exit, since both values reside in the same register. */
1057                 cd->stackframesize += 2;
1058 # else
1059                 cd->stackframesize += 1;
1060 # endif
1061 #endif
1062
1063         // Keep stack of non-leaf functions 16-byte aligned for calls into
1064         // native code.
1065         if (!code_is_leafmethod(code) || JITDATA_HAS_FLAG_VERBOSECALL(jd))
1066 #if STACKFRMAE_RA_BETWEEN_FRAMES
1067                 ALIGN_ODD(cd->stackframesize);
1068 #else
1069                 ALIGN_EVEN(cd->stackframesize);
1070 #endif
1071
1072 #if defined(SPECIALMEMUSE)
1073         // On architectures having a linkage area, we can get rid of the whole
1074         // stackframe in leaf functions without saved registers.
1075         if (code_is_leafmethod(code) && (cd->stackframesize == LA_SIZE_IN_POINTERS))
1076                 cd->stackframesize = 0;
1077 #endif
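        /* Illustrative example (numbers are made up): a synchronized non-leaf
           method with rd->memuse = 4 and two saved integer registers gets
           4 + 2 (saved registers) + 1 sync slot (2 where
           STACKFRAME_SYNC_NEEDS_TWO_SLOTS is defined) + possibly 1 slot for
           the return address, rounded via ALIGN_ODD/ALIGN_EVEN so calls into
           native code see a 16-byte aligned stack. */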
1078
1079         /*
1080          * SECTION 1: Method header generation.
1081          */
1082
1083         // The method header was reduced to the bare minimum of one pointer
1084         // to the codeinfo structure, which in turn contains all runtime
1085         // information. However, this section, together with the methodheader.h
1086         // file, is kept alive for historical reasons. It might come in
1087         // handy at some point.
1088
1089         (void) dseg_add_unique_address(cd, code);   ///< CodeinfoPointer
1090
1091         // XXX, REMOVEME: We still need it for exception handling in assembler.
1092         // XXX ARM, M68K: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1093 #if defined(__I386__)
1094         int align_off = (cd->stackframesize != 0) ? 4 : 0;
1095         (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize       */
1096 #else
1097         (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize       */
1098 #endif
1099         // XXX M68K: We use the IntSave as a split field for the adr now
1100         //           (void) dseg_add_unique_s4(cd, (ADR_SAV_CNT - rd->savadrreguse) << 16 | (INT_SAV_CNT - rd->savintreguse)); /* IntSave */
1101         (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1102         (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1103         (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1104
1105         /*
1106          * SECTION 2: Method prolog generation.
1107          */
1108
1109 #if defined(ENABLE_PROFILING)
1110         // Generate method profiling code.
1111         if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1112
1113                 // Count method frequency.
1114                 emit_profile_method(cd, code);
1115
1116                 // Start CPU cycle counting.
1117                 emit_profile_cycle_start(cd, code);
1118         }
1119 #endif
1120
1121         // Emit code for the method prolog.
1122         codegen_emit_prolog(jd);
1123
1124 #if defined(ENABLE_THREADS)
1125         // Emit code to call monitorenter function.
1126         if (checksync && code_is_synchronized(code))
1127                 emit_monitor_enter(jd, rd->memuse * 8);
1128 #endif
1129
1130 #if !defined(NDEBUG)
1131         // Call trace function.
1132         if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1133                 emit_verbosecall_enter(jd);
1134 #endif
1135
1136 #if defined(ENABLE_SSA)
1137         // With SSA the header is basic block 0; insert phi moves if necessary.
1138         if (ls != NULL)
1139                 codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1140 #endif
1141
1142         // Create replacement points.
1143         REPLACEMENT_POINTS_INIT(cd, jd);
1144
1145         /*
1146          * SECTION 3: ICMD code generation.
1147          */
1148
1149         // Walk through all basic blocks.
1150         for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1151
1152                 bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1153
1154                 // Is this basic block reached?
1155                 if (bptr->flags < BBREACHED)
1156                         continue;
1157
1158                 // Branch resolving.
1159                 codegen_resolve_branchrefs(cd, bptr);
1160
1161                 // Handle replacement points.
1162                 REPLACEMENT_POINT_BLOCK_START(cd, bptr);
1163
1164 #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1165                 // Generate countdown trap code.
1166                 methodinfo* m = jd->m;
1167                 if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1168                         if (cd->replacementpoint[-1].flags & RPLPOINT_FLAG_COUNTDOWN) {
1169                                 MCODECHECK(32);
1170                                 emit_trap_countdown(cd, &(m->hitcountdown));
1171                         }
1172                 }
1173 #endif
1174
1175 #if defined(ENABLE_PROFILING)
1176                 // Generate basicblock profiling code.
1177                 if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1178
1179                         // Count basicblock frequency.
1180                         emit_profile_basicblock(cd, code, bptr);
1181
1182                         // If this is an exception handler, start profiling again.
1183                         if (bptr->type == BBTYPE_EXH)
1184                                 emit_profile_cycle_start(cd, code);
1185                 }
1186 #endif
1187
1188                 // Copy interface registers to their destination.
1189                 int32_t indepth = bptr->indepth;
1190                 // XXX Check if this is true for all archs.
1191                 MCODECHECK(64+indepth);   // All
1192                 MCODECHECK(128+indepth);  // PPC64
1193                 MCODECHECK(512);          // I386, X86_64, S390
1194 #if defined(ENABLE_SSA)
1195                 // XXX Check if this is correct and add a proper comment!
1196                 if (ls != NULL) {
1197                         last_cmd_was_goto = false;
1198                 } else {
1199 #elif defined(ENABLE_LSRA)
1200                 if (opt_lsra) {
1201                         while (indepth > 0) {
1202                                 indepth--;
1203                                 var = VAR(bptr->invars[indepth]);
1204                                 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1205                                         if (!IS_INMEMORY(var->flags))
1206                                                 d = var->vv.regoff;
1207                                         else
1208                                                 d = REG_ITMP1_XPTR;
1209                                         // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1210                                         // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1211                                         // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1212                                         emit_imove(cd, REG_ITMP1_XPTR, d);
1213                                         emit_store(jd, NULL, var, d);
1214                                 }
1215                         }
1216                 } else {
1217 #endif
1218                         while (indepth > 0) {
1219                                 indepth--;
1220                                 var = VAR(bptr->invars[indepth]);
1221                                 if ((indepth == bptr->indepth-1) && (bptr->type == BBTYPE_EXH)) {
1222                                         d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1223                                         // XXX M68K: Actually this is M_ADRMOVE(REG_ATMP1_XPTR, d);
1224                                         // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1225                                         // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1226                                         emit_imove(cd, REG_ITMP1_XPTR, d);
1227                                         emit_store(jd, NULL, var, d);
1228                                 }
1229                                 else {
1230                                         assert((var->flags & INOUT));
1231                                 }
1232                         }
1233 #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1234                 }
1235 #endif
1236
1237                 // Walk through all instructions.
1238                 int32_t len = bptr->icount;
1239                 uint16_t currentline = 0;
1240                 for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1241
1242                         // Add line number.
1243                         if (iptr->line != currentline) {
1244                                 linenumbertable_list_entry_add(cd, iptr->line);
1245                                 currentline = iptr->line;
1246                         }
1247
1248                         // An instruction usually needs < 64 words.
1249                         // XXX Check if this is true for all archs.
1250                         MCODECHECK(64);    // All
1251                         MCODECHECK(128);   // PPC64
1252                         MCODECHECK(1024);  // I386, X86_64, M68K, S390      /* 1kB should be enough */
1253
1254                         // The big switch.
1255                         switch (iptr->opc) {
1256
1257                         case ICMD_NOP:        /* ...  ==> ...                             */
1258                         case ICMD_POP:        /* ..., value  ==> ...                      */
1259                         case ICMD_POP2:       /* ..., value, value  ==> ...               */
1260                                 break;
1261
1262                         case ICMD_CHECKNULL:  /* ..., objectref  ==> ..., objectref       */
1263
1264                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1265                                 emit_nullpointer_check(cd, iptr, s1);
1266                                 break;
1267
1268                         case ICMD_BREAKPOINT: /* ...  ==> ...                             */
1269                                               /* sx.val.anyptr = Breakpoint               */
1270
1271                                 patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1272                                 PATCHER_NOPS;
1273                                 break;
1274
1275 #if defined(ENABLE_SSA)
1276                         case ICMD_GETEXCEPTION:
1277
1278                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1279                                 emit_imove(cd, REG_ITMP1, d);
1280                                 emit_store_dst(jd, iptr, d);
1281                                 break;
1282 #endif
1283
1284                         /* inline operations **********************************************/
1285
1286                         case ICMD_INLINE_START:
1287
1288                                 REPLACEMENT_POINT_INLINE_START(cd, iptr);
1289                                 break;
1290
1291                         case ICMD_INLINE_BODY:
1292
1293                                 REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
1294                                 linenumbertable_list_entry_add_inline_start(cd, iptr);
1295                                 linenumbertable_list_entry_add(cd, iptr->line);
1296                                 break;
1297
1298                         case ICMD_INLINE_END:
1299
1300                                 linenumbertable_list_entry_add_inline_end(cd, iptr);
1301                                 linenumbertable_list_entry_add(cd, iptr->line);
1302                                 break;
1303
1304
1305                         /* constant operations ********************************************/
1306
1307                         case ICMD_ICONST:     /* ...  ==> ..., constant                   */
1308
1309                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1310                                 ICONST(d, iptr->sx.val.i);
1311                                 emit_store_dst(jd, iptr, d);
1312                                 break;
1313
1314                         case ICMD_LCONST:     /* ...  ==> ..., constant                   */
1315
1316                                 d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1317                                 LCONST(d, iptr->sx.val.l);
1318                                 emit_store_dst(jd, iptr, d);
1319                                 break;
1320
1321
1322                         /* load/store/copy/move operations ********************************/
1323
1324                         case ICMD_COPY:
1325                         case ICMD_MOVE:
1326                         case ICMD_ILOAD:      /* ...  ==> ..., content of local variable  */
1327                         case ICMD_LLOAD:      /* s1 = local variable                      */
1328                         case ICMD_FLOAD:
1329                         case ICMD_DLOAD:
1330                         case ICMD_ALOAD:
1331                         case ICMD_ISTORE:     /* ..., value  ==> ...                      */
1332                         case ICMD_LSTORE:
1333                         case ICMD_FSTORE:
1334                         case ICMD_DSTORE:
1335
1336                                 emit_copy(jd, iptr);
1337                                 break;
1338
1339                         case ICMD_ASTORE:
1340
1341                                 if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1342                                         emit_copy(jd, iptr);
1343                                 break;
1344
1345
1346                         /* integer operations *********************************************/
1347
1348                         case ICMD_FCONST:     /* ...  ==> ..., constant                   */
1349                         case ICMD_DCONST:     /* ...  ==> ..., constant                   */
1350                         case ICMD_ACONST:     /* ...  ==> ..., constant                   */
1351                         case ICMD_INEG:       /* ..., value  ==> ..., - value             */
1352                         case ICMD_LNEG:       /* ..., value  ==> ..., - value             */
1353                         case ICMD_I2L:        /* ..., value  ==> ..., value               */
1354                         case ICMD_L2I:        /* ..., value  ==> ..., value               */
1355                         case ICMD_INT2BYTE:   /* ..., value  ==> ..., value               */
1356                         case ICMD_INT2CHAR:   /* ..., value  ==> ..., value               */
1357                         case ICMD_INT2SHORT:  /* ..., value  ==> ..., value               */
1358                         case ICMD_IADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1359                         case ICMD_IINC:
1360                         case ICMD_IADDCONST:  /* ..., value  ==> ..., value + constant    */
1361                                               /* sx.val.i = constant                      */
1362                         case ICMD_LADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1363                         case ICMD_LADDCONST:  /* ..., value  ==> ..., value + constant    */
1364                                               /* sx.val.l = constant                      */
1365                         case ICMD_ISUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1366                         case ICMD_ISUBCONST:  /* ..., value  ==> ..., value + constant    */
1367                                               /* sx.val.i = constant                      */
1368                         case ICMD_LSUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1369                         case ICMD_LSUBCONST:  /* ..., value  ==> ..., value - constant    */
1370                                               /* sx.val.l = constant                      */
1371                         case ICMD_IMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1372                         case ICMD_IMULCONST:  /* ..., value  ==> ..., value * constant    */
1373                                               /* sx.val.i = constant                      */
1374                         case ICMD_IMULPOW2:   /* ..., value  ==> ..., value * (2 ^ constant) */
1375                                               /* sx.val.i = constant                      */
1376                         case ICMD_LMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1377                         case ICMD_LMULCONST:  /* ..., value  ==> ..., value * constant    */
1378                                               /* sx.val.l = constant                      */
1379                         case ICMD_LMULPOW2:   /* ..., value  ==> ..., value * (2 ^ constant) */
1380                                               /* sx.val.l = constant                      */
1381                         case ICMD_IDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1382                         case ICMD_IREM:       /* ..., val1, val2  ==> ..., val1 % val2    */
1383                         case ICMD_IDIVPOW2:   /* ..., value  ==> ..., value >> constant   */
1384                                               /* sx.val.i = constant                      */
1385                         case ICMD_IREMPOW2:   /* ..., value  ==> ..., value % constant    */
1386                                               /* sx.val.i = constant                      */
1387                         case ICMD_LDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1388                         case ICMD_LREM:       /* ..., val1, val2  ==> ..., val1 % val2    */
1389                         case ICMD_LDIVPOW2:   /* ..., value  ==> ..., value >> constant   */
1390                                               /* sx.val.i = constant                      */
1391                         case ICMD_LREMPOW2:   /* ..., value  ==> ..., value % constant    */
1392                                               /* sx.val.l = constant                      */
1393                         case ICMD_ISHL:       /* ..., val1, val2  ==> ..., val1 << val2   */
1394                         case ICMD_ISHLCONST:  /* ..., value  ==> ..., value << constant   */
1395                                               /* sx.val.i = constant                      */
1396                         case ICMD_ISHR:       /* ..., val1, val2  ==> ..., val1 >> val2   */
1397                         case ICMD_ISHRCONST:  /* ..., value  ==> ..., value >> constant   */
1398                                               /* sx.val.i = constant                      */
1399                         case ICMD_IUSHR:      /* ..., val1, val2  ==> ..., val1 >>> val2  */
1400                         case ICMD_IUSHRCONST: /* ..., value  ==> ..., value >>> constant  */
1401                                               /* sx.val.i = constant                      */
1402                         case ICMD_LSHL:       /* ..., val1, val2  ==> ..., val1 << val2   */
1403                         case ICMD_LSHLCONST:  /* ..., value  ==> ..., value << constant   */
1404                                               /* sx.val.i = constant                      */
1405                         case ICMD_LSHR:       /* ..., val1, val2  ==> ..., val1 >> val2   */
1406                         case ICMD_LSHRCONST:  /* ..., value  ==> ..., value >> constant   */
1407                                               /* sx.val.i = constant                      */
1408                         case ICMD_LUSHR:      /* ..., val1, val2  ==> ..., val1 >>> val2  */
1409                         case ICMD_LUSHRCONST: /* ..., value  ==> ..., value >>> constant  */
1410                                               /* sx.val.l = constant                      */
1411                         case ICMD_IAND:       /* ..., val1, val2  ==> ..., val1 & val2    */
1412                         case ICMD_IANDCONST:  /* ..., value  ==> ..., value & constant    */
1413                                               /* sx.val.i = constant                      */
1414                         case ICMD_LAND:       /* ..., val1, val2  ==> ..., val1 & val2    */
1415                         case ICMD_LANDCONST:  /* ..., value  ==> ..., value & constant    */
1416                                               /* sx.val.l = constant                      */
1417                         case ICMD_IOR:        /* ..., val1, val2  ==> ..., val1 | val2    */
1418                         case ICMD_IORCONST:   /* ..., value  ==> ..., value | constant    */
1419                                               /* sx.val.i = constant                      */
1420                         case ICMD_LOR:        /* ..., val1, val2  ==> ..., val1 | val2    */
1421                         case ICMD_LORCONST:   /* ..., value  ==> ..., value | constant    */
1422                                               /* sx.val.l = constant                      */
1423                         case ICMD_IXOR:       /* ..., val1, val2  ==> ..., val1 ^ val2    */
1424                         case ICMD_IXORCONST:  /* ..., value  ==> ..., value ^ constant    */
1425                                               /* sx.val.i = constant                      */
1426                         case ICMD_LXOR:       /* ..., val1, val2  ==> ..., val1 ^ val2    */
1427                         case ICMD_LXORCONST:  /* ..., value  ==> ..., value ^ constant    */
1428                                               /* sx.val.l = constant                      */
1429
1430                                 // Generate architecture specific instructions.
1431                                 codegen_emit_instruction(jd, iptr);
1432                                 break;
1433
1434
1435                         /* floating operations ********************************************/
1436
1437 #if !defined(ENABLE_SOFTFLOAT)
1438                         case ICMD_FNEG:       /* ..., value  ==> ..., - value             */
1439                         case ICMD_DNEG:
1440                         case ICMD_FADD:       /* ..., val1, val2  ==> ..., val1 + val2    */
1441                         case ICMD_DADD:
1442                         case ICMD_FSUB:       /* ..., val1, val2  ==> ..., val1 - val2    */
1443                         case ICMD_DSUB:
1444                         case ICMD_FMUL:       /* ..., val1, val2  ==> ..., val1 * val2    */
1445                         case ICMD_DMUL:
1446                         case ICMD_FDIV:       /* ..., val1, val2  ==> ..., val1 / val2    */
1447                         case ICMD_DDIV:
1448                         case ICMD_FREM:       /* ..., val1, val2  ==> ..., val1 % val2        */
1449                         case ICMD_DREM:
1450                         case ICMD_I2F:        /* ..., value  ==> ..., (float) value       */
1451                         case ICMD_I2D:        /* ..., value  ==> ..., (double) value      */
1452                         case ICMD_L2F:        /* ..., value  ==> ..., (float) value       */
1453                         case ICMD_L2D:        /* ..., value  ==> ..., (double) value      */
1454                         case ICMD_F2I:        /* ..., value  ==> ..., (int) value         */
1455                         case ICMD_D2I:
1456                         case ICMD_F2L:        /* ..., value  ==> ..., (long) value        */
1457                         case ICMD_D2L:
1458                         case ICMD_F2D:        /* ..., value  ==> ..., (double) value      */
1459                         case ICMD_D2F:        /* ..., value  ==> ..., (float) value       */
1460                         case ICMD_FCMPL:      /* ..., val1, val2  ==> ..., val1 fcmpl val2 */
1461                         case ICMD_DCMPL:      /* == => 0, < => 1, > => -1                 */
1462                         case ICMD_FCMPG:      /* ..., val1, val2  ==> ..., val1 fcmpg val2 */
1463                         case ICMD_DCMPG:      /* == => 0, < => 1, > => -1                 */
1464
1465                                 // Generate architecture specific instructions.
1466                                 codegen_emit_instruction(jd, iptr);
1467                                 break;
1468 #endif /* !defined(ENABLE_SOFTFLOAT) */
1469
1470
1471                         /* memory operations **********************************************/
1472
1473                         case ICMD_ARRAYLENGTH:/* ..., arrayref  ==> ..., length           */
1474
1475                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1476                                 d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1477                                 /* implicit null-pointer check */
1478                                 // XXX PPC64: Here we had an explicit null-pointer check
1479                                 //     which I think was obsolete, please confirm. Otherwise:
1480                                 // emit_nullpointer_check(cd, iptr, s1);
1481                                 M_ILD(d, s1, OFFSET(java_array_t, size));
1482                                 emit_store_dst(jd, iptr, d);
1483                                 break;
1484
1485                         case ICMD_BALOAD:     /* ..., arrayref, index  ==> ..., value     */
1486                         case ICMD_CALOAD:     /* ..., arrayref, index  ==> ..., value     */
1487                         case ICMD_SALOAD:     /* ..., arrayref, index  ==> ..., value     */
1488                         case ICMD_IALOAD:     /* ..., arrayref, index  ==> ..., value     */
1489                         case ICMD_LALOAD:     /* ..., arrayref, index  ==> ..., value     */
1490                         case ICMD_FALOAD:     /* ..., arrayref, index  ==> ..., value     */
1491                         case ICMD_DALOAD:     /* ..., arrayref, index  ==> ..., value     */
1492                         case ICMD_AALOAD:     /* ..., arrayref, index  ==> ..., value     */
1493                         case ICMD_BASTORE:    /* ..., arrayref, index, value  ==> ...     */
1494                         case ICMD_CASTORE:    /* ..., arrayref, index, value  ==> ...     */
1495                         case ICMD_SASTORE:    /* ..., arrayref, index, value  ==> ...     */
1496                         case ICMD_IASTORE:    /* ..., arrayref, index, value  ==> ...     */
1497                         case ICMD_LASTORE:    /* ..., arrayref, index, value  ==> ...     */
1498                         case ICMD_FASTORE:    /* ..., arrayref, index, value  ==> ...     */
1499                         case ICMD_DASTORE:    /* ..., arrayref, index, value  ==> ...     */
1500                         case ICMD_AASTORE:    /* ..., arrayref, index, value  ==> ...     */
1501                         case ICMD_BASTORECONST:   /* ..., arrayref, index  ==> ...        */
1502                         case ICMD_CASTORECONST:   /* ..., arrayref, index  ==> ...        */
1503                         case ICMD_SASTORECONST:   /* ..., arrayref, index  ==> ...        */
1504                         case ICMD_IASTORECONST:   /* ..., arrayref, index  ==> ...        */
1505                         case ICMD_LASTORECONST:   /* ..., arrayref, index  ==> ...        */
1506                         case ICMD_FASTORECONST:   /* ..., arrayref, index  ==> ...        */
1507                         case ICMD_DASTORECONST:   /* ..., arrayref, index  ==> ...        */
1508                         case ICMD_AASTORECONST:   /* ..., arrayref, index  ==> ...        */
1509                         case ICMD_GETFIELD:   /* ...  ==> ..., value                      */
1510                         case ICMD_PUTFIELD:   /* ..., value  ==> ...                      */
1511                         case ICMD_PUTFIELDCONST:  /* ..., objectref  ==> ...              */
1512                                                   /* val = value (in current instruction) */
1513                         case ICMD_PUTSTATICCONST: /* ...  ==> ...                         */
1514                                                   /* val = value (in current instruction) */
1515
1516                                 // Generate architecture specific instructions.
1517                                 codegen_emit_instruction(jd, iptr);
1518                                 break;
1519
1520                         case ICMD_GETSTATIC:  /* ...  ==> ..., value                      */
1521
1522 #if defined(__I386__)
1523                                 // Generate architecture specific instructions.
1524                                 codegen_emit_instruction(jd, iptr);
1525                                 break;
1526 #else
1527                         {
1528                                 fieldinfo* fi;
1529                                 patchref_t* pr;
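                                     // For an unresolved field, reserve a data segment slot and
                                     // let the patcher fill in the field address at run time.
                                     // For a resolved field, put the address into the data
                                     // segment directly; a patcher is still needed if the class
                                     // has not been initialized yet.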
1530                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1531                                         unresolved_field* uf = iptr->sx.s23.s3.uf;
1532                                         fieldtype = uf->fieldref->parseddesc.fd->type;
1533                                         disp      = dseg_add_unique_address(cd, 0);
1534
1535                                         pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1536
1537                                         fi = NULL;              /* Silence compiler warning */
1538                                 }
1539                                 else {
1540                                         fi        = iptr->sx.s23.s3.fmiref->p.field;
1541                                         fieldtype = fi->type;
1542                                         disp      = dseg_add_address(cd, fi->value);
1543
1544                                         if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1545                                                 PROFILE_CYCLE_STOP;
1546                                                 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1547                                                 PROFILE_CYCLE_START;
1548                                         }
1549
1550                                         pr = NULL;              /* Silence compiler warning */
1551                                 }
1552
1553 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1554                                 codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1555 #endif
1556
1557                                 // XXX X86_64: Here we had this:
1558                                 /* This approach is much faster than moving the field
1559                                    address inline into a register. */
1560
1561                                 M_ALD_DSEG(REG_ITMP1, disp);
1562
1563                                 switch (fieldtype) {
1564                                 case TYPE_ADR:
1565                                         d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1566                                         M_ALD(d, REG_ITMP1, 0);
1567                                         break;
1568                                 case TYPE_INT:
1569 #if defined(ENABLE_SOFTFLOAT)
1570                                 case TYPE_FLT:
1571 #endif
1572                                         d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1573                                         M_ILD(d, REG_ITMP1, 0);
1574                                         break;
1575                                 case TYPE_LNG:
1576 #if defined(ENABLE_SOFTFLOAT)
1577                                 case TYPE_DBL:
1578 #endif
1579                                         d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1580                                         M_LLD(d, REG_ITMP1, 0);
1581                                         break;
1582 #if !defined(ENABLE_SOFTFLOAT)
1583                                 case TYPE_FLT:
1584                                         d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1585                                         M_FLD(d, REG_ITMP1, 0);
1586                                         break;
1587                                 case TYPE_DBL:
1588                                         d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1589                                         M_DLD(d, REG_ITMP1, 0);
1590                                         break;
1591 #endif
1592                                 default:
1593                                         // Silence compiler warning.
1594                                         d = 0;
1595                                 }
1596                                 emit_store_dst(jd, iptr, d);
1597                                 break;
1598                         }
1599 #endif
1600
1601                         case ICMD_PUTSTATIC:  /* ..., value  ==> ...                      */
1602
1603 #if defined(__I386__)
1604                                 // Generate architecture specific instructions.
1605                                 codegen_emit_instruction(jd, iptr);
1606                                 break;
1607 #else
1608                         {
1609                                 fieldinfo* fi;
1610                                 patchref_t* pr;
1611
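                                     // Same scheme as ICMD_GETSTATIC above: unresolved fields get
                                     // a patchable data segment slot, resolved fields get the
                                     // address directly, plus a class-initialization patcher if
                                     // required.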
1612                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1613                                         unresolved_field* uf = iptr->sx.s23.s3.uf;
1614                                         fieldtype = uf->fieldref->parseddesc.fd->type;
1615                                         disp      = dseg_add_unique_address(cd, 0);
1616
1617                                         pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1618
1619                                         fi = NULL;              /* Silence compiler warning */
1620                                 }
1621                                 else {
1622                                         fi = iptr->sx.s23.s3.fmiref->p.field;
1623                                         fieldtype = fi->type;
1624                                         disp      = dseg_add_address(cd, fi->value);
1625
1626                                         if (!CLASS_IS_OR_ALMOST_INITIALIZED(fi->clazz)) {
1627                                                 PROFILE_CYCLE_STOP;
1628                                                 patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1629                                                 PROFILE_CYCLE_START;
1630                                         }
1631
1632                                         pr = NULL;              /* Silence compiler warning */
1633                                 }
1634
1635                                 // XXX X86_64: Here we had this:
1636                                 /* This approach is much faster than moving the field
1637                                    address inline into a register. */
1638
1639                                 M_ALD_DSEG(REG_ITMP1, disp);
1640
1641                                 switch (fieldtype) {
1642                                 case TYPE_ADR:
1643                                         s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1644                                         M_AST(s1, REG_ITMP1, 0);
1645                                         break;
1646                                 case TYPE_INT:
1647 #if defined(ENABLE_SOFTFLOAT)
1648                                 case TYPE_FLT:
1649 #endif
1650                                         s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1651                                         M_IST(s1, REG_ITMP1, 0);
1652                                         break;
1653                                 case TYPE_LNG:
1654 #if defined(ENABLE_SOFTFLOAT)
1655                                 case TYPE_DBL:
1656 #endif
1657                                         s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1658                                         M_LST(s1, REG_ITMP1, 0);
1659                                         break;
1660 #if !defined(ENABLE_SOFTFLOAT)
1661                                 case TYPE_FLT:
1662                                         s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1663                                         M_FST(s1, REG_ITMP1, 0);
1664                                         break;
1665                                 case TYPE_DBL:
1666                                         s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1667                                         M_DST(s1, REG_ITMP1, 0);
1668                                         break;
1669 #endif
1670                                 }
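                                     // Emit a patchable memory barrier after the store; presumably
                                     // it only becomes active when the field turns out to be
                                     // volatile.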
1671 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1672                                 codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1673 #endif
1674                                 break;
1675                         }
1676 #endif
1677
1678                         /* branch operations **********************************************/
1679
1680                         case ICMD_ATHROW:     /* ..., objectref ==> ... (, objectref)     */
1681
1682                                 // We might leave this method, stop profiling.
1683                                 PROFILE_CYCLE_STOP;
1684
1685                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1686                                 // XXX M68K: Actually this is M_ADRMOVE(s1, REG_ATMP1_XPTR);
1687                                 // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
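                                     // Pass the exception object in the exception-pointer register
                                     // so the architecture specific throw code below can pick it up.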
1688                                 emit_imove(cd, s1, REG_ITMP1_XPTR);
1689
1690 #ifdef ENABLE_VERIFIER
1691                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1692                                         unresolved_class *uc = iptr->sx.s23.s2.uc;
1693                                         patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1694                                 }
1695 #endif /* ENABLE_VERIFIER */
1696
1697                                 // Generate architecture specific instructions.
1698                                 codegen_emit_instruction(jd, iptr);
1699                                 ALIGNCODENOP;
1700                                 break;
1701
1702                         case ICMD_GOTO:       /* ... ==> ...                              */
1703                         case ICMD_RET:        /* ... ==> ...                              */
1704
1705 #if defined(ENABLE_SSA)
1706                                 // In case of a goto, phimoves have to be inserted
1707                                 // before the jump.
1708                                 if (ls != NULL) {
1709                                         last_cmd_was_goto = true;
1710                                         codegen_emit_phi_moves(jd, bptr);
1711                                 }
1712 #endif
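                                     // Branches into an exception handler block first need the
                                     // handler's interface fixed up (see fixup_exc_handler_interface).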
1713                                 if (iptr->dst.block->type == BBTYPE_EXH)
1714                                         fixup_exc_handler_interface(jd, iptr->dst.block);
1715                                 emit_br(cd, iptr->dst.block);
1716                                 ALIGNCODENOP;
1717                                 break;
1718
1719                         case ICMD_JSR:        /* ... ==> ...                              */
1720
1721                                 assert(iptr->sx.s23.s3.jsrtarget.block->type != BBTYPE_EXH);
1722                                 emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1723                                 ALIGNCODENOP;
1724                                 break;
1725
1726                         case ICMD_IFNULL:     /* ..., value ==> ...                       */
1727                         case ICMD_IFNONNULL:
1728
1729                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1730                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
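                                     // The branch condition is derived from the opcode's offset
                                     // relative to ICMD_IFNULL.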
1731 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1732                                 emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1733 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1734                                 M_TEST(s1);
1735                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1736 #else
1737 # error Unable to generate code for this configuration!
1738 #endif
1739                                 break;
1740
1741                         case ICMD_IFEQ:       /* ..., value ==> ...                       */
1742                         case ICMD_IFNE:
1743                         case ICMD_IFLT:
1744                         case ICMD_IFLE:
1745                         case ICMD_IFGT:
1746                         case ICMD_IFGE:
1747
1748                                 // XXX Sparc64: int compares must not branch on the
1749                                 // register directly, because the register content is
1750                                 // not 32-bit clean. Fix this!
1751
1752                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1753
1754 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
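                                     // A comparison against zero can branch on the register
                                     // directly; any other constant needs the architecture
                                     // specific compare below.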
1755                                 if (iptr->sx.val.i == 0) {
1756                                         s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1757                                         emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1758                                 } else {
1759                                         // Generate architecture specific instructions.
1760                                         codegen_emit_instruction(jd, iptr);
1761                                 }
1762 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1763                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1764                                 emit_icmp_imm(cd, s1, iptr->sx.val.i);
1765                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1766 #else
1767 # error Unable to generate code for this configuration!
1768 #endif
1769                                 break;
1770
1771                         case ICMD_IF_LEQ:     /* ..., value ==> ...                       */
1772                         case ICMD_IF_LNE:
1773                         case ICMD_IF_LLT:
1774                         case ICMD_IF_LGE:
1775                         case ICMD_IF_LGT:
1776                         case ICMD_IF_LLE:
1777
1778                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1779
1780                                 // Generate architecture specific instructions.
1781                                 codegen_emit_instruction(jd, iptr);
1782                                 break;
1783
1784                         case ICMD_IF_ACMPEQ:  /* ..., value, value ==> ...                */
1785                         case ICMD_IF_ACMPNE:  /* op1 = target JavaVM pc                   */
1786
1787                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1788
1789                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1790                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1791 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1792                                 switch (iptr->opc) {
1793                                         case ICMD_IF_ACMPEQ:
1794                                                 emit_beq(cd, iptr->dst.block, s1, s2);
1795                                                 break;
1796                                         case ICMD_IF_ACMPNE:
1797                                                 emit_bne(cd, iptr->dst.block, s1, s2);
1798                                                 break;
1799                                 }
1800 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1801                                 M_ACMP(s1, s2);
1802                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1803 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1804                                 M_CMPEQ(s1, s2, REG_ITMP1);
1805                                 switch (iptr->opc) {
1806                                         case ICMD_IF_ACMPEQ:
1807                                                 emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1808                                                 break;
1809                                         case ICMD_IF_ACMPNE:
1810                                                 emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1811                                                 break;
1812                                 }
1813 #else
1814 # error Unable to generate code for this configuration!
1815 #endif
1816                                 break;
1817
1818                         case ICMD_IF_ICMPEQ:  /* ..., value, value ==> ...                */
1819                         case ICMD_IF_ICMPNE:  /* op1 = target JavaVM pc                   */
1820
1821                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1822
1823 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1824                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1825                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1826                                 switch (iptr->opc) {
1827                                         case ICMD_IF_ICMPEQ:
1828                                                 emit_beq(cd, iptr->dst.block, s1, s2);
1829                                                 break;
1830                                         case ICMD_IF_ICMPNE:
1831                                                 emit_bne(cd, iptr->dst.block, s1, s2);
1832                                                 break;
1833                                 }
1834                                 break;
1835 #else
1836                                 /* fall-through */
1837 #endif
1838
1839                         case ICMD_IF_ICMPLT:  /* ..., value, value ==> ...                */
1840                         case ICMD_IF_ICMPGT:  /* op1 = target JavaVM pc                   */
1841                         case ICMD_IF_ICMPLE:
1842                         case ICMD_IF_ICMPGE:
1843
1844                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1845
1846                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1847                                 s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1848 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1849 # if defined(__I386__) || defined(__M68K__) || defined(__X86_64__)
1850                                 // XXX Fix this soon!!!
1851                                 M_ICMP(s2, s1);
1852 # else
1853                                 M_ICMP(s1, s2);
1854 # endif
1855                                 emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1856 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1857                                 // Generate architecture specific instructions.
1858                                 codegen_emit_instruction(jd, iptr);
1859 #else
1860 # error Unable to generate code for this configuration!
1861 #endif
1862                                 break;
1863
1864                         case ICMD_IF_LCMPEQ:  /* ..., value, value ==> ...                */
1865                         case ICMD_IF_LCMPNE:  /* op1 = target JavaVM pc                   */
1866                         case ICMD_IF_LCMPLT:
1867                         case ICMD_IF_LCMPGT:
1868                         case ICMD_IF_LCMPLE:
1869                         case ICMD_IF_LCMPGE:
1870
1871                                 assert(iptr->dst.block->type != BBTYPE_EXH);
1872
1873                                 // Generate architecture specific instructions.
1874                                 codegen_emit_instruction(jd, iptr);
1875                                 break;
1876
1877                         case ICMD_RETURN:     /* ...  ==> ...                             */
1878
1879                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1880                                 goto nowperformreturn;
1881
1882                         case ICMD_ARETURN:    /* ..., retvalue ==> ...                    */
1883
1884                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1885                                 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1886                                 // XXX M68K: This should actually be M_ADR2INTMOVE(s1, REG_RESULT);
1887                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1888                                 emit_imove(cd, s1, REG_RESULT);
1889
1890 #ifdef ENABLE_VERIFIER
1891                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1892                                         PROFILE_CYCLE_STOP;
1893                                         unresolved_class *uc = iptr->sx.s23.s2.uc;
1894                                         patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1895                                         PROFILE_CYCLE_START;
1896                                 }
1897 #endif /* ENABLE_VERIFIER */
1898                                 goto nowperformreturn;
1899
1900                         case ICMD_IRETURN:    /* ..., retvalue ==> ...                    */
1901 #if defined(ENABLE_SOFTFLOAT)
1902                         case ICMD_FRETURN:
1903 #endif
1904
1905                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1906                                 s1 = emit_load_s1(jd, iptr, REG_RESULT);
1907                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1908                                 emit_imove(cd, s1, REG_RESULT);
1909                                 goto nowperformreturn;
1910
1911                         case ICMD_LRETURN:    /* ..., retvalue ==> ...                    */
1912 #if defined(ENABLE_SOFTFLOAT)
1913                         case ICMD_DRETURN:
1914 #endif
1915
1916                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1917                                 s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1918                                 // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1919                                 emit_lmove(cd, s1, REG_LRESULT);
1920                                 goto nowperformreturn;
1921
1922 #if !defined(ENABLE_SOFTFLOAT)
1923                         case ICMD_FRETURN:    /* ..., retvalue ==> ...                    */
1924
1925                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1926                                 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1927 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1928                                 emit_fmove(cd, s1, REG_FRESULT);
1929 #else
1930                                 M_CAST_F2I(s1, REG_RESULT);
1931 #endif
1932                                 goto nowperformreturn;
1933
1934                         case ICMD_DRETURN:    /* ..., retvalue ==> ...                    */
1935
1936                                 REPLACEMENT_POINT_RETURN(cd, iptr);
1937                                 s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1938 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1939                                 emit_dmove(cd, s1, REG_FRESULT);
1940 #else
1941                                 M_CAST_D2L(s1, REG_LRESULT);
1942 #endif
1943                                 goto nowperformreturn;
1944 #endif
1945
1946 nowperformreturn:
1947 #if !defined(NDEBUG)
1948                                 // Call trace function.
1949                                 if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1950                                         emit_verbosecall_exit(jd);
1951 #endif
1952
1953 #if defined(ENABLE_THREADS)
1954                                 // Emit code to call monitorexit function.
1955                                 if (checksync && code_is_synchronized(code)) {
1956                                         emit_monitor_exit(jd, rd->memuse * 8);
1957                                 }
1958 #endif
1959
1960                                 // Generate method profiling code.
1961                                 PROFILE_CYCLE_STOP;
1962
1963                                 // Emit code for the method epilog.
1964                                 codegen_emit_epilog(jd);
1965                                 ALIGNCODENOP;
1966                                 break;
1967
1968                         case ICMD_BUILTIN:      /* ..., [arg1, [arg2 ...]] ==> ...        */
1969
1970                                 REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr);
1971
1972                                 bte = iptr->sx.s23.s3.bte;
1973                                 md  = bte->md;
1974
1975 #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
1976                                 if (bte->fp == BUILTIN_escape_reason_new) {
1977                                         void set_escape_reasons(void *);
1978                                         M_ASUB_IMM(8, REG_SP);
1979                                         M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
1980                                         M_AST(EDX, REG_SP, 4);
1981                                         M_AST(REG_ITMP1, REG_SP, 0);
1982                                         M_MOV_IMM(set_escape_reasons, REG_ITMP1);
1983                                         M_CALL(REG_ITMP1);
1984                                         M_ALD(EDX, REG_SP, 4);
1985                                         M_AADD_IMM(8, REG_SP);
1986                                 }
1987 #endif
1988
1989                                 // Emit the fast-path if available.
1990                                 if (bte->emit_fastpath != NULL) {
1991                                         void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
1992                                         emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
1993
1994                                         assert(md->returntype.type == TYPE_VOID);
1995                                         d = REG_ITMP1;
1996
1997                                         // Actually call the fast-path emitter.
1998                                         emit_fastpath(jd, iptr, d);
1999
2000                                         // If fast-path succeeded, jump to the end of the builtin
2001                                         // invocation.
2002                                         // XXX Actually the slow-path block below should be moved
2003                                         // out of the instruction stream and the jump below should be
2004                                         // inverted.
2005 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2006                                         os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2007 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2008                                         M_TEST(d);
2009                                         emit_label_bne(cd, BRANCH_LABEL_10);
2010 #else
2011 # error Unable to generate code for this configuration!
2012 #endif
2013                                 }
2014
2015                                 goto gen_method;
2016
2017                         case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ...        */
2018                         case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2019                         case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer    */
2020                         case ICMD_INVOKEINTERFACE:
2021
2022                                 REPLACEMENT_POINT_INVOKE(cd, iptr);
2023
2024                                 if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2025                                         unresolved_method* um = iptr->sx.s23.s3.um;
2026                                         md = um->methodref->parseddesc.md;
2027                                 }
2028                                 else {
2029                                         methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2030                                         md = lm->parseddesc;
2031                                 }
2032
2033 gen_method:
2034                                 i = md->paramcount;
2035
2036                                 // XXX Check this again!
2037                                 MCODECHECK((i << 1) + 64);   // PPC
2038
2039                                 // Copy arguments to registers or stack location.
2040                                 for (i = i - 1; i >= 0; i--) {
2041                                         var = VAR(iptr->sx.s23.s2.args[i]);
2042                                         d   = md->params[i].regoff;
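                                             // regoff is either the argument register or, for
                                             // in-memory arguments, the stack offset.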
2043
2044                                         // Already pre-allocated?
2045                                         if (var->flags & PREALLOC)
2046                                                 continue;
2047
2048                                         if (!md->params[i].inmemory) {
2049                                                 switch (var->type) {
2050                                                 case TYPE_ADR:
2051                                                 case TYPE_INT:
2052 #if defined(ENABLE_SOFTFLOAT)
2053                                                 case TYPE_FLT:
2054 #endif
2055                                                         s1 = emit_load(jd, iptr, var, d);
2056                                                         emit_imove(cd, s1, d);
2057                                                         break;
2058
2059                                                 case TYPE_LNG:
2060 #if defined(ENABLE_SOFTFLOAT)
2061                                                 case TYPE_DBL:
2062 #endif
2063                                                         s1 = emit_load(jd, iptr, var, d);
2064                                                         emit_lmove(cd, s1, d);
2065                                                         break;
2066
2067 #if !defined(ENABLE_SOFTFLOAT)
2068                                                 case TYPE_FLT:
2069 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2070                                                         s1 = emit_load(jd, iptr, var, d);
2071                                                         emit_fmove(cd, s1, d);
2072 #else
2073                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2074                                                         M_CAST_F2I(s1, d);
2075 #endif
2076                                                         break;
2077
2078                                                 case TYPE_DBL:
2079 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2080                                                         s1 = emit_load(jd, iptr, var, d);
2081                                                         emit_dmove(cd, s1, d);
2082 #else
2083                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2084                                                         M_CAST_D2L(s1, d);
2085 #endif
2086                                                         break;
2087 #endif
2088                                                 }
2089                                         }
2090                                         else {
2091                                                 switch (var->type) {
2092                                                 case TYPE_ADR:
2093                                                         s1 = emit_load(jd, iptr, var, REG_ITMP1);
2094                                                         // XXX M68K: This should actually be like this:
2095                                                         //     s1 = emit_load(jd, iptr, var, REG_ATMP1);
2096                                                         // XXX Sparc64: Here this actually was:
2097                                                         //     M_STX(s1, REG_SP, JITSTACK + d);
2098                                                         M_AST(s1, REG_SP, d);
2099                                                         break;
2100
2101                                                 case TYPE_INT:
2102 #if defined(ENABLE_SOFTFLOAT)
2103                                                 case TYPE_FLT:
2104 #endif
2105 #if SIZEOF_VOID_P == 4
2106                                                         s1 = emit_load(jd, iptr, var, REG_ITMP1);
2107                                                         M_IST(s1, REG_SP, d);
2108                                                         break;
2109 #else
2110                                                         /* fall-through */
2111 #endif
2112
2113                                                 case TYPE_LNG:
2114 #if defined(ENABLE_SOFTFLOAT)
2115                                                 case TYPE_DBL:
2116 #endif
2117                                                         s1 = emit_load(jd, iptr, var, REG_LTMP12);
2118                                                         // XXX Sparc64: Here this actually was:
2119                                                         //     M_STX(s1, REG_SP, JITSTACK + d);
2120                                                         M_LST(s1, REG_SP, d);
2121                                                         break;
2122
2123 #if !defined(ENABLE_SOFTFLOAT)
2124                                                 case TYPE_FLT:
2125 #if SIZEOF_VOID_P == 4
2126                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2127                                                         M_FST(s1, REG_SP, d);
2128                                                         break;
2129 #else
2130                                                         /* fall-through */
2131 #endif
2132
2133                                                 case TYPE_DBL:
2134                                                         s1 = emit_load(jd, iptr, var, REG_FTMP1);
2135                                                         // XXX Sparc64: Here this actually was:
2136                                                         //     M_DST(s1, REG_SP, JITSTACK + d);
2137                                                         M_DST(s1, REG_SP, d);
2138                                                         break;
2139 #endif
2140                                                 }
2141                                         }
2142                                 }
2143
2144                                 // Generate method profiling code.
2145                                 PROFILE_CYCLE_STOP;
2146
2147                                 // Generate architecture specific instructions.
2148                                 codegen_emit_instruction(jd, iptr);
2149
2150                                 // Generate method profiling code.
2151                                 PROFILE_CYCLE_START;
2152
2153                                 // Store size of call code in replacement point.
2154                                 REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
2155                                 REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr);
2156
2157                                 // Recompute the procedure vector (PV).
2158                                 emit_recompute_pv(cd);
2159
2160                                 // Store return value.
2161 #if defined(ENABLE_SSA)
2162                                 if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2163                                         (ls->lifetime[iptr->dst.varindex].type != UNUSED))
2164                                         /* a "living" stackslot */
2165 #endif
2166                                 switch (md->returntype.type) {
2167                                 case TYPE_INT:
2168                                 case TYPE_ADR:
2169 #if defined(ENABLE_SOFTFLOAT)
2170                                 case TYPE_FLT:
2171 #endif
2172                                         s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2173                                         // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2174                                         emit_imove(cd, REG_RESULT, s1);
2175                                         emit_store_dst(jd, iptr, s1);
2176                                         break;
2177
2178                                 case TYPE_LNG:
2179 #if defined(ENABLE_SOFTFLOAT)
2180                                 case TYPE_DBL:
2181 #endif
2182                                         s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2183                                         // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2184                                         emit_lmove(cd, REG_LRESULT, s1);
2185                                         emit_store_dst(jd, iptr, s1);
2186                                         break;
2187
2188 #if !defined(ENABLE_SOFTFLOAT)
2189                                 case TYPE_FLT:
2190 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2191                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2192                                         emit_fmove(cd, REG_FRESULT, s1);
2193 #else
2194                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2195                                         M_CAST_I2F(REG_RESULT, s1);
2196 #endif
2197                                         emit_store_dst(jd, iptr, s1);
2198                                         break;
2199
2200                                 case TYPE_DBL:
2201 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2202                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2203                                         emit_dmove(cd, REG_FRESULT, s1);
2204 #else
2205                                         s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2206                                         M_CAST_L2D(REG_LRESULT, s1);
2207 #endif
2208                                         emit_store_dst(jd, iptr, s1);
2209                                         break;
2210 #endif
2211
2212                                 case TYPE_VOID:
2213                                         break;
2214                                 }
2215
2216                                 // If we are emitting a fast-path block, this is the label for
2217                                 // successful fast-path execution.
2218                                 if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2219                                         emit_label(cd, BRANCH_LABEL_10);
2220                                 }
2221
2222                                 break;
2223
2224                         case ICMD_TABLESWITCH:  /* ..., index ==> ...                     */
2225
2226                                 // Generate architecture specific instructions.
2227                                 codegen_emit_instruction(jd, iptr);
2228                                 break;
2229
2230                         case ICMD_LOOKUPSWITCH: /* ..., key ==> ...                       */
2231
2232                                 s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2233                                 i = iptr->sx.s23.s2.lookupcount;
2234
2235                                 // XXX Again we need to check this
2236                                 MCODECHECK((i<<2)+8);   // Alpha, ARM, i386, MIPS, M68K, Sparc64
2237                                 MCODECHECK((i<<3)+8);   // PPC64
2238                                 MCODECHECK(8 + ((7 + 6) * i) + 5);   // X86_64, S390
2239
2240                                 // Compare keys.
2241                                 for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2242 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2243                                         emit_icmp_imm(cd, s1, lookup->value);
2244                                         emit_beq(cd, lookup->target.block);
2245 #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2246                                         ICONST(REG_ITMP2, lookup->value);
2247                                         emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2248 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2249                                         emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2250                                         emit_bnez(cd, lookup->target.block, REG_ITMP2);
2251 #else
2252 # error Unable to generate code for this configuration!
2253 #endif
2254                                 }
2255
2256                                 // Default branch.
2257                                 emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2258                                 ALIGNCODENOP;
2259                                 break;
2260
2261                         case ICMD_CHECKCAST:  /* ..., objectref ==> ..., objectref        */
2262                         case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult        */
2263                         case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref  */
2264
2265                                 // Generate architecture specific instructions.
2266                                 codegen_emit_instruction(jd, iptr);
2267                                 break;
2268
2269                         default:
2270                                 exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2271                                                                                            iptr->opc);
2272                                 return false;
2273
2274                         } // the big switch
2275
2276                 } // for all instructions
2277
2278 #if defined(ENABLE_SSA)
2279                 // Because of edge splitting, a block that contains phi moves can
2280                 // only end with a goto; no other jump or branch can be its last command.
2281                 if (ls != NULL) {
2282                         if (!last_cmd_was_goto)
2283                                 codegen_emit_phi_moves(jd, bptr);
2284                 }
2285 #endif
2286
2287 #if defined(__I386__) || defined(__M68K__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2288                 // XXX Again!!!
2289                 /* XXX require a lower number? */
2290                 MCODECHECK(64);  // I386, MIPS, Sparc64
2291                 MCODECHECK(512); // S390, X86_64
2292
2293                 /* XXX We can remove this once we no longer use UD2 on i386
2294                    and x86_64. */
2295
2296                 /* At the end of a basic block we may have to append some nops,
2297                    because the patcher stub calling code might be longer than the
2298                    actual instruction, so that code patching does not unintentionally
2299                    change the following block. */
2300
2301                 if (cd->mcodeptr < cd->lastmcodeptr) {
2302                         while (cd->mcodeptr < cd->lastmcodeptr) {
2303                                 M_NOP;
2304                         }
2305                 }
2306 #endif
2307
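                     // If the following block is an exception handler, its interface
                     // has to be fixed up here as well.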
2308                 if (bptr->next && bptr->next->type == BBTYPE_EXH)
2309                         fixup_exc_handler_interface(jd, bptr->next);
2310
2311         } // for all basic blocks
2312
2313         // Generate traps.
2314         emit_patcher_traps(jd);
2315
2316         // Everything's ok.
2317         return true;
2318 }
2319
2320
2321 /* codegen_emit_phi_moves ****************************************************
2322
2323    Emits phi moves at the end of the basicblock.
2324
2325 *******************************************************************************/
2326
2327 #if defined(ENABLE_SSA)
2328 void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
2329 {
2330         int lt_d,lt_s,i;
2331         lsradata *ls;
2332         codegendata *cd;
2333         varinfo *s, *d;
2334         instruction tmp_i;
2335
2336         cd = jd->cd;
2337         ls = jd->ls;
2338
2339         MCODECHECK(512);
2340
2341         /* Moves from phi functions with the highest indices have to be */
2342         /* inserted first, since this is the order used for             */
2343         /* conflict resolution.                                         */
2344
2345         for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
2346                 lt_d = ls->phi_moves[bptr->nr][i][0];
2347                 lt_s = ls->phi_moves[bptr->nr][i][1];
2348 #if defined(SSA_DEBUG_VERBOSE)
2349                 if (compileverbose)
2350                         printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
2351 #endif
2352                 if (lt_s == UNUSED) {
2353 #if defined(SSA_DEBUG_VERBOSE)
2354                         if (compileverbose)
2355                                 printf(" ... not processed\n");
2356 #endif
2357                         continue;
2358                 }
2359                         
2360                 d = VAR(ls->lifetime[lt_d].v_index);
2361                 s = VAR(ls->lifetime[lt_s].v_index);
2362                 
2363
2364                 if (d->type == -1) {
2365 #if defined(SSA_DEBUG_VERBOSE)
2366                         if (compileverbose)
2367                                 printf("...returning - phi lifetimes were joined\n");
2368 #endif
2369                         continue;
2370                 }
2371
2372                 if (s->type == -1) {
2373 #if defined(SSA_DEBUG_VERBOSE)
2374                         if (compileverbose)
2375                                 printf("...returning - phi lifetimes were joined\n");
2376 #endif
2377                         continue;
2378                 }
2379
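                     // Build a synthetic copy instruction so the generic emit_copy
                     // routine can be reused for the phi move.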
2380                 tmp_i.opc = 0;
2381                 tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
2382                 tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
2383                 emit_copy(jd, &tmp_i);
2384
2385 #if defined(SSA_DEBUG_VERBOSE)
2386                 if (compileverbose) {
2387                         if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
2388                                 /* mem -> mem */
2389                                 printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2390                         }
2391                         else if (IS_INMEMORY(s->flags)) {
2392                                 /* mem -> reg */
2393                                 printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2394                         }
2395                         else if (IS_INMEMORY(d->flags)) {
2396                                 /* reg -> mem */
2397                                 printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2398                         }
2399                         else {
2400                                 /* reg -> reg */
2401                                 printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2402                         }
2403                         printf("\n");
2404                 }
2405 #endif /* defined(SSA_DEBUG_VERBOSE) */
2406         }
2407 }
2408 #endif /* defined(ENABLE_SSA) */
2409
2410
2411 /* REMOVEME When we have exception handling in C. */
2412
2413 void *md_asm_codegen_get_pv_from_pc(void *ra)
2414 {
2415         return md_codegen_get_pv_from_pc(ra);
2416 }
2417
2418
2419 /*
2420  * These are local overrides for various environment variables in Emacs.
2421  * Please do not remove this and leave it at the end of the file, where
2422  * Emacs will automagically detect them.
2423  * ---------------------------------------------------------------------
2424  * Local variables:
2425  * mode: c++
2426  * indent-tabs-mode: t
2427  * c-basic-offset: 4
2428  * tab-width: 4
2429  * End:
2430  * vim:noexpandtab:sw=4:ts=4:
2431  */