2 * regalloc2.c: Global Register Allocator
5 * Zoltan Varga (vargaz@gmail.com)
7 * (C) 2007 Novell, Inc.
11 #include <mono/metadata/debug-helpers.h>
13 #ifdef MONO_ARCH_ENABLE_GLOBAL_RA
18 * This allocator is based on the paper
19 * "Linear Scan Register Allocation for the Java HotSpot Client Compiler"
20 * by Christian Wimmer.
22 * It differs from the current allocator in the following respects:
23 * - all variables (vregs) are allocated a register, there is no separate local/global
25 * - there are no variables allocated strictly to the stack. Even those variables
26 * allocated to the stack are loaded into a register before they are accessed.
32 * - Only works on amd64.
33 * - Tests in mono/mini and mono/tests work, "Hello, World!" works.
34 * - The quality of generated code is not bad, but needs more optimizations.
35 * - Focus was on correctness and easy debuggability so performance is bad, especially on
36 * large methods (like Main in transparentproxy.exe). Since each interval can be
37 * split at each use position, run time is linear in the number of variable accesses,
38 * not the number of variables.
39 * - Have to think about splitting the available registers between caller saved and callee
40 * saved, and take this into account during allocation (callee saved - good for
41 * variables which are accessed a lot, and/or are live across calls, caller saved -
42 * good for scratch registers used only in a bblock and not live across calls).
43 * - FIXME: Fix mono_arch_get_ireg_clobbered_by_call () to only return caller saved
/*
 * MonoRegallocInterval: per-interval allocation state for the linear scan
 * allocator.  The vreg numbering scheme below maps hard regs, vregs and
 * split intervals into one flat index space.
 * NOTE(review): this listing is missing lines; the declarations for the
 * vreg/hreg/offset/use_pos/type/preferred_reg fields referenced by the
 * comments below are not visible here — confirm against the full file.
 */
50 typedef struct MonoRegallocInterval {
52 * 0..MONO_MAX_IREGS - iregs
53 * MONO_MAX_IREGS + 1...MONO_MAX_IREGS+MONO_MAX_FREGS - fregs
54 * MONO_MAX_IREGS+MONO_MAX_FREGS... cfg->next_vreg - vregs
55 * cfg->next_vreg... - split intervals
59 * Hard register assigned to this interval, -1 if no register is assigned or the
60 * interval is spilled.
65 * The actual interval data
67 MonoLiveInterval *interval;
70 * Split children of this interval. NULL if the interval is not split.
72 struct MonoRegallocInterval *child1, *child2;
75 * List of use positions, each position is identified by (bb->dfn << 16) + ins_pos
76 * The list is sorted by increasing position.
81 * The offset relative to the frame pointer where this interval is spilled.
86 * If we are a split child of an interval, this points to our parent
88 struct MonoRegallocInterval *parent;
91 * Whenever vreg is an fp vreg
96 * Whenever the variable is volatile
98 guint is_volatile : 1;
101 * The exact type of the variable
106 * The register where this interval should be allocated
109 } MonoRegallocInterval;
/*
 * MonoRegallocContext: state shared by all phases of the allocator.
 * varinfo is indexed by the flat vreg numbering described above.
 */
111 typedef struct MonoRegallocContext {
113 MonoRegallocInterval *varinfo;
116 * Maps ins pos -> GSList of intervals split at that pos.
118 GHashTable *split_positions;
/* Presence set keyed by pos rounded down to INS_POS_INTERVAL (see
 * split_interval ()), so hot paths can skip the split_positions lookup. */
120 * Used to avoid lookups in split_positions for every position.
122 GHashTable *split_position_set;
124 * Contains MonoInst's representing spill loads/stores
126 GHashTable *spill_ins;
127 } MonoRegallocContext;
/*
 * IR construction helpers: build a unary ALU op, a store to
 * [basereg + offset], and a load from [basereg + offset].
 * NOTE(review): the tail of each macro (e.g. the dreg assignment in
 * NEW_UNALU and the closing "} while (0)") is not visible in this
 * listing — do not edit these without the full file.
 */
133 #define NEW_UNALU(cfg,dest,op,dr,sr1) do { \
134 MONO_INST_NEW ((cfg), (dest), (op)); \
136 (dest)->sreg1 = sr1; \
139 #define NEW_STORE_MEMBASE(cfg,dest,op,base,offset,sr) do { \
140 MONO_INST_NEW ((cfg), (dest), (op)); \
141 (dest)->sreg1 = sr; \
142 (dest)->inst_destbasereg = base; \
143 (dest)->inst_offset = offset; \
146 #define NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { \
147 MONO_INST_NEW ((cfg), (dest), (op)); \
148 (dest)->dreg = (dr); \
149 (dest)->inst_basereg = (base); \
150 (dest)->inst_offset = (offset); \
151 (dest)->type = STACK_I4; \
/* Width of one MonoBitSet chunk; matches the native word size.
 * NOTE(review): the #else/#endif lines of this conditional are missing
 * from the listing. */
154 #if SIZEOF_VOID_P == 8
155 #define BITS_PER_CHUNK 64
157 #define BITS_PER_CHUNK 32
/* First index in the flat numbering that denotes a real vreg (indices
 * below this are hard int/fp registers). */
160 #define MONO_FIRST_VREG (MONO_MAX_IREGS+MONO_MAX_FREGS)
163 * Each instruction is allocated 4 liveness phases:
164 * 0 - USE - the instruction reads its input registers in this phase
165 * 1 - CLOB - the instruction clobbers some registers in this phase
166 * 2 - DEF - the instruction writes its output register(s) in this phase
167 * 3 - SPILL - spill code
168 * In liveness ranges, the start position of the range is the DEF position of the
169 * instruction which defines the vreg.
172 #define INS_POS_USE 0
173 #define INS_POS_CLOB 1
174 #define INS_POS_DEF 2
175 #define INS_POS_SPILL 3
177 /* 16 is used instead of 4 so liveness ranges are easier to read */
178 #define INS_POS_INTERVAL 16
/* Classify a flat register index as hard or soft, per bank. */
180 #define is_hard_reg(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
181 #define is_soft_reg(r,fp) (!is_hard_reg((r),(fp)))
/* Per-operand fp tests; fall back to checking the 'f' spec character when
 * the arch does not define MONO_ARCH_INST_IS_FLOAT.
 * NOTE(review): the #else line of this conditional is missing here. */
183 #ifdef MONO_ARCH_INST_IS_FLOAT
184 #define dreg_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_DEST]))
185 #define sreg1_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC1]))
186 #define sreg2_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC2]))
188 #define sreg1_is_fp(spec) (spec [MONO_INST_SRC1] == 'f')
189 #define sreg2_is_fp(spec) (spec [MONO_INST_SRC2] == 'f')
190 #define dreg_is_fp(spec) (spec [MONO_INST_DEST] == 'f')
/*
 * g_slist_prepend_mempool:
 *   Like g_slist_prepend (), but the new link is allocated from the
 *   mempool MP, so the resulting list needs no explicit freeing.
 *   NOTE(review): the "return new_list;" and closing brace are not
 *   visible in this listing.
 */
193 static inline GSList*
194 g_slist_prepend_mempool (MonoMemPool *mp, GSList *list,
199 new_list = mono_mempool_alloc (mp, sizeof (GSList));
200 new_list->data = data;
201 new_list->next = list;
/*
 * g_slist_append_mempool:
 *   Like g_slist_append (), but allocates the link from MP.  Walks to the
 *   tail via g_slist_last (), so appending is O(n).
 *   NOTE(review): the empty-list branch (when LAST is NULL) and the
 *   return statement are not visible in this listing.
 */
206 static inline GSList*
207 g_slist_append_mempool (MonoMemPool *mp, GSList *list,
210 GSList *new_list, *last;
212 last = g_slist_last (list);
213 new_list = mono_mempool_alloc (mp, sizeof (GSList));
214 new_list->data = data;
215 new_list->next = NULL;
217 last->next = new_list;
/*
 * insert_after_ins:
 *   Link INS into BB's singly-linked instruction list immediately after
 *   INSERT_AFTER.  A NULL INSERT_AFTER means insert at the head
 *   (bb->code).  Updates bb->last_ins when the insertion point was the
 *   old tail.  NOTE(review): several lines (the head-insertion tail and
 *   the last_ins updates) are missing from this listing.
 */
225 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *insert_after)
227 if (insert_after == NULL) {
228 insert_after = bb->code;
230 ins->next = insert_after;
232 if (bb->last_ins == NULL)
236 ins->next = insert_after->next;
237 insert_after->next = ins;
239 if (bb->last_ins == insert_after)
/*
 * create_move:
 *   Allocate an OP_MOVE instruction copying SREG to DREG (integer bank).
 *   NOTE(review): the dreg/sreg1 assignments and return are not visible
 *   in this listing.
 */
245 create_move (MonoCompile *cfg, int dreg, int sreg)
249 MONO_INST_NEW (cfg, ins, OP_MOVE);
/*
 * create_fp_move:
 *   Allocate an OP_FMOVE instruction copying SREG to DREG (fp bank).
 *   NOTE(review): the operand assignments and return are not visible in
 *   this listing.
 */
257 create_fp_move (MonoCompile *cfg, int dreg, int sreg)
261 MONO_INST_NEW (cfg, ins, OP_FMOVE);
/*
 * emit_move:
 *   Create an int move DREG <- SREG and insert it into cfg->cbb after
 *   INSERT_AFTER (NULL inserts at the block head).
 */
269 emit_move (MonoCompile *cfg, int dreg, int sreg, MonoInst *insert_after)
271 MonoInst *ins = create_move (cfg, dreg, sreg);
273 insert_after_ins (cfg->cbb, ins, insert_after);
/*
 * emit_fp_move:
 *   Create an fp move DREG <- SREG and insert it into cfg->cbb after
 *   INSERT_AFTER (NULL inserts at the block head).
 */
277 emit_fp_move (MonoCompile *cfg, int dreg, int sreg, MonoInst *insert_after)
279 MonoInst *ins = create_fp_move (cfg, dreg, sreg);
281 insert_after_ins (cfg->cbb, ins, insert_after);
/*
 * emit_nop:
 *   Insert an OP_NOP into cfg->cbb after INSERT_AFTER.  Used as a
 *   placeholder instruction (see the volatile-vreg handling in
 *   handle_reg_constraints ()).
 */
285 emit_nop (MonoCompile *cfg, MonoInst *insert_after)
289 MONO_INST_NEW (cfg, ins, OP_NOP);
291 insert_after_ins (cfg->cbb, ins, insert_after);
295 * handle_reg_constraints:
297 * Rewrite the IR so it satisfies the register constraints of the architecture.
/* NOTE(review): many lines of this function are missing from the
 * listing (variable declarations, closing braces, prev updates). */
300 handle_reg_constraints (MonoCompile *cfg)
302 MonoMethodSignature *sig;
306 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
307 MonoInst *ins = bb->code;
308 MonoInst *prev = NULL;
310 if (cfg->verbose_level > 1) mono_print_bb (bb, "BEFORE HANDLE-REG-CONSTRAINTS ");
313 for (; ins; ins = ins->next) {
314 const char *spec = ins_get_spec (ins->opcode);
315 int dest_sreg1, dest_sreg2, dest_dreg;
/* Fixed-register constraints from the machine description; -1 when the
 * operand is unconstrained (presumably — confirm MONO_ARCH_INST_FIXED_REG
 * semantics against the arch header). */
317 dest_sreg1 = MONO_ARCH_INST_FIXED_REG (spec [MONO_INST_SRC1]);
318 dest_sreg2 = MONO_ARCH_INST_FIXED_REG (spec [MONO_INST_SRC2]);
319 dest_dreg = MONO_ARCH_INST_FIXED_REG (spec [MONO_INST_DEST]);
/* Register pairs are not supported by this allocator (amd64 only). */
321 if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_DEST]) ||
322 MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1]) ||
323 MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC2]))
325 g_assert_not_reached ();
/* Calls: materialize explicit moves into the fixed argument registers
 * recorded in out_ireg_args/out_freg_args (packed as hreg<<24 | vreg). */
327 if (spec [MONO_INST_CLOB] == 'c') {
328 MonoCallInst *call = (MonoCallInst*)ins;
332 * FIXME: mono_arch_emit_call () already adds moves for each argument,
333 * it might be better to rewrite those by changing the dreg to the hreg.
335 for (list = call->out_ireg_args; list; list = list->next) {
340 regpair = (guint32)(gssize)(list->data);
341 hreg = regpair >> 24;
342 reg = regpair & 0xffffff;
344 move = create_move (cfg, hreg, reg);
345 insert_after_ins (bb, move, prev);
349 for (list = call->out_freg_args; list; list = list->next) {
354 regpair = (guint32)(gssize)(list->data);
355 hreg = regpair >> 24;
356 reg = regpair & 0xffffff;
/* fp hard regs live above the int bank in the flat numbering. */
358 move = create_fp_move (cfg, hreg + MONO_MAX_IREGS, reg);
359 insert_after_ins (bb, move, prev);
/* Two-address ops ('1' clob): force dreg == sreg1 by copying sreg1 into
 * dreg first, saving sreg2 when it would be clobbered by that copy. */
364 if (spec [MONO_INST_CLOB] == '1') {
365 /* Copying sreg1 to dreg could clobber sreg2 so make a copy of sreg2 */
366 if (spec [MONO_INST_SRC2] && (ins->dreg == ins->sreg2)) {
367 int new_sreg2 = mono_alloc_preg (cfg);
369 g_assert (spec [MONO_INST_DEST] != 'f');
370 move = create_move (cfg, new_sreg2, ins->sreg2);
371 insert_after_ins (cfg->cbb, move, prev);
373 ins->sreg2 = new_sreg2;
375 if (spec [MONO_INST_DEST] == 'f')
376 emit_fp_move (cfg, ins->dreg, ins->sreg1, prev);
378 emit_move (cfg, ins->dreg, ins->sreg1, prev);
379 ins->sreg1 = ins->dreg;
/* Fixed source regs: copy the value into the required hard reg and
 * retarget the operand. */
382 if (dest_sreg1 != -1) {
383 emit_move (cfg, dest_sreg1, ins->sreg1, prev);
384 ins->sreg1 = dest_sreg1;
387 if (dest_sreg2 != -1) {
388 emit_move (cfg, dest_sreg2, ins->sreg2, prev);
389 ins->sreg2 = dest_sreg2;
/* Fixed dest reg: write into the hard reg, then copy out after INS. */
392 if (dest_dreg != -1) {
393 emit_move (cfg, ins->dreg, dest_dreg, ins);
394 g_assert (spec [MONO_INST_CLOB] != '1');
395 ins->dreg = dest_dreg;
398 /* FIXME: Add fixed fp regs to the machine description */
399 if (ins->opcode == OP_FCALL || ins->opcode == OP_FCALL_REG || ins->opcode == OP_FCALL_MEMBASE) {
400 emit_fp_move (cfg, ins->dreg, MONO_MAX_IREGS + MONO_ARCH_FP_RETURN_REG, ins);
401 ins->dreg = MONO_MAX_IREGS + MONO_ARCH_FP_RETURN_REG;
405 * Add a dummy instruction after each definition of a volatile vreg, this is
406 * needed by the code in decompose_volatile_intervals ().
408 if (get_vreg_to_inst (cfg, ins->dreg) && (get_vreg_to_inst (cfg, ins->dreg)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
415 sig = mono_method_signature (cfg->method);
417 /* Add move of arguments */
419 * FIXME: Maybe this should be done by the allocator.
/* Entry block: copy incoming argument hard regs into their vregs. */
421 if (bb == cfg->bb_entry) {
425 if (cfg->vret_addr) {
426 g_assert (cfg->vret_addr->opcode == OP_REGVAR);
427 emit_move (cfg, cfg->vret_addr->dreg, cfg->vret_addr->inst_c0, prev);
431 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
434 if (sig->hasthis && (i == 0))
435 arg_type = &mono_defaults.object_class->byval_arg;
437 arg_type = sig->params [i - sig->hasthis];
439 // FIXME: vtypes in registers (pass + return)
441 if (ins->opcode == OP_REGVAR) {
442 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4) || (arg_type->type == MONO_TYPE_R8)))
443 /* For R4, the prolog is assumed to do the conversion */
444 emit_fp_move (cfg, ins->dreg, ins->inst_c0 + MONO_MAX_IREGS, prev);
446 emit_move (cfg, ins->dreg, ins->inst_c0, prev);
453 /* Add move of return value */
/* Predecessors of bb_exit: copy the return vreg into the return hard reg. */
454 for (i = 0; i < bb->out_count; ++i) {
455 if (cfg->ret && !cfg->vret_addr && !MONO_TYPE_ISSTRUCT (sig->ret) && bb->out_bb [i] == cfg->bb_exit) {
456 MonoInst *ins = NULL;
459 hreg = cfg->ret->inst_c0;
461 if ((sig->ret->type == MONO_TYPE_R4) || (sig->ret->type == MONO_TYPE_R8))
462 /* For R4, the JIT has already emitted code to do the conversion */
463 ins = create_fp_move (cfg, hreg + MONO_MAX_IREGS, cfg->ret->dreg);
465 ins = create_move (cfg, hreg, cfg->ret->dreg);
466 mono_add_ins_to_end (bb, ins);
470 if (cfg->verbose_level > 1) mono_print_bb (bb, "AFTER HANDLE-REG-CONSTRAINTS ");
477 * Set varinfo->fp for all float vregs
/* Scans every instruction; any operand the spec marks as fp makes the
 * corresponding vreg an fp vreg, and non-R4 fp vregs are normalized to
 * double.  Hard regs are excluded by the >= MONO_MAX_IREGS asserts. */
480 collect_fp_vregs (MonoCompile *cfg, MonoRegallocContext *ctx)
484 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
485 MonoInst *ins = bb->code;
487 for (; ins; ins = ins->next) {
488 const char *spec = ins_get_spec (ins->opcode);
490 if (G_UNLIKELY (sreg1_is_fp (spec) || sreg2_is_fp (spec) || dreg_is_fp (spec))) {
491 if (sreg1_is_fp (spec)) {
492 g_assert (ins->sreg1 >= MONO_MAX_IREGS);
493 ctx->varinfo [ins->sreg1].fp = TRUE;
494 if (ctx->varinfo [ins->sreg1].type->type != MONO_TYPE_R4)
495 ctx->varinfo [ins->sreg1].type = &mono_defaults.double_class->byval_arg;
497 if (sreg2_is_fp (spec)) {
498 g_assert (ins->sreg2 >= MONO_MAX_IREGS);
499 ctx->varinfo [ins->sreg2].fp = TRUE;
500 if (ctx->varinfo [ins->sreg2].type->type != MONO_TYPE_R4)
501 ctx->varinfo [ins->sreg2].type = &mono_defaults.double_class->byval_arg;
503 if (dreg_is_fp (spec)) {
504 g_assert (ins->dreg >= MONO_MAX_IREGS);
505 ctx->varinfo [ins->dreg].fp = TRUE;
506 if (ctx->varinfo [ins->dreg].type->type != MONO_TYPE_R4)
507 ctx->varinfo [ins->dreg].type = &mono_defaults.double_class->byval_arg;
/* LIVENESS_DEBUG: verbose-gated debug output (active variant prints when
 * cfg->verbose_level > 1; the other variant compiles to nothing).
 * NOTE(review): the #if/#else/#endif lines selecting between the two
 * definitions are missing from this listing. */
515 #define LIVENESS_DEBUG(a) do { if (cfg->verbose_level > 1) { a; } } while (0)
517 #define LIVENESS_DEBUG(a)
520 // #define DEBUG_LIVENESS 1
/*
 * mono_bitset_print:
 *   Debug helper: print the indices of the bits set in SET.
 *   NOTE(review): the printf of each set index and the closing brace are
 *   not visible in this listing.
 */
522 G_GNUC_UNUSED static void
523 mono_bitset_print (MonoBitSet *set)
528 for (i = 0; i < mono_bitset_size (set); i++) {
530 if (mono_bitset_test (set, i))
/*
 * update_gen_kill_set:
 *   Accumulate INS's effect on BB's GEN/KILL sets for liveness analysis:
 *   a read adds the vreg to GEN unless already killed in this block; a
 *   write adds it to KILL.  Store-membase "dregs" are really reads (the
 *   base register), so they go to GEN.
 *   NOTE(review): the sreg assignments between the branches are missing
 *   from this listing.
 */
538 update_gen_kill_set (MonoCompile *cfg, MonoRegallocContext *ctx, MonoBasicBlock *bb, MonoInst *ins)
540 const char *spec = INS_INFO (ins->opcode);
545 if (spec [MONO_INST_SRC1] != ' ') {
546 if (!mono_bitset_test_fast (bb->kill_set, sreg))
547 mono_bitset_set_fast (bb->gen_set, sreg)
552 if (spec [MONO_INST_SRC2] != ' ') {
553 if (!mono_bitset_test_fast (bb->kill_set, sreg))
554 mono_bitset_set_fast (bb->gen_set, sreg);
558 if (spec [MONO_INST_DEST] != ' ') {
559 if (MONO_IS_STORE_MEMBASE (ins)) {
560 if (!mono_bitset_test_fast (bb->kill_set, ins->dreg))
561 mono_bitset_set_fast (bb->gen_set, ins->dreg);
563 mono_bitset_set_fast (bb->kill_set, ins->dreg);
/*
 * compute_gen_kill_sets:
 *   Allocate per-bblock GEN/KILL/live_out bitsets from the cfg mempool
 *   and fill GEN/KILL by a forward pass over each block's instructions.
 *   live_in is left NULL and computed later.  Finally forces the return
 *   hard reg live at bb_exit so the epilogue sees it.
 */
569 compute_gen_kill_sets (MonoCompile *cfg, MonoRegallocContext *ctx)
571 int i, max_vars = cfg->next_vreg;
575 bitsize = mono_bitset_alloc_size (max_vars, 0);
/* One contiguous allocation: 3 bitsets (gen, kill, live_out) per block. */
576 mem = mono_mempool_alloc0 (cfg->mempool, cfg->num_bblocks * bitsize * 3);
578 for (i = 0; i < cfg->num_bblocks; ++i) {
579 MonoBasicBlock *bb = cfg->bblocks [i];
581 bb->gen_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
583 bb->kill_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
585 /* Initialized later */
586 bb->live_in_set = NULL;
587 bb->live_out_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
591 for (i = 0; i < cfg->num_bblocks; ++i) {
592 MonoBasicBlock *bb = cfg->bblocks [i];
594 #ifdef DEBUG_LIVENESS
598 for (ins = bb->code; ins; ins = ins->next)
599 update_gen_kill_set (cfg, ctx, bb, ins);
601 #ifdef DEBUG_LIVENESS
602 printf ("BLOCK BB%d (", bb->block_num);
603 for (j = 0; j < bb->out_count; j++)
604 printf ("BB%d, ", bb->out_bb [j]->block_num);
607 printf ("GEN BB%d: ", bb->block_num); mono_bitset_print (bb->gen_set);
608 printf ("KILL BB%d: ", bb->block_num); mono_bitset_print (bb->kill_set);
612 if (cfg->ret && cfg->ret->opcode == OP_REGVAR) {
613 int hreg = cfg->ret->inst_c0;
615 /* gen_set might be empty if bb_exit is not reachable, like when using a tail call */
616 if (cfg->bb_exit->gen_set)
617 mono_bitset_set (cfg->bb_exit->gen_set, hreg);
/*
 * compute_live_in_out_sets:
 *   Iterative backward dataflow: solve live_in/live_out for every bblock
 *   using a worklist, with live_in = (live_out - KILL) | GEN and
 *   live_out = union of successors' live_in.  Blocks are seeded in
 *   decreasing dfn order to speed convergence.
 *   NOTE(review): several lines (the worklist loop header, l_end init,
 *   out_iter counting, closing braces) are missing from this listing.
 */
622 compute_live_in_out_sets (MonoCompile *cfg, MonoRegallocContext *ctx)
624 MonoBitSet *old_live_out_set;
625 int i, j, max_vars = cfg->next_vreg;
627 gboolean *in_worklist;
628 MonoBasicBlock **worklist;
633 bitsize = mono_bitset_alloc_size (max_vars, 0);
634 mem = mono_mempool_alloc0 (cfg->mempool, cfg->num_bblocks * bitsize);
636 old_live_out_set = mono_bitset_new (max_vars, 0);
637 in_worklist = g_new0 (gboolean, cfg->num_bblocks + 1);
639 worklist = g_new (MonoBasicBlock *, cfg->num_bblocks + 1);
643 * This is a backward dataflow analysis problem, so we process blocks in
644 * decreasing dfn order, this speeds up the iteration.
646 for (i = 0; i < cfg->num_bblocks; i ++) {
647 MonoBasicBlock *bb = cfg->bblocks [i];
649 worklist [l_end ++] = bb;
650 in_worklist [bb->dfn] = TRUE;
/* Pop from the top of the worklist stack. */
656 MonoBasicBlock *bb = worklist [--l_end];
657 MonoBasicBlock *out_bb;
660 in_worklist [bb->dfn] = FALSE;
662 #ifdef DEBUG_LIVENESS
663 printf ("P: %d(%d): IN: ", bb->block_num, bb->dfn);
664 for (j = 0; j < bb->in_count; ++j)
665 printf ("BB%d ", bb->in_bb [j]->block_num);
667 for (j = 0; j < bb->out_count; ++j)
668 printf ("BB%d ", bb->out_bb [j]->block_num);
/* Exit-like blocks have nothing live out; skip them. */
673 if (bb->out_count == 0)
678 if (!bb->live_in_set) {
679 /* First pass over this bblock */
/* Snapshot live_out so we can detect whether it changed below. */
684 mono_bitset_copyto_fast (bb->live_out_set, old_live_out_set);
687 for (j = 0; j < bb->out_count; j++) {
688 out_bb = bb->out_bb [j];
/* Lazily compute the successor's live_in from its live_out/kill/gen. */
690 if (!out_bb->live_in_set) {
691 out_bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
694 mono_bitset_copyto_fast (out_bb->live_out_set, out_bb->live_in_set);
695 mono_bitset_sub_fast (out_bb->live_in_set, out_bb->kill_set);
696 mono_bitset_union_fast (out_bb->live_in_set, out_bb->gen_set);
699 mono_bitset_union_fast (bb->live_out_set, out_bb->live_in_set);
/* live_out changed -> recompute live_in and requeue predecessors. */
702 if (changed || !mono_bitset_equal (old_live_out_set, bb->live_out_set)) {
703 if (!bb->live_in_set) {
704 bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
707 mono_bitset_copyto_fast (bb->live_out_set, bb->live_in_set);
708 mono_bitset_sub_fast (bb->live_in_set, bb->kill_set);
709 mono_bitset_union_fast (bb->live_in_set, bb->gen_set);
711 for (j = 0; j < bb->in_count; j++) {
712 MonoBasicBlock *in_bb = bb->in_bb [j];
714 * Some basic blocks do not seem to be in the
715 * cfg->bblocks array...
717 if (in_bb->gen_set && !in_worklist [in_bb->dfn]) {
718 #ifdef DEBUG_LIVENESS
719 printf ("\tADD: %d\n", in_bb->block_num);
722 * Put the block at the top of the stack, so it
723 * will be processed right away.
725 worklist [l_end ++] = in_bb;
726 in_worklist [in_bb->dfn] = TRUE;
732 #ifdef DEBUG_LIVENESS
733 printf ("IT: %d %d.\n", cfg->num_bblocks, out_iter);
736 mono_bitset_free (old_live_out_set);
739 g_free (in_worklist);
741 /* Compute live_in_set for bblocks skipped earlier */
742 for (i = 0; i < cfg->num_bblocks; ++i) {
743 MonoBasicBlock *bb = cfg->bblocks [i];
745 if (!bb->live_in_set) {
746 bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
749 mono_bitset_copyto_fast (bb->live_out_set, bb->live_in_set);
750 mono_bitset_sub_fast (bb->live_in_set, bb->kill_set);
751 mono_bitset_union_fast (bb->live_in_set, bb->gen_set);
755 #ifdef DEBUG_LIVENESS
756 for (i = cfg->num_bblocks - 1; i >= 0; i--) {
757 MonoBasicBlock *bb = cfg->bblocks [i];
759 printf ("LIVE IN BB%d: ", bb->block_num);
760 mono_bitset_print (bb->live_in_set);
761 printf ("LIVE OUT BB%d: ", bb->block_num);
762 mono_bitset_print (bb->live_out_set);
/*
 * update_liveness:
 *   Per-instruction step of interval construction (called backwards over
 *   each block by compute_intervals ()).  LAST_USE[vreg] holds the last
 *   seen use position (0 = none).  Defs close a [DEF, last_use] range on
 *   the vreg's interval; uses open one by recording the USE position.
 *   Also records use positions per vreg, propagates preferred registers
 *   through moves, and adds 1-point clobber ranges for call-clobbered
 *   and fixed-clobber hard regs.
 *   NOTE(review): lines are missing throughout (sreg assignments,
 *   dead-def NOP-ification, closing braces) — do not edit without the
 *   full file.
 */
768 update_liveness (MonoCompile *cfg, MonoRegallocContext *ctx, MonoInst *ins, int inst_num, gint32 *last_use)
770 const char *spec = INS_INFO (ins->opcode);
773 LIVENESS_DEBUG (printf ("\t%x: ", inst_num); mono_print_ins (ins));
776 if (spec [MONO_INST_DEST] != ' ') {
/* Store-membase "dreg" is actually a read of the base register. */
777 if (MONO_IS_STORE_MEMBASE (ins)) {
778 if (last_use [ins->dreg] == 0) {
779 LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", ins->dreg, inst_num + INS_POS_USE));
780 last_use [ins->dreg] = inst_num + INS_POS_USE;
783 if (last_use [ins->dreg] > 0) {
784 LIVENESS_DEBUG (printf ("\tadd range to R%d: [%x, %x]\n", ins->dreg, inst_num + INS_POS_DEF, last_use [ins->dreg]));
785 mono_linterval_add_range (ctx->cfg, ctx->varinfo [ins->dreg].interval, inst_num + INS_POS_DEF, last_use [ins->dreg]);
786 last_use [ins->dreg] = 0;
/* Dead def: side-effect-free producers can be dropped entirely;
 * otherwise keep a degenerate [DEF, DEF] range. */
789 if (!vreg_is_volatile (cfg, ins->dreg) && ((ins->opcode == OP_ICONST) || (ins->opcode == OP_I8CONST) || (ins->opcode == OP_R8CONST) || (ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE))) {
790 LIVENESS_DEBUG (printf ("\tdead def of R%d eliminated\n", ins->dreg));
792 spec = INS_INFO (ins->opcode);
794 LIVENESS_DEBUG (printf ("\tdead def of R%d, add range to R%d: [%x, %x]\n", ins->dreg, ins->dreg, inst_num + INS_POS_DEF, inst_num + INS_POS_DEF));
795 mono_linterval_add_range (ctx->cfg, ctx->varinfo [ins->dreg].interval, inst_num + INS_POS_DEF, inst_num + INS_POS_DEF);
799 if (ins->opcode != OP_NOP)
800 /* Since we process instructions backwards, the list will be properly sorted */
801 ctx->varinfo [ins->dreg].use_pos = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->varinfo [ins->dreg].use_pos, GINT_TO_POINTER (inst_num));
803 /* Set preferred vregs */
804 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
805 if (ins->sreg1 < MONO_FIRST_VREG) {
806 ctx->varinfo [ins->dreg].preferred_reg = ins->sreg1;
807 } else if (ins->dreg < MONO_FIRST_VREG) {
808 ctx->varinfo [ins->sreg1].preferred_reg = ins->dreg;
809 } else if (ctx->varinfo [ins->dreg].preferred_reg != -1) {
811 * Propagate preferred vregs. This works because instructions are
812 * processed in reverse order.
814 ctx->varinfo [ins->sreg1].preferred_reg = ctx->varinfo [ins->dreg].preferred_reg;
821 if (spec [MONO_INST_SRC1] != ' ') {
822 if (last_use [sreg] == 0) {
823 LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num + INS_POS_USE));
824 last_use [sreg] = inst_num + INS_POS_USE;
826 ctx->varinfo [sreg].use_pos = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->varinfo [sreg].use_pos, GINT_TO_POINTER (inst_num));
831 if (spec [MONO_INST_SRC2] != ' ') {
832 if (last_use [sreg] == 0) {
833 LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num + INS_POS_USE));
834 last_use [sreg] = inst_num + INS_POS_USE;
836 ctx->varinfo [sreg].use_pos = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->varinfo [sreg].use_pos, GINT_TO_POINTER (inst_num));
/* Calls also "use" their fixed argument hard regs. */
839 if (ins_get_spec (ins->opcode)[MONO_INST_CLOB] == 'c') {
840 MonoCallInst *call = (MonoCallInst*)ins;
843 for (list = call->out_ireg_args; list; list = list->next) {
846 regpair = (guint32)(gssize)(list->data);
847 sreg = regpair >> 24;
849 if (last_use [sreg] == 0) {
850 LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num + INS_POS_USE));
851 last_use [sreg] = inst_num + INS_POS_USE;
853 ctx->varinfo [sreg].use_pos = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->varinfo [sreg].use_pos, GINT_TO_POINTER (inst_num));
856 for (list = call->out_freg_args; list; list = list->next) {
859 regpair = (guint32)(gssize)(list->data);
860 sreg = (regpair >> 24) + MONO_MAX_IREGS;
862 if (last_use [sreg] == 0) {
863 LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num + INS_POS_USE));
864 last_use [sreg] = inst_num + INS_POS_USE;
866 ctx->varinfo [sreg].use_pos = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->varinfo [sreg].use_pos, GINT_TO_POINTER (inst_num));
871 if (ins_get_spec (ins->opcode)[MONO_INST_CLOB]) {
872 char clob = ins_get_spec (ins->opcode)[MONO_INST_CLOB];
876 /* A call clobbers some int/fp registers */
877 for (l = mono_arch_get_iregs_clobbered_by_call ((MonoCallInst*)ins); l; l = l->next)
878 mono_linterval_add_range (ctx->cfg, ctx->varinfo [GPOINTER_TO_INT (l->data)].interval, inst_num + INS_POS_CLOB, inst_num + INS_POS_CLOB);
879 for (l = mono_arch_get_fregs_clobbered_by_call ((MonoCallInst*)ins); l; l = l->next)
880 mono_linterval_add_range (ctx->cfg, ctx->varinfo [GPOINTER_TO_INT (l->data)].interval, inst_num + INS_POS_CLOB, inst_num + INS_POS_CLOB);
883 int clob_reg = MONO_ARCH_INST_FIXED_REG (clob);
886 mono_linterval_add_range (ctx->cfg, ctx->varinfo [clob_reg].interval, inst_num + INS_POS_CLOB, inst_num + INS_POS_CLOB);
894 * Compute liveness intervals for all vregs.
/* Positions are encoded as (bb->dfn << 16) + instruction offset, each
 * instruction advancing by INS_POS_INTERVAL.  Each block is scanned
 * backwards (via a buffered reverse array) feeding update_liveness (). */
897 compute_intervals (MonoCompile *cfg, MonoRegallocContext *ctx)
899 int bnum, idx, i, j, nins, rem, max, max_vars, block_from, block_to, pos, reverse_len;
903 max_vars = cfg->next_vreg;
904 last_use = g_new0 (gint32, max_vars);
/* NOTE(review): the initial value of reverse_len is not visible here. */
907 reverse = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * reverse_len);
909 for (idx = 0; idx < max_vars; ++idx) {
910 ctx->varinfo [idx].interval = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLiveInterval));
914 * Process bblocks in reverse order, so the addition of new live ranges
915 * to the intervals is faster.
917 for (bnum = cfg->num_bblocks - 1; bnum >= 0; --bnum) {
918 MonoBasicBlock *bb = cfg->bblocks [bnum];
921 block_from = (bb->dfn << 16); /* so pos > 0 */
922 if (bnum < cfg->num_bblocks - 1)
923 /* Beginning of the next bblock */
924 block_to = (cfg->bblocks [bnum + 1]->dfn << 16);
926 block_to = (bb->dfn << 16) + 0xffff;
928 LIVENESS_DEBUG (printf ("LIVENESS BLOCK BB%d:\n", bb->block_num));
930 memset (last_use, 0, max_vars * sizeof (gint32));
932 /* For variables in bb->live_out, set last_use to block_to */
934 rem = max_vars % BITS_PER_CHUNK;
935 max = ((max_vars + (BITS_PER_CHUNK -1)) / BITS_PER_CHUNK);
936 for (j = 0; j < max; ++j) {
940 bits_out = mono_bitset_get_fast (bb->live_out_set, j);
941 k = (j * BITS_PER_CHUNK);
944 LIVENESS_DEBUG (printf ("Var R%d live at exit, set last_use to %x\n", k, block_to));
945 last_use [k] = block_to;
/* Forward pass: buffer instructions so they can be walked backwards,
 * growing the buffer geometrically when it fills. */
952 for (nins = 0, pos = block_from, ins = bb->code; ins; ins = ins->next, ++nins, pos += INS_POS_INTERVAL) {
953 if (nins >= reverse_len) {
954 int new_reverse_len = reverse_len * 2;
955 MonoInst **new_reverse = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * new_reverse_len);
956 memcpy (new_reverse, reverse, sizeof (MonoInst*) * reverse_len);
957 reverse = new_reverse;
958 reverse_len = new_reverse_len;
961 reverse [nins] = ins;
964 g_assert (pos < block_to);
966 /* Process instructions backwards */
967 for (i = nins - 1; i >= 0; --i) {
968 MonoInst *ins = (MonoInst*)reverse [i];
970 update_liveness (cfg, ctx, ins, pos, last_use);
972 pos -= INS_POS_INTERVAL;
975 for (idx = 0; idx < max_vars; ++idx) {
976 if (last_use [idx] != 0) {
977 /* Live at exit, not written -> live on enter */
978 LIVENESS_DEBUG (printf ("Var R%d live at enter, add range to R%d: [%x, %x)\n", idx, idx, block_from, last_use [idx]));
979 mono_linterval_add_range (cfg, ctx->varinfo [idx].interval, block_from, last_use [idx]);
987 * Arguments need to have their live ranges extended to the beginning of
988 * the method to account for the arg reg/memory -> global register copies
989 * in the prolog (bug #74992).
991 for (i = 0; i < cfg->num_varinfo; i ++) {
992 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
993 if ((cfg->varinfo [vi->idx]->opcode == OP_ARG) && (cfg->varinfo [vi->idx] != cfg->ret))
994 mono_linterval_add_range (cfg, ctx->varinfo [cfg->varinfo [i]->dreg].interval, 0, 1);
999 for (idx = 0; idx < max_vars; ++idx) {
1000 printf ("LIVENESS R%d: ", idx);
1001 mono_linterval_print (ctx->varinfo [idx].interval);
1013 * Perform liveness analysis.
/* Driver: GEN/KILL sets, then the live_in/live_out fixpoint, then the
 * per-vreg interval construction. */
1016 analyze_liveness (MonoCompile *cfg, MonoRegallocContext *ctx)
1018 LIVENESS_DEBUG (printf ("LIVENESS 3 %s\n", mono_method_full_name (cfg->method, TRUE)));
1020 /* FIXME: Make only one pass over the IR */
1022 compute_gen_kill_sets (cfg, ctx);
1024 compute_live_in_out_sets (cfg, ctx);
1026 compute_intervals (cfg, ctx);
/*
 * compare_by_interval_start_pos_func:
 *   GCompareFunc ordering intervals by the start of their first live
 *   range; intervals with no range sort relative to those that have one.
 *   NOTE(review): the equal/empty-range branches are only partly visible
 *   in this listing.
 */
1031 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1033 MonoRegallocInterval *v1 = (MonoRegallocInterval*)a;
1034 MonoRegallocInterval *v2 = (MonoRegallocInterval*)b;
1038 else if (v1->interval->range && v2->interval->range)
1039 return v1->interval->range->from - v2->interval->range->from;
1040 else if (v1->interval->range)
/* LSCAN_DEBUG: linear-scan debug output gated on cfg->verbose_level > 2. */
1046 #define LSCAN_DEBUG(a) MINI_DEBUG(cfg->verbose_level, 2, a;)
1051 * Split the interval into two child intervals at POS.
1052 * [a, b] becomes [a, POS - 1], [POS, b].
/* Both children get fresh vreg ids from ctx->num_intervals and inherit
 * the parent's fp/volatile/type attributes; use positions are divided
 * between them, and the split location is recorded in
 * ctx->split_positions (+ the coarse split_position_set) so spill code
 * can be inserted later.
 * NOTE(review): the condition choosing child1 vs child2 for each use
 * position (around line 1087) is missing from this listing. */
1055 split_interval (MonoCompile *cfg, MonoRegallocContext *ctx, MonoRegallocInterval *interval, int pos)
1057 MonoRegallocInterval *child1, *child2;
1058 GSList *l, *split_list;
1060 child1 = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRegallocInterval));
1061 child2 = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRegallocInterval));
1062 child1->vreg = ctx->num_intervals ++;
1064 child1->offset = -1;
1065 child1->preferred_reg = -1;
1066 child1->is_volatile = interval->is_volatile;
1067 child1->fp = interval->fp;
1068 child1->type = interval->type;
1069 child2->vreg = ctx->num_intervals ++;
1071 child2->offset = -1;
1072 child2->preferred_reg = -1;
1073 child2->is_volatile = interval->is_volatile;
1074 child2->fp = interval->fp;
1075 child2->type = interval->type;
1077 interval->child1 = child1;
1078 interval->child2 = child2;
1079 child1->parent = interval;
1080 child2->parent = interval;
1082 mono_linterval_split (cfg, interval->interval, &child1->interval, &child2->interval, pos);
1084 /* Split use positions */
1085 for (l = interval->use_pos; l; l = l->next) {
1086 int use_pos = GPOINTER_TO_INT (l->data);
1089 child1->use_pos = g_slist_append_mempool (cfg->mempool, child1->use_pos, l->data);
1091 child2->use_pos = g_slist_append_mempool (cfg->mempool, child2->use_pos, l->data);
1094 /* Remember where spill code needs to be inserted */
1095 split_list = g_hash_table_lookup (ctx->split_positions, GUINT_TO_POINTER (pos));
1096 split_list = g_slist_prepend (split_list, interval);
1097 g_hash_table_insert (ctx->split_positions, GUINT_TO_POINTER (pos), split_list);
1098 g_hash_table_insert (ctx->split_position_set, GUINT_TO_POINTER (pos - (pos % INS_POS_INTERVAL)), GUINT_TO_POINTER (pos));
1100 if (cfg->verbose_level > 2) {
1101 printf ("\tSplit R%d into R%d and R%d at %x\n", interval->vreg, child1->vreg, child2->vreg, pos);
1102 printf ("\t R%d ", interval->vreg);
1103 mono_linterval_print (interval->interval);
1104 printf ("-> R%d ", child1->vreg);
1105 mono_linterval_print (child1->interval);
1106 printf ("||| R%d ", child2->vreg);
1107 mono_linterval_print (child2->interval);
1115 * Return L or one of its children which covers POS.
/* Recursive descent through the split tree; hard-reg intervals
 * (vreg < MONO_FIRST_VREG) are returned as-is.  Asserts if POS is not
 * covered at all. */
1117 static MonoRegallocInterval*
1118 child_at (MonoRegallocInterval *l, int pos)
1120 if (l->vreg < MONO_FIRST_VREG)
1124 g_assert (mono_linterval_covers (l->interval, pos));
1128 if (mono_linterval_covers (l->child1->interval, pos))
1129 return child_at (l->child1, pos);
1130 else if (mono_linterval_covers (l->child2->interval, pos))
1131 return child_at (l->child2, pos);
1133 g_assert_not_reached ();
1139 * decompose_volatile_intervals:
1141 * Decompose intervals belonging to volatile variables. Return the decomposed intervals
1142 * which should be allocated to registers.
1145 decompose_volatile_intervals (MonoCompile *cfg, MonoRegallocContext *ctx, GList *intervals)
1147 GList *new_intervals;
1151 * We model volatile intervals by splitting them at use positions and spilling the
1152 * sub intervals, ie. [a, b] is transformed to [a, a], [a + 1, b], [b, b] with the
1153 * middle interval spilled. This ensures that the variable will be spilled after each
1154 * def, and it will be loaded before each use.
1155 * FIXME: Stress test this by making most variables volatile
1157 new_intervals = g_list_copy (intervals);
1158 for (l = intervals; l; l = l->next) {
1159 MonoRegallocInterval *current = l->data;
1162 if (!current->is_volatile)
1166 * Instead of trying to split the arbitrary interval produced by the liveness
1167 * analysis phase, just use one big interval.
/* (32767 << 16) is the maximum encodable position: dfn field is 15 bits
 * in the (dfn << 16) + offset scheme — TODO confirm against the full file. */
1169 current->interval = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLiveInterval));
1170 mono_linterval_add_range (cfg, current->interval, 0, (32767 << 16));
1172 LSCAN_DEBUG (printf ("R%d is volatile ", current->vreg));
1173 LSCAN_DEBUG (mono_linterval_print (current->interval));
1174 LSCAN_DEBUG (printf ("\n"));
/* The original (spilled) interval is removed; only the short
 * around-the-use children are handed back for register allocation. */
1176 new_intervals = g_list_remove (new_intervals, current);
1178 use_pos = current->use_pos;
/* NOTE(review): the loop header iterating use_pos is missing here. */
1180 int pos = GPOINTER_TO_INT (use_pos->data);
1181 use_pos = use_pos->next;
1183 LSCAN_DEBUG (printf ("\tUse pos: %x\n", pos));
1185 /* Split the part of the interval before the definition into its own interval */
1186 if (pos > current->interval->range->from) {
1187 split_interval (cfg, ctx, current, pos);
1188 current = current->child2;
1191 /* Split the use into its own interval */
1192 split_interval (cfg, ctx, current, pos + INS_POS_INTERVAL);
1193 new_intervals = g_list_insert_sorted (new_intervals, current->child1, compare_by_interval_start_pos_func);
1194 current = current->child2;
1196 /* No need to (and hard to) split between use positions at the same place */
1197 while (use_pos && GPOINTER_TO_INT (use_pos->data) == pos)
1198 use_pos = use_pos->next;
1202 return new_intervals;
1208 * The actual linear scan register allocation algorithm.
/*
 * linear_scan:
 *
 *   The main linear scan allocation loop (after Wimmer's "Linear Scan Register
 * Allocation for the Java HotSpot Client Compiler", see the file header).
 * Processes intervals from 'unhandled' in start-position order, maintaining the
 * classic 'active' (covers current pos) and 'inactive' (live but with a hole at
 * current pos) sets, and assigns each interval a hard register, splitting and
 * spilling intervals when no register is free for the whole lifetime.
 * NOTE(review): several lines of this function are not visible in this view;
 * the comments below describe only the code that is visible.
 */
1211 linear_scan (MonoCompile *cfg, MonoRegallocContext *ctx)
1213 GList *int_regs = mono_arch_get_global_int_regs (cfg);
1214 GList *fp_regs = mono_arch_get_global_fp_regs (cfg);
1216 GList *unhandled, *active, *inactive, *l, *next;
/* free_pos [hreg] = first position at which hreg is no longer free for 'current' */
1217 gint32 free_pos [MONO_MAX_IREGS + MONO_MAX_FREGS];
/* allocateable [hreg] = whether the architecture allows global allocation to hreg */
1218 gboolean allocateable [MONO_MAX_IREGS + MONO_MAX_FREGS];
1220 MonoMethodSignature *sig;
1221 MonoMethodHeader *header;
1223 LSCAN_DEBUG (printf ("\nLinear Scan 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
1225 header = mono_method_get_header (cfg->method);
1227 sig = mono_method_signature (cfg->method);
1229 /* Create list of allocatable variables */
1231 for (i = MONO_FIRST_VREG; i < cfg->next_vreg; ++i) {
1232 if (ctx->varinfo [i].interval->range)
1233 vars = g_list_prepend (vars, &ctx->varinfo [i]);
/* Mark which hregs the backend offers for global allocation, then drop the lists */
1236 for (i = 0; i < MONO_MAX_IREGS; ++i)
1237 allocateable [i] = g_list_find (int_regs, GINT_TO_POINTER (i)) != NULL;
1238 for (i = 0; i < MONO_MAX_FREGS; ++i)
1239 allocateable [MONO_MAX_IREGS + i] = g_list_find (fp_regs, GINT_TO_POINTER (i)) != NULL;
1240 g_list_free (int_regs);
1241 g_list_free (fp_regs);
1243 unhandled = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
1247 /* The hard registers are assigned to themselves */
1248 for (i = 0; i < MONO_MAX_IREGS + MONO_MAX_FREGS; ++i) {
1249 ctx->varinfo [i].hreg = i;
1250 if (ctx->varinfo [i].interval->range)
1251 inactive = g_list_append (inactive, &ctx->varinfo [i]);
/* Split volatile variables up-front so each use gets its own tiny interval */
1254 unhandled = decompose_volatile_intervals (cfg, ctx, unhandled);
1257 * Handle arguments received on the stack by splitting their interval, and later
1258 * allocating the spilled part to the arg location.
1260 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1261 MonoInst *ins = cfg->args [i];
1262 MonoRegallocInterval *current = &ctx->varinfo [ins->dreg];
1265 if (sig->hasthis && (i == 0))
1266 arg_type = &mono_defaults.object_class->byval_arg;
1268 arg_type = sig->params [i - sig->hasthis];
1270 if (ins->opcode != OP_REGVAR && !MONO_TYPE_ISSTRUCT (arg_type) && !current->is_volatile && current->interval->range) {
1271 /* This ensures there is some part of the interval before the use pos */
1272 g_assert (current->interval->range->from == 0);
1274 /* Have to split at an use pos so a spill load can be inserted */
1275 if (current->use_pos) {
1276 guint32 pos = GPOINTER_TO_INT (current->use_pos->data);
1278 split_interval (cfg, ctx, current, pos);
1279 unhandled = g_list_remove (unhandled, current);
1280 unhandled = g_list_insert_sorted (unhandled, current->child2, compare_by_interval_start_pos_func);
/* Main loop: pop the interval with the smallest start position */
1286 MonoRegallocInterval *current = unhandled->data;
1287 int pos, reg, max_free_pos;
1290 unhandled = g_list_delete_link (unhandled, unhandled);
1292 LSCAN_DEBUG (printf ("Processing R%d: ", current->vreg));
1293 LSCAN_DEBUG (mono_linterval_print (current->interval));
1294 LSCAN_DEBUG (printf ("\n"));
1296 if (!current->interval->range)
1299 /* Happens when splitting intervals */
1300 if (!current->use_pos)
1303 pos = current->interval->range->from;
1305 /* Check for intervals in active which expired or inactive */
1307 /* FIXME: Optimize this */
1310 MonoRegallocInterval *v = l->data;
1313 if (v->interval->last_range->to < pos) {
1314 active = g_list_delete_link (active, l);
1315 LSCAN_DEBUG (printf ("\tInterval R%d has expired\n", v->vreg));
1316 } else if (!mono_linterval_covers (v->interval, pos)) {
1317 inactive = g_list_append (inactive, v);
1318 active = g_list_delete_link (active, l);
1319 LSCAN_DEBUG (printf ("\tInterval R%d became inactive\n", v->vreg));
1324 /* Check for intervals in inactive which are expired or active */
1327 MonoRegallocInterval *v = l->data;
1330 if (v->interval->last_range->to < pos) {
1331 inactive = g_list_delete_link (inactive, l);
1332 LSCAN_DEBUG (printf ("\tInterval R%d has expired\n", v->vreg));
1333 } else if (mono_linterval_covers (v->interval, pos)) {
1334 active = g_list_append (active, v);
1335 inactive = g_list_delete_link (inactive, l);
1336 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", v->vreg));
1341 /* Find a register for the current interval */
/* fp intervals use the hreg range [MONO_MAX_IREGS, MONO_MAX_IREGS+MONO_MAX_FREGS) */
1342 if (G_UNLIKELY (current->fp)) {
1343 for (i = MONO_MAX_IREGS; i < MONO_MAX_IREGS + MONO_MAX_FREGS; ++i)
1344 if (allocateable [i])
1345 free_pos [i] = G_MAXINT32;
1349 for (i = 0; i < MONO_MAX_IREGS; ++i)
1350 if (allocateable [i])
1351 free_pos [i] = G_MAXINT32;
/* Registers held by active intervals are busy for the whole of 'current' */
1356 for (l = active; l != NULL; l = l->next) {
1357 MonoRegallocInterval *v = l->data;
1360 free_pos [v->hreg] = 0;
1361 LSCAN_DEBUG (printf ("\threg %d is busy (R%d)\n", v->hreg, v->vreg));
/* Registers held by inactive intervals are free until the first intersection */
1365 for (l = inactive; l != NULL; l = l->next) {
1366 MonoRegallocInterval *v = l->data;
1367 gint32 intersect_pos;
1369 if ((v->hreg >= 0) && (current->fp == v->fp)) {
1370 intersect_pos = mono_linterval_get_intersect_pos (current->interval, v->interval);
1371 if (intersect_pos != -1) {
1372 if (intersect_pos < free_pos [v->hreg])
1373 free_pos [v->hreg] = intersect_pos;
1374 LSCAN_DEBUG (printf ("\threg %d becomes free at %x\n", v->hreg, intersect_pos));
1384 * Arguments should be allocated to the registers they reside in at the start of
1387 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1388 MonoInst *ins = cfg->args [i];
1390 g_assert (ins->opcode == OP_REGVAR);
1391 if (ins->dreg == current->vreg)
/* Pick the register which stays free the longest */
1397 if (G_UNLIKELY (current->fp)) {
1398 for (i = MONO_MAX_IREGS; i < MONO_MAX_IREGS + MONO_MAX_FREGS; ++i)
1399 if (free_pos [i] > max_free_pos) {
1401 max_free_pos = free_pos [i];
1404 for (i = 0; i < MONO_MAX_IREGS; ++i)
1405 if (free_pos [i] > max_free_pos) {
1407 max_free_pos = free_pos [i];
1411 if (current->preferred_reg != -1) {
1412 LSCAN_DEBUG (printf ("\tPreferred register is hreg %d\n", current->preferred_reg));
1413 /* FIXME: Add more cases */
1414 if (free_pos [current->preferred_reg] >= free_pos [reg]) {
1415 reg = current->preferred_reg;
1419 * We have a choice to make: assigning to the preferred reg avoids
1420 * a move, while assigning to 'reg' will keep the variable in a
1421 * register for longer.
1423 if (free_pos [current->preferred_reg] >= current->interval->range->from)
1424 reg = current->preferred_reg;
1430 g_assert (reg != -1);
1432 if (!(free_pos [reg] > 0 && free_pos [reg] >= current->interval->range->from) &&
1433 GPOINTER_TO_INT (current->use_pos->data) <= current->interval->range->from) {
1435 * No register is available, and current is needed in a register right now.
1436 * So free up a register by spilling an interval in active.
1438 MonoRegallocInterval *to_spill;
1441 g_assert (!current->is_volatile);
1443 /* Spill the first */
1444 /* FIXME: Optimize the selection of the interval */
1447 for (l = active; l; l = l->next) {
1450 /* Fixed intervals cannot be spilled */
1451 if (to_spill->vreg >= MONO_FIRST_VREG)
1454 g_assert (to_spill);
1456 LSCAN_DEBUG (printf ("\tNo free register found, splitting and spilling R%d\n", to_spill->vreg));
1457 split_pos = GPOINTER_TO_INT (current->use_pos->data);
1459 * Avoid splitting to_spill before the start of current, since
1460 * its second child, which is added to unhandled would begin before
1463 if (split_pos < current->interval->range->from)
1464 split_pos = current->interval->range->from;
1465 split_interval (cfg, ctx, to_spill, split_pos);
1466 to_spill->child1->hreg = to_spill->hreg;
1467 active = g_list_remove (active, to_spill);
1468 unhandled = g_list_insert_sorted (unhandled, to_spill->child2, compare_by_interval_start_pos_func);
1469 reg = to_spill->hreg;
1471 /* Recompute free_pos [reg] */
1472 free_pos [reg] = G_MAXINT32;
1473 for (l = active; l != NULL; l = l->next) {
1474 MonoRegallocInterval *v = l->data;
1476 if (v->hreg == reg) {
1477 free_pos [v->hreg] = 0;
1478 LSCAN_DEBUG (printf ("\threg %d is busy (R%d)\n", v->hreg, v->vreg));
1482 for (l = inactive; l != NULL; l = l->next) {
1483 MonoRegallocInterval *v = l->data;
1484 gint32 intersect_pos;
1486 if ((v->hreg == reg) && (current->fp == v->fp)) {
1487 intersect_pos = mono_linterval_get_intersect_pos (current->interval, v->interval);
1488 if (intersect_pos != -1) {
1489 if (intersect_pos < free_pos [v->hreg])
1490 free_pos [v->hreg] = intersect_pos;
1491 LSCAN_DEBUG (printf ("\threg %d becomes free at %x\n", v->hreg, intersect_pos));
1497 if (free_pos [reg] > 0 && free_pos [reg] >= current->interval->last_range->to) {
1498 /* Register available for whole interval */
1499 current->hreg = reg;
/* NOTE(review): used_int_regs is set here even on the fp path as visible;
 * the elided lines may guard this — confirm against the full source. */
1501 cfg->used_int_regs |= (1 << reg);
1502 LSCAN_DEBUG (printf ("\tAssigned hreg %d to R%d\n", reg, current->vreg));
1504 active = g_list_append (active, current);
1506 else if (free_pos [reg] > 0 && free_pos [reg] >= current->interval->range->from) {
1508 * The register is available for some part of the interval.
1509 * Split the interval, assign the register to the first part of the
1510 * interval, and save the second part for later processing.
1512 LSCAN_DEBUG (printf ("\tRegister %d is available until %x, splitting current.\n", reg, free_pos [reg]));
1513 split_interval (cfg, ctx, current, free_pos [reg]);
1515 current->child1->hreg = reg;
1517 cfg->used_int_regs |= (1 << reg);
1518 LSCAN_DEBUG (printf ("\tAssigned hreg %d to R%d\n", reg, current->child1->vreg));
1519 active = g_list_append (active, current->child1);
1521 unhandled = g_list_insert_sorted (unhandled, current->child2, compare_by_interval_start_pos_func);
1523 /* No register is available */
1524 if (GPOINTER_TO_INT (current->use_pos->data) > current->interval->range->from) {
1526 * The interval is not currently needed in a register. So split it, and
1527 * spill the first part to memory, and save the second part for later
1530 LSCAN_DEBUG (printf ("\tSplitting current at first use pos %x, spilling the first part.\n", GPOINTER_TO_INT (current->use_pos->data)));
1531 split_interval (cfg, ctx, current, GPOINTER_TO_INT (current->use_pos->data));
1532 unhandled = g_list_insert_sorted (unhandled, current->child2, compare_by_interval_start_pos_func);
1534 /* Handled previously */
1535 g_assert_not_reached ();
1541 * The fp registers are numbered from MONO_MAX_IREGS during allocation, but they are
1542 * numbered from 0 in machine code.
1544 for (i = 0; i < cfg->next_vreg; ++i) {
1545 if (ctx->varinfo [i].fp) {
1548 /* Need to process child intervals as well */
1549 /* This happens rarely so it is not perf critical */
1551 children = g_slist_prepend (children, &ctx->varinfo [i]);
1553 MonoRegallocInterval *interval = children->data;
1555 children = g_slist_delete_link (children, children);
1556 if (interval->hreg != -1)
1557 interval->hreg -= MONO_MAX_IREGS;
1558 if (interval->child1)
1559 children = g_slist_prepend (children, interval->child1);
1560 if (interval->child2)
1561 children = g_slist_prepend (children, interval->child2);
/*
 * collect_spilled_intervals:
 *
 *   Recursively walk INTERVAL and its split children, prepending to LIST every
 * leaf interval that has no hard register (hreg == -1) and a non-empty live
 * range, plus volatile intervals with no range at all (variables referenced
 * only through ldaddr). Returns the updated list; callers must use the return
 * value since g_slist_prepend returns a new head.
 */
1568 collect_spilled_intervals (MonoRegallocInterval *interval, GSList *list)
/* A leaf (unsplit) interval that never got a register needs a stack slot */
1570 if ((interval->hreg == -1) && !interval->child1 && interval->interval->range)
1571 list = g_slist_prepend (list, interval);
1573 if (interval->is_volatile && !interval->interval->range)
1574 /* Variables which are only referenced by ldaddr */
1575 list = g_slist_prepend (list, interval);
/* Split intervals have both children set; recurse into both halves */
1577 if (interval->child1) {
1578 list = collect_spilled_intervals (interval->child1, list);
1579 list = collect_spilled_intervals (interval->child2, list);
/*
 * alloc_spill_slot:
 *
 *   Allocate a stack slot of SIZE bytes with the given ALIGN (must be a power
 * of two — the mask arithmetic below relies on it) by bumping
 * cfg->stack_offset. Returns the frame offset of the new slot: positive when
 * the stack grows up (MONO_CFG_HAS_SPILLUP), negative otherwise.
 */
1586 alloc_spill_slot (MonoCompile *cfg, guint32 size, guint32 align)
1591 res = cfg->stack_offset;
1593 if (cfg->flags & MONO_CFG_HAS_SPILLUP) {
/* Round stack_offset up to 'align', slot starts there, then reserve 'size' */
1594 cfg->stack_offset += align - 1;
1595 cfg->stack_offset &= ~(align - 1);
1596 res = cfg->stack_offset;
1597 cfg->stack_offset += size;
/* Downward-growing frame: reserve first, offset is the negated total */
1599 cfg->stack_offset += align - 1;
1600 cfg->stack_offset &= ~(align - 1);
1601 cfg->stack_offset += size;
1602 res = - cfg->stack_offset;
/*
 * assign_spill_slots:
 *
 *   Assign a stack location to every spilled interval. Stack-passed arguments
 * and a vtype return reuse the offsets already computed by the backend
 * (OP_REGOFFSET); everything else gets a fresh slot from alloc_spill_slot ().
 * All split children of one interval share their parent's slot so a value is
 * always reloaded from where it was stored.
 */
1610 assign_spill_slots (MonoCompile *cfg, MonoRegallocContext *ctx)
1612 GSList *spilled_intervals = NULL;
1614 MonoMethodSignature *sig;
1617 /* Handle arguments passed on the stack */
1618 sig = mono_method_signature (cfg->method);
1619 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1620 MonoInst *ins = cfg->args [i];
1623 if (sig->hasthis && (i == 0))
1624 arg_type = &mono_defaults.object_class->byval_arg;
1626 arg_type = sig->params [i - sig->hasthis];
1628 if (MONO_TYPE_ISSTRUCT (arg_type) || (ins->opcode != OP_REGVAR)) {
1629 g_assert (ins->opcode == OP_REGOFFSET);
1630 // FIXME: Add a basereg field to varinfo
1632 g_assert (ins->inst_offset != -1);
1633 ctx->varinfo [ins->dreg].offset = ins->inst_offset;
1637 /* Handle a vtype return */
1638 if (!cfg->vret_addr && MONO_TYPE_ISSTRUCT (sig->ret)) {
1639 MonoInst *ins = cfg->ret;
1641 ctx->varinfo [ins->dreg].offset = ins->inst_offset;
/* Gather every interval (and split child) that still needs a stack slot */
1644 for (i = 0; i < cfg->next_vreg; ++i) {
1645 spilled_intervals = collect_spilled_intervals (&ctx->varinfo [i], spilled_intervals);
1648 LSCAN_DEBUG (printf ("\nSPILL OFFSETS:\n\n"));
1650 for (l = spilled_intervals; l; l = l->next) {
1651 MonoRegallocInterval *interval = l->data;
1652 MonoRegallocInterval *parent;
1655 * All spilled sub-intervals of a interval must share the stack slot.
1656 * This is accomplished by storing the stack offset in the original interval
1657 * and using that offset for all its children.
/* Walk up to the root interval this child was split from */
1660 for (parent = interval; parent->parent != NULL; parent = parent->parent)
1662 if (parent->offset != -1) {
1663 interval->offset = parent->offset;
1664 } else if (interval->offset != -1) {
1665 /* Already allocated (for example, valuetypes as arguments) */
1667 guint32 size, align;
1669 if (MONO_TYPE_ISSTRUCT (interval->type)) {
1670 // FIXME: pinvoke, gsctx
1672 size = mini_type_stack_size (NULL, interval->type, NULL);
1673 } else if (interval->fp) {
1674 size = sizeof (double);
1676 size = sizeof (gpointer);
1679 align = sizeof (gpointer);
1680 interval->offset = alloc_spill_slot (cfg, size, align);
/* Propagate the chosen offset back up the parent chain */
1683 for (parent = interval; parent != NULL; parent = parent->parent) {
1684 if (parent->offset == -1)
1685 parent->offset = interval->offset;
1688 LSCAN_DEBUG (printf ("R%d %d", interval->vreg, interval->offset));
1689 LSCAN_DEBUG (mono_linterval_print (interval->interval));
1690 LSCAN_DEBUG (printf ("\n"));
1697 * Order the instructions in MOVES so earlier moves don't overwrite the sources of
/*
 * order_moves:
 *
 *   Order the NMOVES parallel register-register moves in MOVES so that no move
 * overwrites the source register of a later move. Cycle handling is a
 * bail-out: if reordering does not converge within nmoves * 2 iterations, all
 * sources are stored to freshly allocated stack slots and reloaded into the
 * destinations instead. Returns the resulting instruction list; the new
 * store/load instructions are registered in ctx->spill_ins.
 * NOTE(review): lines are elided in this view — the swap logic around line
 * 1727 is only partially visible.
 */
1701 order_moves (MonoCompile *cfg, MonoRegallocContext *ctx, MonoInst **moves, int nmoves)
1707 * Sort the moves so earlier moves don't overwrite the sources of later
1710 /* FIXME: Do proper cycle detection instead of the current ugly hack */
1712 for (i = 0; i < nmoves; ++i) {
/* If a later move reads what moves [i] writes, reorder the pair */
1718 for (j = i + 1; j < nmoves; ++j)
1719 if (moves [i]->dreg == moves [j]->sreg1) {
1727 moves [j] = moves [i];
1731 if (niter > nmoves * 2)
1732 /* Possible cycle */
1736 if (niter > nmoves * 2)
1741 if (niter > nmoves * 2) {
1746 * Save all registers to the stack and reload them again.
1747 * FIXME: Optimize this.
1750 /* Allocate spill slots */
1751 offsets = mono_mempool_alloc (cfg->mempool, nmoves * sizeof (int));
1752 for (i = 0; i < nmoves; ++i) {
1753 guint32 size = sizeof (gpointer);
/* Same up/down frame-growth handling as alloc_spill_slot () */
1755 if (cfg->flags & MONO_CFG_HAS_SPILLUP) {
1756 cfg->stack_offset += size - 1;
1757 cfg->stack_offset &= ~(size - 1);
1758 offsets [i] = cfg->stack_offset;
1759 cfg->stack_offset += size;
1761 cfg->stack_offset += size - 1;
1762 cfg->stack_offset &= ~(size - 1);
1763 cfg->stack_offset += size;
1764 offsets [i] = - cfg->stack_offset;
/* First emit all stores (save every move source) */
1769 for (i = 0; i < nmoves; ++i) {
1770 if (moves [i]->opcode != OP_MOVE)
1772 MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
1773 ins->sreg1 = moves [i]->sreg1;
1774 ins->inst_destbasereg = cfg->frame_reg;
1775 ins->inst_offset = offsets [i];
1777 l = g_slist_append_mempool (cfg->mempool, l, ins);
1778 g_hash_table_insert (ctx->spill_ins, ins, ins);
/* Then all loads (fill every move destination) — order is now safe */
1782 for (i = 0; i < nmoves; ++i) {
1783 if (moves [i]->opcode != OP_MOVE)
1785 MONO_INST_NEW (cfg, ins, OP_LOAD_MEMBASE);
1786 ins->dreg = moves [i]->dreg;
1787 ins->inst_basereg = cfg->frame_reg;
1788 ins->inst_offset = offsets [i];
1790 l = g_slist_append_mempool (cfg->mempool, l, ins);
1791 g_hash_table_insert (ctx->spill_ins, ins, ins);
/* No cycle: the reordered moves can be emitted directly */
1796 for (i = 0; i < nmoves; ++i)
1797 l = g_slist_append_mempool (cfg->mempool, l, moves [i]);
1806 * Add spill loads and stores to the IR at the locations where intervals were split.
/*
 * add_spill_code:
 *
 *   Insert spill loads/stores into the IR wherever intervals were split.
 * Two passes per basic block:
 *   1. at each recorded split position, connect the interval's child1 (the
 *      location before the split) to child2 (the location after it) with a
 *      store, a load, or a store+load pair through a temporary stack slot;
 *   2. resolve data flow across block edges: if a variable lives in different
 *      locations at the end of 'bb' and the start of a successor, emit the
 *      fixing store/move/load on that edge (stores first, then ordered moves,
 *      then loads, so nothing is clobbered).
 * All instructions created here are recorded in ctx->spill_ins so later passes
 * skip them. NOTE(review): many lines are elided in this view; comments
 * describe only the visible code.
 */
1809 add_spill_code (MonoCompile *cfg, MonoRegallocContext *ctx)
1812 MonoInst *ins, *prev, *store, *load, *move, *insert_after;
1813 GSList *spill_list, *l, *ins_to_add, *moves_to_add;
1814 MonoRegallocInterval *child1, *child2;
1815 int pos, pos_interval, pos_interval_limit;
1816 MonoBasicBlock *out_bb;
1817 int i, bb_count, from_pos, to_pos, iter;
1818 gboolean after_last_ins, add_at_head;
1820 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
1821 if (cfg->verbose_level > 1)
1822 printf ("\nREGALLOC-ADD SPILL CODE %d (DFN 0x%x):\n", bb->block_num, bb->dfn);
1824 /* First pass: Add spill loads/stores to the IR */
/* Positions encode (bb->dfn << 16) + instruction index, as in liveness */
1825 pos = (bb->dfn << 16) + INS_POS_INTERVAL;
1827 after_last_ins = FALSE;
1828 for (ins = bb->code; !after_last_ins;) {
1830 after_last_ins = TRUE;
1831 } else if (g_hash_table_lookup (ctx->spill_ins, ins)) {
1832 /* Spill instruction added by an earlier bblock */
1833 /* No need to increase pos */
1835 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1836 printf (" <spill ins>\n");
1844 if (!g_hash_table_lookup (ctx->split_position_set, GUINT_TO_POINTER (pos))) {
1845 /* No split position at this instruction */
1846 pos_interval_limit = 0;
1847 pos += INS_POS_INTERVAL;
1849 pos_interval_limit = INS_POS_INTERVAL;
1852 for (pos_interval = 0; pos_interval < pos_interval_limit; ++pos_interval) {
1853 spill_list = g_hash_table_lookup (ctx->split_positions, GUINT_TO_POINTER (pos));
1854 /* Insert stores first, then loads so registers don't get overwritten */
1855 for (iter = 0; iter < 2; ++iter) {
1856 for (l = spill_list; l; l = l->next) {
1857 MonoRegallocInterval *interval = l->data;
1859 /* The childs might be split */
1860 if (interval->child1->child1)
1861 child1 = child_at (interval->child1, pos - pos_interval)
1863 child1 = interval->child1;
1864 if (pos < interval->child2->interval->range->from)
1865 /* Happens when volatile intervals are split */
1867 child2 = child_at (interval->child2, pos);
1869 if ((child1->hreg == -1) && (child2->hreg == -1))
1871 * Happens when an interval is split, then the first child
1876 // FIXME: Why is !is_volatile needed ?
1877 // It seems to fail when the same volatile var is a source and a
1878 // destination of the same instruction
1879 if ((iter == 0) && (child1->hreg != -1) && (child2->hreg != -1) && !interval->is_volatile) {
1883 * This is complex situation: the vreg is expected to be in
1884 * child1->hreg before the instruction, and in child2->hreg
1885 * after the instruction. We can't insert a move before,
1886 * because that could overwrite the input regs of the
1887 * instruction, and we can't insert a move after, since the
1888 * instruction could overwrite the source reg of the move.
1889 * Instead, we insert a store before the instruction, and a
1891 * FIXME: Optimize child1->hreg == child2->hreg
1893 offset = alloc_spill_slot (cfg, sizeof (gpointer), sizeof (gpointer));
1895 NEW_STORE_MEMBASE (cfg, store, mono_type_to_store_membase (cfg, interval->type), cfg->frame_reg, offset, child1->hreg);
1903 g_hash_table_insert (ctx->spill_ins, store, store);
1905 NEW_LOAD_MEMBASE (cfg, load, mono_type_to_load_membase (cfg, interval->type), child2->hreg, cfg->frame_reg, offset);
1907 insert_after_ins (bb, load, ins);
1908 g_hash_table_insert (ctx->spill_ins, load, load);
1910 LSCAN_DEBUG (printf (" Spill store/load added for R%d (R%d -> R%d) at %x\n", interval->vreg, child1->vreg, child2->vreg, pos));
/* reg -> memory transition: store before the instruction */
1911 } else if ((iter == 0) && (child1->hreg != -1) && (child2->hreg == -1)) {
1912 g_assert (child2->offset != -1);
1914 NEW_STORE_MEMBASE (cfg, store, mono_type_to_store_membase (cfg, interval->type), cfg->frame_reg, child2->offset, child1->hreg);
1922 g_hash_table_insert (ctx->spill_ins, store, store);
1924 LSCAN_DEBUG (printf (" Spill store added for R%d (R%d -> R%d) at %x\n", interval->vreg, child1->vreg, child2->vreg, pos));
/* memory -> reg transition: load in the second (load) iteration */
1925 } else if ((iter == 1) && (child1->hreg == -1) && (child2->hreg != -1)) {
1926 g_assert (child1->offset != -1);
1927 NEW_LOAD_MEMBASE (cfg, load, mono_type_to_load_membase (cfg, interval->type), child2->hreg, cfg->frame_reg, child1->offset);
1935 g_hash_table_insert (ctx->spill_ins, load, load);
1937 LSCAN_DEBUG (printf (" Spill load added for R%d (R%d -> R%d) at %x\n", interval->vreg, child1->vreg, child2->vreg, pos));
1945 if (G_UNLIKELY (cfg->verbose_level > 1))
1947 mono_print_ins (ins);
1955 /* Second pass: Resolve data flow */
1956 for (bb_count = 0; bb_count < bb->out_count; ++bb_count) {
1957 out_bb = bb->out_bb [bb_count];
1959 if (!out_bb->live_in_set)
1960 /* Exception handling block */
/* Position at the end of bb vs. position at the start of out_bb */
1963 from_pos = (bb->dfn << 16) + 0xffff;
1964 to_pos = (out_bb->dfn << 16);
1967 for (i = 0; i < cfg->next_vreg; ++i) {
1968 MonoRegallocInterval *interval = &ctx->varinfo [i];
1970 if (mono_bitset_test_fast (out_bb->live_in_set, i) && mono_linterval_covers (interval->interval, from_pos) && mono_linterval_covers (interval->interval, to_pos)) {
1971 child1 = child_at (interval, from_pos);
1972 child2 = child_at (interval, to_pos);
1973 if (child1 != child2) {
1974 if ((child1->hreg != -1) && (child2->hreg == -1)) {
1975 LSCAN_DEBUG (printf (" Add store for R%d (R%d -> R%d) at BB%d -> BB%d [%x - %x]\n", interval->vreg, child1->vreg, child2->vreg, bb->block_num, out_bb->block_num, from_pos, to_pos));
1976 NEW_STORE_MEMBASE (cfg, store, mono_type_to_store_membase (cfg, interval->type), cfg->frame_reg, child2->offset, child1->hreg);
1977 ins_to_add = g_slist_prepend_mempool (cfg->mempool, ins_to_add, store);
1978 g_hash_table_insert (ctx->spill_ins, store, store);
1979 } else if ((child1->hreg != -1) && (child2->hreg != -1)) {
1980 if (child1->hreg != child2->hreg) {
1981 LSCAN_DEBUG (printf (" Add move for R%d (R%d -> R%d) at BB%d -> BB%d [%x - %x]\n", interval->vreg, child1->vreg, child2->vreg, bb->block_num, out_bb->block_num, from_pos, to_pos));
1982 NEW_UNALU (cfg, move, interval->fp ? OP_FMOVE : OP_MOVE, child2->hreg, child1->hreg);
1983 ins_to_add = g_slist_prepend_mempool (cfg->mempool, ins_to_add, move);
1984 g_hash_table_insert (ctx->spill_ins, move, move);
1986 } else if ((child1->hreg == -1) && (child2->hreg != -1)) {
1987 LSCAN_DEBUG (printf (" Add load for R%d (R%d -> R%d) at BB%d -> BB%d [%x - %x]\n", interval->vreg, child1->vreg, child2->vreg, bb->block_num, out_bb->block_num, from_pos, to_pos));
1988 NEW_LOAD_MEMBASE (cfg, load, mono_type_to_load_membase (cfg, interval->type), child2->hreg, cfg->frame_reg, child1->offset);
1989 ins_to_add = g_slist_prepend_mempool (cfg->mempool, ins_to_add, load);
1990 g_hash_table_insert (ctx->spill_ins, load, load);
/* Both in memory: must be the same slot, nothing to emit */
1992 g_assert (child1->offset == child2->offset);
/* Decide which end of the edge can take the fix-up code */
1998 if (bb->out_count == 1) {
2000 } else if (out_bb->in_count == 1) {
2001 add_at_head = FALSE;
2003 // FIXME: Split critical edges
2008 insert_after = NULL;
2015 * Emit spill instructions in such a way that instructions don't
2016 * overwrite the source registers of instructions coming after them.
2018 /* Simply emit stores, then moves then loads */
2019 for (l = ins_to_add; l; l = l->next) {
2020 MonoInst *ins = l->data;
2022 if (MONO_IS_STORE_MEMBASE (ins)) {
2024 mono_add_ins_to_end (bb, ins);
2026 insert_after_ins (out_bb, ins, insert_after);
2032 /* Collect the moves */
2034 for (l = ins_to_add; l; l = l->next) {
2035 MonoInst *ins = l->data;
2037 if (MONO_IS_MOVE (ins))
2040 moves = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * nmoves);
2042 for (l = ins_to_add; l; l = l->next) {
2043 MonoInst *ins = l->data;
2045 if (MONO_IS_MOVE (ins))
2046 moves [nmoves ++] = ins;
/* Reorder the parallel moves so they don't clobber each other */
2049 moves_to_add = order_moves (cfg, ctx, moves, nmoves);
2051 for (l = moves_to_add; l; l = l->next) {
2052 MonoInst *ins = l->data;
2055 mono_add_ins_to_end (bb, ins);
2057 insert_after_ins (out_bb, ins, insert_after);
2062 for (l = ins_to_add; l; l = l->next) {
2063 MonoInst *ins = l->data;
2065 if (MONO_IS_LOAD_MEMBASE (ins)) {
2067 mono_add_ins_to_end (bb, ins);
2069 insert_after_ins (out_bb, ins, insert_after);
2082 * Replace references to vregs with their assigned physical registers or spill
/*
 * rewrite_code:
 *
 *   Final pass: replace every vreg reference in the IR with the hard register
 * assigned to the interval (or split child) covering that position. Spill
 * instructions added earlier already use hregs and are skipped. OP_LDADDR is
 * lowered here into frame_reg + offset arithmetic, since the addressed
 * variable is guaranteed to live on the stack.
 */
2086 rewrite_code (MonoCompile *cfg, MonoRegallocContext *ctx)
2089 MonoInst *ins, *prev;
2092 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2093 if (cfg->verbose_level > 1)
2094 printf ("\nREGALLOC-REWRITE BLOCK %d:\n", bb->block_num);
/* Same (dfn << 16) position encoding used by liveness and add_spill_code */
2096 pos = (bb->dfn << 16);
2098 for (ins = bb->code; ins; ins = ins->next) {
2099 const char *spec = INS_INFO (ins->opcode);
2100 pos += INS_POS_INTERVAL;
2102 if (G_UNLIKELY (cfg->verbose_level > 1))
2103 mono_print_ins (ins);
2105 if (g_hash_table_lookup (ctx->spill_ins, ins)) {
2107 * This instruction was added after liveness info was computed, and thus
2108 * screws up the pos calculation. The instruction already uses hregs.
2110 pos -= INS_POS_INTERVAL;
2116 if (ins->opcode == OP_NOP)
2119 if (ins->opcode == OP_LDADDR) {
2120 MonoRegallocInterval *l = child_at (&ctx->varinfo [ins->dreg], pos + INS_POS_DEF);
2121 MonoInst *var = ins->inst_p0;
/* The variable whose address is taken must be spilled, with a slot assigned */
2124 g_assert (ctx->varinfo [var->dreg].hreg == -1);
2125 g_assert (ctx->varinfo [var->dreg].offset != -1);
2127 if (ctx->varinfo [var->dreg].offset != 0) {
2129 * The ADD_IMM does not satisfy register constraints on x86/amd64.
2131 MONO_INST_NEW (cfg, move, OP_MOVE);
2132 move->dreg = l->hreg;
2133 move->sreg1 = cfg->frame_reg;
/* ldaddr => dreg = frame_reg; dreg += offset */
2141 ins->opcode = OP_ADD_IMM;
2142 ins->dreg = l->hreg;
2143 ins->sreg1 = l->hreg;
2144 ins->inst_imm = ctx->varinfo [var->dreg].offset;
/* Offset 0: a plain move of frame_reg suffices */
2146 ins->opcode = OP_MOVE;
2147 ins->dreg = l->hreg;
2148 ins->sreg1 = cfg->frame_reg;
2150 spec = INS_INFO (OP_NOP);
2153 if (spec [MONO_INST_DEST] != ' ') {
/* For store-membase opcodes 'dreg' is really a base-register use, not a def */
2154 if (MONO_IS_STORE_MEMBASE (ins)) {
2155 MonoRegallocInterval *l = child_at (&ctx->varinfo [ins->dreg], pos + INS_POS_USE);
2156 g_assert (l->hreg != -1);
2157 ins->dreg = l->hreg;
2159 MonoRegallocInterval *l = child_at (&ctx->varinfo [ins->dreg], pos + INS_POS_DEF);
2160 g_assert (l->hreg != -1);
2161 ins->dreg = l->hreg;
2164 if (spec [MONO_INST_SRC1] != ' ') {
2165 MonoRegallocInterval *l = child_at (&ctx->varinfo [ins->sreg1], pos + INS_POS_USE);
2166 g_assert (l->hreg != -1);
2167 ins->sreg1 = l->hreg;
2169 if (spec [MONO_INST_SRC2] != ' ') {
2170 MonoRegallocInterval *l = child_at (&ctx->varinfo [ins->sreg2], pos + INS_POS_USE);
2171 g_assert (l->hreg != -1);
2172 ins->sreg2 = l->hreg;
2175 if (cfg->verbose_level > 1)
2176 mono_print_ins_index (1, ins);
/*
 * regalloc_ctx_create:
 *
 *   Allocate and initialize the MonoRegallocContext from the compile mempool:
 * one MonoRegallocInterval per vreg (hreg/offset/preferred_reg all -1, i.e.
 * "unassigned"), fp flag set for the hard fp register range, volatility taken
 * from the variable's MONO_INST_VOLATILE/INDIRECT flags, plus the hash tables
 * used later for split positions and spill instructions.
 */
2183 static MonoRegallocContext*
2184 regalloc_ctx_create (MonoCompile *cfg)
2186 MonoRegallocContext *ctx;
2189 ctx = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRegallocContext));
2191 ctx->varinfo = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRegallocInterval) * cfg->next_vreg);
2192 ctx->num_intervals = cfg->next_vreg;
2193 for (i = 0; i < cfg->next_vreg; ++i) {
2196 ctx->varinfo [i].vreg = i;
2197 ctx->varinfo [i].hreg = -1;
2198 ctx->varinfo [i].offset = -1;
2199 ctx->varinfo [i].preferred_reg = -1;
/* vregs in [MONO_MAX_IREGS, MONO_MAX_IREGS+MONO_MAX_FREGS) are the hard fp regs */
2201 if (i >= MONO_MAX_IREGS && i < MONO_MAX_IREGS + MONO_MAX_FREGS)
2202 ctx->varinfo [i].fp = TRUE;
2204 var = get_vreg_to_inst (cfg, i);
2205 if (var && (var != cfg->ret) && (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
2206 ctx->varinfo [i].is_volatile = TRUE;
2209 ctx->varinfo [i].type = var->inst_vtype;
/* No MonoInst for this vreg: default to the native int type */
2211 ctx->varinfo [i].type = sizeof (gpointer) == 8 ? &mono_defaults.int64_class->byval_arg : &mono_defaults.int_class->byval_arg;
/* Keyed by position / instruction pointer; no hash funcs needed (direct hash) */
2214 ctx->split_positions = g_hash_table_new (NULL, NULL);
2215 ctx->split_position_set = g_hash_table_new (NULL, NULL);
2216 ctx->spill_ins = g_hash_table_new (NULL, NULL);
/*
 * mono_global_regalloc:
 *
 *   Driver for the global register allocator: set up argument info and
 * register constraints, build the allocation context, classify fp vregs, run
 * liveness analysis and the linear scan pass, then let the backend lay out the
 * frame before assigning spill slots, inserting spill code and rewriting the
 * IR to use hard registers. The pass ordering matters — see the inline
 * comments.
 */
2222 mono_global_regalloc (MonoCompile *cfg)
2224 MonoRegallocContext *ctx;
2226 mono_arch_fill_argument_info (cfg);
2228 /* This could create vregs, so it has to come before ctx_create */
2229 handle_reg_constraints (cfg);
2231 ctx = regalloc_ctx_create (cfg);
2233 collect_fp_vregs (cfg, ctx);
2235 analyze_liveness (cfg, ctx);
2237 linear_scan (cfg, ctx);
/* Backend frame layout must run before spill slots are assigned on top of it */
2239 mono_arch_allocate_vars (cfg);
2241 assign_spill_slots (cfg, ctx);
2243 add_spill_code (cfg, ctx);
2245 rewrite_code (cfg, ctx);
2251 mono_global_regalloc (MonoCompile *cfg)