2 * local-propagation.c: Local constant, copy and tree propagation.
4 * To make some sense of the tree mover, read mono/docs/tree-mover.txt
7 * Paolo Molaro (lupus@ximian.com)
8 * Dietmar Maurer (dietmar@ximian.com)
9 * Massimiliano Mantione (massi@ximian.com)
11 * (C) 2006 Novell, Inc. http://www.novell.com
12 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
13 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
25 #include <mono/metadata/debug-helpers.h>
26 #include <mono/metadata/mempool.h>
27 #include <mono/metadata/opcodes.h>
31 #ifndef MONO_ARCH_IS_OP_MEMBASE
32 #define MONO_ARCH_IS_OP_MEMBASE(opcode) FALSE
/*
 * mono_bitset_mp_new_noinit:
 * Allocate a MonoBitSet capable of holding MAX_SIZE bits out of the
 * mempool MP. The backing memory is NOT zero-initialized ("noinit"),
 * so callers must clear the bits they rely on (mono_local_deadce does
 * this manually per-bblock before use).
 * NOTE(review): some lines are elided in this extraction (e.g. the
 * declaration of 'mem'); comments cover only the visible statements.
 */
35 static inline MonoBitSet*
36 mono_bitset_mp_new_noinit (MonoMemPool *mp, guint32 max_size)
38 	int size = mono_bitset_alloc_size (max_size, 0);
41 	mem = mono_mempool_alloc (mp, size);
/* MONO_BITSET_DONT_FREE: storage belongs to the mempool, the bitset must not free it */
42 	return mono_bitset_mem_new (mem, max_size, MONO_BITSET_DONT_FREE);
/* Result of the magic-number computation for unsigned division by a constant. */
45 struct magic_unsigned {
/*
 * compute_magic_unsigned:
 * Compute the "magic number" and shift amount that let an unsigned
 * 32-bit division by DIVISOR be replaced with a multiply + shifts.
 * Algorithm from Hacker's Delight (magicu.c, chapter 10); see URL below.
 * NOTE(review): the do-loop opening and several statements (q1 init,
 * the 'gt' declaration, shift bookkeeping) are elided in this view.
 */
56 /* http://www.hackersdelight.org/hdcodetxt/magicu.c.txt */
57 static struct magic_unsigned
58 compute_magic_unsigned (guint32 divisor) {
59 	guint32 nc, delta, q1, r1, q2, r2;
60 	struct magic_unsigned magu;
/* nc = largest value such that nc % divisor == divisor - 1 (unsigned wraparound is well-defined) */
65 	nc = -1 - (-divisor) % divisor;
68 	r1 = 0x80000000 - q1 * nc;
69 	q2 = 0x7FFFFFFF / divisor;
70 	r2 = 0x7FFFFFFF - q2 * divisor;
82 		if (r2 + 1 >= divisor - r2) {
86 			r2 = 2 * r2 + 1 - divisor;
93 		delta = divisor - 1 - r2;
/* iterate until the quotient estimate converges (see Hacker's Delight for the invariant) */
94 	} while (!gt && (q1 < delta || (q1 == delta && r1 == 0)));
96 	magu.magic_number = q2 + 1;
/*
 * compute_magic_signed:
 * Compute the magic multiplier and shift for signed 32-bit division by
 * the constant DIVISOR. Algorithm from Hacker's Delight (magic.c,
 * chapter 10); see URL below.
 * NOTE(review): the loop opening, 'ad' initialization and shift
 * bookkeeping are elided in this extraction.
 */
101 /* http://www.hackersdelight.org/hdcodetxt/magic.c.txt */
102 static struct magic_signed
103 compute_magic_signed (gint32 divisor) {
105 	guint32 ad, anc, delta, q1, r1, q2, r2, t;
106 	const guint32 two31 = 0x80000000;
107 	struct magic_signed mag;
/* t = 2^31 + sign bit of divisor */
110 	t = two31 + ((unsigned)divisor >> 31);
/* anc: absolute value of nc, the largest useful multiple boundary */
111 	anc = t - 1 - t % ad;
114 	r1 = two31 - q1 * anc;
116 	r2 = two31 - q2 * ad;
135 	} while (q1 < delta || (q1 == delta && r1 == 0));
137 	mag.magic_number = q2 + 1;
/* negative divisors get a negated magic number */
139 	mag.magic_number = -mag.magic_number;
/*
 * mono_strength_reduction_division:
 * Replace division-by-constant instructions (OP_IDIV_UN_IMM and the
 * signed variant) on INS with cheaper sequences: shifts for power-of-two
 * divisors, otherwise multiplication by a magic number (Hacker's Delight
 * chapters 10-6 and 10-10). New instructions are emitted into cfg->cbb.
 * Returns TRUE if additional vregs were allocated (so the caller can
 * grow its defs tracking arrays).
 * NOTE(review): many lines (case labels, #else/#endif pairs, 'tmp_regl'
 * and 'tmp_regi' declarations, break statements) are elided in this
 * extraction; comments describe the visible statements only.
 */
145 mono_strength_reduction_division (MonoCompile *cfg, MonoInst *ins)
147 	gboolean allocated_vregs = FALSE;
149 	 * We don't use it on 32bit systems because on those
150 	 * platforms we emulate long multiplication, driving the
151 	 * performance back down.
153 	switch (ins->opcode) {
154 	case OP_IDIV_UN_IMM: {
156 #if SIZEOF_REGISTER == 8
157 		guint32 dividend_reg;
161 		struct magic_unsigned mag;
162 		int power2 = mono_is_power_of_two (ins->inst_imm);
164 		/* The decomposition doesn't handle exception throwing */
165 		if (ins->inst_imm == 0)
/* power-of-two unsigned divisor: rewrite in place as an unsigned right shift */
169 			ins->opcode = OP_ISHR_UN_IMM;
171 			ins->inst_imm = power2;
/* backend may opt out of the mul-based replacement */
174 		if (cfg->backend->disable_div_with_mul)
176 		allocated_vregs = TRUE;
178 		 * Replacement of unsigned division with multiplication,
179 		 * shifts and additions Hacker's Delight, chapter 10-10.
181 		mag = compute_magic_unsigned (ins->inst_imm);
182 		tmp_regl = alloc_lreg (cfg);
183 #if SIZEOF_REGISTER == 8
/* 64-bit path: zero-extend dividend, full 64-bit multiply, then shift down */
184 		dividend_reg = alloc_lreg (cfg);
185 		MONO_EMIT_NEW_I8CONST (cfg, tmp_regl, mag.magic_number);
186 		MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, dividend_reg, ins->sreg1);
187 		MONO_EMIT_NEW_BIALU (cfg, OP_LMUL, tmp_regl, dividend_reg, tmp_regl);
189 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32);
190 		MONO_EMIT_NEW_BIALU (cfg, OP_LADD, tmp_regl, tmp_regl, dividend_reg);
191 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, mag.shift);
193 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, 32 + mag.shift);
/* 32-bit path: use BIGMUL_UN to get the 64-bit product in a long vreg pair */
196 		tmp_regi = alloc_ireg (cfg);
197 		MONO_EMIT_NEW_ICONST (cfg, tmp_regi, mag.magic_number);
198 		MONO_EMIT_NEW_BIALU (cfg, OP_BIGMUL_UN, tmp_regl, ins->sreg1, tmp_regi);
199 		/* Long shifts below will be decomposed during cprop */
201 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32);
202 		MONO_EMIT_NEW_BIALU (cfg, OP_IADDCC, MONO_LVREG_LS (tmp_regl), MONO_LVREG_LS (tmp_regl), ins->sreg1);
203 		/* MONO_LVREG_MS (tmp_reg) is 0, save in it the carry */
204 		MONO_EMIT_NEW_BIALU (cfg, OP_IADC, MONO_LVREG_MS (tmp_regl), MONO_LVREG_MS (tmp_regl), MONO_LVREG_MS (tmp_regl));
205 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, mag.shift);
207 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32 + mag.shift);
209 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, MONO_LVREG_LS (tmp_regl));
211 		mono_jit_stats.optimized_divisions++;
/* signed division case (case label elided in this view) */
216 #if SIZEOF_REGISTER == 8
217 		guint32 dividend_reg;
221 		struct magic_signed mag;
222 		int power2 = mono_is_power_of_two (ins->inst_imm);
223 		/* The decomposition doesn't handle exception throwing */
224 		/* Optimization with MUL does not apply for -1, 0 and 1 divisors */
225 		if (ins->inst_imm == 0 || ins->inst_imm == -1) {
227 		} else if (ins->inst_imm == 1) {
228 			ins->opcode = OP_MOVE;
232 		allocated_vregs = TRUE;
/* divide by 2: add the sign bit before the arithmetic shift to round toward zero */
234 			guint32 r1 = alloc_ireg (cfg);
235 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, r1, ins->sreg1, 31);
236 			MONO_EMIT_NEW_BIALU (cfg, OP_IADD, r1, r1, ins->sreg1);
237 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, ins->dreg, r1, 1);
/* other positive power-of-two divisors: same rounding correction, generalized */
239 		} else if (power2 > 0 && power2 < 31) {
240 			guint32 r1 = alloc_ireg (cfg);
241 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, r1, ins->sreg1, 31);
242 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, r1, r1, (32 - power2));
243 			MONO_EMIT_NEW_BIALU (cfg, OP_IADD, r1, r1, ins->sreg1);
244 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, ins->dreg, r1, power2);
248 		if (cfg->backend->disable_div_with_mul)
251 		 * Replacement of signed division with multiplication,
252 		 * shifts and additions Hacker's Delight, chapter 10-6.
254 		mag = compute_magic_signed (ins->inst_imm);
255 		tmp_regl = alloc_lreg (cfg);
256 #if SIZEOF_REGISTER == 8
257 		dividend_reg = alloc_lreg (cfg);
258 		MONO_EMIT_NEW_I8CONST (cfg, tmp_regl, mag.magic_number);
259 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, dividend_reg, ins->sreg1);
260 		MONO_EMIT_NEW_BIALU (cfg, OP_LMUL, tmp_regl, dividend_reg, tmp_regl);
/* when divisor and magic number have opposite signs, correct the high word (HD 10-6) */
261 		if ((ins->inst_imm > 0 && mag.magic_number < 0) || (ins->inst_imm < 0 && mag.magic_number > 0)) {
262 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, 32);
263 			if (ins->inst_imm > 0 && mag.magic_number < 0) {
264 				MONO_EMIT_NEW_BIALU (cfg, OP_LADD, tmp_regl, tmp_regl, dividend_reg);
265 			} else if (ins->inst_imm < 0 && mag.magic_number > 0) {
266 				MONO_EMIT_NEW_BIALU (cfg, OP_LSUB, tmp_regl, tmp_regl, dividend_reg);
268 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, mag.shift);
270 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, 32 + mag.shift);
/* add 1 if the quotient is negative (extract the sign bit and add it back) */
272 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, SIZEOF_REGISTER * 8 - 1);
273 		MONO_EMIT_NEW_BIALU (cfg, OP_LADD, ins->dreg, ins->dreg, tmp_regl);
/* 32-bit path: signed BIGMUL, then the same correction on the high half */
275 		tmp_regi = alloc_ireg (cfg);
276 		MONO_EMIT_NEW_ICONST (cfg, tmp_regi, mag.magic_number);
277 		MONO_EMIT_NEW_BIALU (cfg, OP_BIGMUL, tmp_regl, ins->sreg1, tmp_regi);
278 		if ((ins->inst_imm > 0 && mag.magic_number < 0) || (ins->inst_imm < 0 && mag.magic_number > 0)) {
279 			if (ins->inst_imm > 0 && mag.magic_number < 0) {
280 				/* Opposite sign, cannot overflow */
281 				MONO_EMIT_NEW_BIALU (cfg, OP_IADD, tmp_regi, MONO_LVREG_MS (tmp_regl), ins->sreg1);
282 			} else if (ins->inst_imm < 0 && mag.magic_number > 0) {
283 				/* Same sign, cannot overflow */
284 				MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, tmp_regi, MONO_LVREG_MS (tmp_regl), ins->sreg1);
286 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, tmp_regi, tmp_regi, mag.shift);
288 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, tmp_regi, MONO_LVREG_MS (tmp_regl), mag.shift);
290 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, ins->dreg, tmp_regi, SIZEOF_REGISTER * 8 - 1);
291 		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->dreg, tmp_regi);
293 		mono_jit_stats.optimized_divisions++;
297 	return allocated_vregs;
/*
 * mono_strength_reduction_ins:
 * Replace INS with a cheaper equivalent where possible: trivial
 * multiplications/remainders become moves/shifts/masks, divisions are
 * forwarded to mono_strength_reduction_division (), and on 32-bit
 * targets long shift-by-immediate ops are decomposed into 32-bit halves.
 * SPEC is refreshed at the end because the opcode may have changed.
 * Returns TRUE if additional vregs were allocated.
 * NOTE(review): case labels, breaks and several conditions are elided
 * in this extraction; comments describe only the visible statements.
 */
301 * Replaces ins with optimized opcodes.
303 * We can emit to cbb the equivalent instructions which will be used as
304 * replacement for ins, or simply change the fields of ins. Spec needs to
305 * be updated if we silently change the opcode of ins.
307 * Returns TRUE if additional vregs were allocated.
310 mono_strength_reduction_ins (MonoCompile *cfg, MonoInst *ins, const char **spec)
312 	gboolean allocated_vregs = FALSE;
314 	/* FIXME: Add long/float */
315 	switch (ins->opcode) {
/* self-move: dreg == sreg1 is a no-op (handling elided in this view) */
318 		if (ins->dreg == ins->sreg1) {
326 #if SIZEOF_REGISTER == 8
330 		if (ins->inst_imm == 0) {
331 			ins->opcode = OP_MOVE;
336 #if SIZEOF_REGISTER == 8
/* multiply by constant: 0 -> const 0, 1 -> move, -1 -> negate, 2^k -> shift */
339 		if (ins->inst_imm == 0) {
340 			ins->opcode = (ins->opcode == OP_LMUL_IMM) ? OP_I8CONST : OP_ICONST;
343 		} else if (ins->inst_imm == 1) {
344 			ins->opcode = OP_MOVE;
345 		} else if ((ins->opcode == OP_IMUL_IMM) && (ins->inst_imm == -1)) {
346 			ins->opcode = OP_INEG;
347 		} else if ((ins->opcode == OP_LMUL_IMM) && (ins->inst_imm == -1)) {
348 			ins->opcode = OP_LNEG;
350 			int power2 = mono_is_power_of_two (ins->inst_imm);
352 				ins->opcode = (ins->opcode == OP_MUL_IMM) ? OP_SHL_IMM : ((ins->opcode == OP_LMUL_IMM) ? OP_LSHL_IMM : OP_ISHL_IMM);
353 				ins->inst_imm = power2;
/* unsigned remainder by 2^k -> bitwise AND with (2^k - 1) */
357 	case OP_IREM_UN_IMM: {
358 		int power2 = mono_is_power_of_two (ins->inst_imm);
361 			ins->opcode = OP_IAND_IMM;
363 			ins->inst_imm = (1 << power2) - 1;
/* divisions: delegated; LLVM does its own strength reduction */
369 		if (!COMPILE_LLVM (cfg))
370 			allocated_vregs = mono_strength_reduction_division (cfg, ins);
373 #if SIZEOF_REGISTER == 8
377 		int power = mono_is_power_of_two (ins->inst_imm);
378 		if (ins->inst_imm == 1) {
/* x % 1 == 0 */
379 			ins->opcode = OP_ICONST;
380 			MONO_INST_NULLIFY_SREGS (ins);
/* signed remainder by 2^k: compensate negative numerators, mask, un-compensate (gcc scheme) */
385 		} else if ((ins->inst_imm > 0) && (ins->inst_imm < (1LL << 32)) && (power != -1)) {
386 			gboolean is_long = ins->opcode == OP_LREM_IMM;
387 			int compensator_reg = alloc_ireg (cfg);
388 			int intermediate_reg;
390 			/* Based on gcc code */
392 			/* Add compensation for negative numerators */
395 				intermediate_reg = compensator_reg;
396 				MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LSHR_IMM : OP_ISHR_IMM, intermediate_reg, ins->sreg1, is_long ? 63 : 31);
398 				intermediate_reg = ins->sreg1;
401 			MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LSHR_UN_IMM : OP_ISHR_UN_IMM, compensator_reg, intermediate_reg, (is_long ? 64 : 32) - power);
402 			MONO_EMIT_NEW_BIALU (cfg, is_long ? OP_LADD : OP_IADD, ins->dreg, ins->sreg1, compensator_reg);
403 			/* Compute remainder */
404 			MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LAND_IMM : OP_AND_IMM, ins->dreg, ins->dreg, (1 << power) - 1);
405 			/* Remove compensation */
406 			MONO_EMIT_NEW_BIALU (cfg, is_long ? OP_LSUB : OP_ISUB, ins->dreg, ins->dreg, compensator_reg);
408 			allocated_vregs = TRUE;
/* 32-bit targets: decompose long arithmetic shift right by immediate */
413 #if SIZEOF_REGISTER == 4
415 		if (COMPILE_LLVM (cfg))
417 		if (ins->inst_c1 == 32) {
418 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
419 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 31);
420 		} else if (ins->inst_c1 == 0) {
421 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
422 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
423 		} else if (ins->inst_c1 > 32) {
424 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1 - 32);
425 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 31);
/* general case (0 < c1 < 32): combine shifted halves through a temp */
427 			guint32 tmpreg = alloc_ireg (cfg);
428 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, tmpreg, MONO_LVREG_MS (ins->sreg1), 32 - ins->inst_c1);
429 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
430 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
431 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->dreg), tmpreg);
432 			allocated_vregs = TRUE;
/* decompose long logical shift right by immediate */
436 	case OP_LSHR_UN_IMM: {
437 		if (COMPILE_LLVM (cfg))
439 		if (ins->inst_c1 == 32) {
440 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
441 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_MS (ins->dreg), 0);
442 		} else if (ins->inst_c1 == 0) {
443 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
444 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
445 		} else if (ins->inst_c1 > 32) {
446 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1 - 32);
447 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_MS (ins->dreg), 0);
449 			guint32 tmpreg = alloc_ireg (cfg);
450 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, tmpreg, MONO_LVREG_MS (ins->sreg1), 32 - ins->inst_c1);
451 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
452 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
453 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->dreg), tmpreg);
454 			allocated_vregs = TRUE;
/* decompose long shift left by immediate */
459 		if (COMPILE_LLVM (cfg))
461 		if (ins->inst_c1 == 32) {
462 			/* just move the lower half to the upper and zero the lower word */
463 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
464 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_LS (ins->dreg), 0);
465 		} else if (ins->inst_c1 == 0) {
466 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
467 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
468 		} else if (ins->inst_c1 > 32) {
469 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1 - 32);
470 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_LS (ins->dreg), 0);
472 			guint32 tmpreg = alloc_ireg (cfg);
473 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, tmpreg, MONO_LVREG_LS (ins->sreg1), 32 - ins->inst_c1);
474 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
475 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
476 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->dreg), tmpreg);
477 			allocated_vregs = TRUE;
/* the opcode may have changed above, so refresh the caller's spec */
487 	*spec = INS_INFO (ins->opcode);
488 	return allocated_vregs;
/*
 * mono_local_cprop:
 * Combined local (per-bblock) copy propagation, constant propagation,
 * constant folding and strength reduction over cfg's instruction list.
 * Tracks, per vreg, the last defining instruction (defs[]) and its index
 * (def_index[]) within the current bblock, and uses them to forward
 * copies/constants into later uses.
 * NOTE(review): numerous lines (declarations such as 'ins'/'defs'/
 * 'def_index'/'max', some braces and breaks) are elided in this
 * extraction; comments describe the visible statements only.
 */
494  * A combined local copy and constant propagation pass.
497 mono_local_cprop (MonoCompile *cfg)
499 	MonoBasicBlock *bb, *bb_opt;
503 	int filter = FILTER_IL_SEQ_POINT;
504 	int initial_max_vregs = cfg->next_vreg;
506 	max = cfg->next_vreg;
507 	defs = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * cfg->next_vreg);
508 	def_index = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* bb_opt is a scratch bblock: strength reduction emits replacement code into it */
509 	cfg->cbb = bb_opt = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
511 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
516 		/* Manually init the defs entries used by the bblock */
517 		MONO_BB_FOR_EACH_INS (bb, ins) {
518 			int sregs [MONO_MAX_SRC_REGS];
521 			if (ins->dreg != -1) {
522 #if SIZEOF_REGISTER == 4
/* long dregs occupy two extra vreg slots on 32-bit targets */
523 				const char *spec = INS_INFO (ins->opcode);
524 				if (spec [MONO_INST_DEST] == 'l') {
525 					defs [ins->dreg + 1] = NULL;
526 					defs [ins->dreg + 2] = NULL;
529 				defs [ins->dreg] = NULL;
532 			num_sregs = mono_inst_get_src_registers (ins, sregs);
533 			for (i = 0; i < num_sregs; ++i) {
534 				int sreg = sregs [i];
535 #if SIZEOF_REGISTER == 4
536 				const char *spec = INS_INFO (ins->opcode);
537 				if (spec [MONO_INST_SRC1 + i] == 'l') {
538 					defs [sreg + 1] = NULL;
539 					defs [sreg + 2] = NULL;
/* main scan: propagate along the instruction list of this bblock */
547 		last_call_index = -1;
548 		MONO_BB_FOR_EACH_INS (bb, ins) {
549 			const char *spec = INS_INFO (ins->opcode);
550 			int regtype, srcindex, sreg;
552 			int sregs [MONO_MAX_SRC_REGS];
554 			if (ins->opcode == OP_NOP) {
555 				MONO_DELETE_INS (bb, ins);
559 			g_assert (ins->opcode > MONO_CEE_LAST);
561 			/* FIXME: Optimize this */
562 			if (ins->opcode == OP_LDADDR) {
/* taking a var's address invalidates its cached definition */
563 				MonoInst *var = (MonoInst *)ins->inst_p0;
565 				defs [var->dreg] = NULL;
567 				if (!MONO_TYPE_ISSTRUCT (var->inst_vtype))
/* copy-propagate the value register of store-membase instructions */
572 			if (MONO_IS_STORE_MEMBASE (ins)) {
576 				if ((regtype == 'i') && (sreg != -1) && defs [sreg]) {
577 					MonoInst *def = defs [sreg];
579 					if ((def->opcode == OP_MOVE) && (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) && !vreg_is_volatile (cfg, def->sreg1)) {
580 						int vreg = def->sreg1;
581 						if (cfg->verbose_level > 2) printf ("CCOPY: R%d -> R%d\n", sreg, vreg);
587 			num_sregs = mono_inst_get_src_registers (ins, sregs);
588 			for (srcindex = 0; srcindex < num_sregs; ++srcindex) {
/* re-read: the previous iteration may have rewritten the sregs */
591 					mono_inst_get_src_registers (ins, sregs);
593 				regtype = spec [MONO_INST_SRC1 + srcindex];
594 				sreg = sregs [srcindex];
596 				if ((regtype == ' ') || (sreg == -1) || (!defs [sreg]))
601 				/* Copy propagation */
603 				 * The first check makes sure the source of the copy did not change since
605 				 * The second check avoids volatile variables.
606 				 * The third check avoids copy propagating local vregs through a call,
607 				 * since the lvreg will be spilled
608 				 * The fourth check avoids copy propagating a vreg in cases where
609 				 * it would be eliminated anyway by reverse copy propagation later,
610 				 * because propagating it would create another use for it, thus making
611 				 * it impossible to use reverse copy propagation.
613 				/* Enabling this for floats trips up the fp stack */
615 				 * Enabling this for floats on amd64 seems to cause a failure in
616 				 * basic-math.cs, most likely because it gets rid of some r8->r4
619 				if (MONO_IS_MOVE (def) &&
620 					(!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) &&
621 					!vreg_is_volatile (cfg, def->sreg1) &&
622 					/* This avoids propagating local vregs across calls */
623 					((get_vreg_to_inst (cfg, def->sreg1) || !defs [def->sreg1] || (def_index [def->sreg1] >= last_call_index) || (def->opcode == OP_VMOVE))) &&
624 					!(defs [def->sreg1] && mono_inst_next (defs [def->sreg1], filter) == def) &&
625 					(!MONO_ARCH_USE_FPSTACK || (def->opcode != OP_FMOVE)) &&
626 					(def->opcode != OP_FMOVE)) {
627 					int vreg = def->sreg1;
629 					if (cfg->verbose_level > 2) printf ("CCOPY/2: R%d -> R%d\n", sreg, vreg);
630 					sregs [srcindex] = vreg;
631 					mono_inst_set_src_registers (ins, sregs);
633 					/* Allow further iterations */
638 				/* Constant propagation */
639 				/* FIXME: Make is_inst_imm a macro */
640 				/* FIXME: Make is_inst_imm take an opcode argument */
641 				/* is_inst_imm is only needed for binops */
642 				if ((((def->opcode == OP_ICONST) || ((sizeof (gpointer) == 8) && (def->opcode == OP_I8CONST))) &&
643 						(((srcindex == 0) && (ins->sreg2 == -1)) || mono_arch_is_inst_imm (def->inst_c0))) ||
644 						(!MONO_ARCH_USE_FPSTACK && (def->opcode == OP_R8CONST))) {
647 					/* srcindex == 1 -> binop, ins->sreg2 == -1 -> unop */
648 					if ((srcindex == 1) && (ins->sreg1 != -1) && defs [ins->sreg1] && (defs [ins->sreg1]->opcode == OP_ICONST) && defs [ins->sreg2]) {
649 						/* Both arguments are constants, perform cfold */
650 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
651 					} else if ((srcindex == 0) && (ins->sreg2 != -1) && defs [ins->sreg2]) {
652 						/* Arg 1 is constant, swap arguments if possible */
653 						int opcode = ins->opcode;
654 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
655 						if (ins->opcode != opcode) {
656 							/* Allow further iterations */
660 					} else if ((srcindex == 0) && (ins->sreg2 == -1)) {
661 						/* Constant unop, perform cfold */
662 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], NULL, TRUE);
/* fold the constant into an _IMM variant of the opcode when one exists */
665 					opcode2 = mono_op_to_op_imm (ins->opcode);
666 					if ((opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0) && ((srcindex == 1) || (ins->sreg2 == -1))) {
667 						ins->opcode = opcode2;
668 						if ((def->opcode == OP_I8CONST) && (sizeof (gpointer) == 4)) {
669 							ins->inst_ls_word = def->inst_ls_word;
670 							ins->inst_ms_word = def->inst_ms_word;
672 							ins->inst_imm = def->inst_c0;
674 						sregs [srcindex] = -1;
675 						mono_inst_set_src_registers (ins, sregs);
677 						if ((opcode2 == OP_VOIDCALL) || (opcode2 == OP_CALL) || (opcode2 == OP_LCALL) || (opcode2 == OP_FCALL))
678 							((MonoCallInst*)ins)->fptr = (gpointer)ins->inst_imm;
680 						/* Allow further iterations */
686 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* fold a constant index into an x86 LEA's displacement */
687 					if ((ins->opcode == OP_X86_LEA) && (srcindex == 1)) {
688 #if SIZEOF_REGISTER == 8
689 						/* FIXME: Use OP_PADD_IMM when the new JIT is done */
690 						ins->opcode = OP_LADD_IMM;
692 						ins->opcode = OP_ADD_IMM;
694 						ins->inst_imm += def->inst_c0 << ins->backend.shift_amount;
/* constant base register: convert load-membase into load from absolute address */
698 					opcode2 = mono_load_membase_to_load_mem (ins->opcode);
699 					if ((srcindex == 0) && (opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0)) {
700 						ins->opcode = opcode2;
701 						ins->inst_imm = def->inst_c0 + ins->inst_offset;
706 				else if (((def->opcode == OP_ADD_IMM) || (def->opcode == OP_LADD_IMM)) && (MONO_IS_LOAD_MEMBASE (ins) || MONO_ARCH_IS_OP_MEMBASE (ins->opcode))) {
707 					/* ADD_IMM is created by spill_global_vars */
709 					 * We have to guarantee that def->sreg1 haven't changed since def->dreg
710 					 * was defined. cfg->frame_reg is assumed to remain constant.
712 					if ((def->sreg1 == cfg->frame_reg) || ((mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1))) {
713 						ins->inst_basereg = def->sreg1;
714 						ins->inst_offset += def->inst_imm;
/* fold add/sub immediates through adjacent instructions */
716 				} else if ((ins->opcode == OP_ISUB_IMM) && (def->opcode == OP_IADD_IMM) && (mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1)) {
717 					ins->sreg1 = def->sreg1;
718 					ins->inst_imm -= def->inst_imm;
719 				} else if ((ins->opcode == OP_IADD_IMM) && (def->opcode == OP_ISUB_IMM) && (mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1)) {
720 					ins->sreg1 = def->sreg1;
721 					ins->inst_imm -= def->inst_imm;
722 				} else if (ins->opcode == OP_STOREI1_MEMBASE_REG &&
723 						(def->opcode == OP_ICONV_TO_U1 || def->opcode == OP_ICONV_TO_I1 || def->opcode == OP_SEXT_I4 || (SIZEOF_REGISTER == 8 && def->opcode == OP_LCONV_TO_U1)) &&
724 						(!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
725 					/* Avoid needless sign extension */
726 					ins->sreg1 = def->sreg1;
727 				} else if (ins->opcode == OP_STOREI2_MEMBASE_REG &&
728 						(def->opcode == OP_ICONV_TO_U2 || def->opcode == OP_ICONV_TO_I2 || def->opcode == OP_SEXT_I4 || (SIZEOF_REGISTER == 8 && def->opcode == OP_LCONV_TO_I2)) &&
729 						(!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
730 					/* Avoid needless sign extension */
731 					ins->sreg1 = def->sreg1;
/* comparing an LDADDR result against 0: the address is never NULL, fold to constant true */
732 				} else if (ins->opcode == OP_COMPARE_IMM && def->opcode == OP_LDADDR && ins->inst_imm == 0) {
735 					memset (&dummy_arg1, 0, sizeof (MonoInst));
736 					dummy_arg1.opcode = OP_ICONST;
737 					dummy_arg1.inst_c0 = 1;
739 					mono_constant_fold_ins (cfg, ins, &dummy_arg1, NULL, TRUE);
743 			g_assert (cfg->cbb == bb_opt);
744 			g_assert (!bb_opt->code);
745 			/* Do strength reduction here */
746 			if (mono_strength_reduction_ins (cfg, ins, &spec) && max < cfg->next_vreg) {
747 				MonoInst **defs_prev = defs;
748 				gint32 *def_index_prev = def_index;
749 				guint32 prev_max = max;
750 				guint32 additional_vregs = cfg->next_vreg - initial_max_vregs;
752 				/* We have more vregs so we need to reallocate defs and def_index arrays */
753 				max = initial_max_vregs + additional_vregs * 2;
754 				defs = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * max);
755 				def_index = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * max);
757 				/* Keep the entries for the previous vregs, zero the rest */
758 				memcpy (defs, defs_prev, sizeof (MonoInst*) * prev_max);
759 				memset (defs + prev_max, 0, sizeof (MonoInst*) * (max - prev_max));
760 				memcpy (def_index, def_index_prev, sizeof (guint32) * prev_max);
761 				memset (def_index + prev_max, 0, sizeof (guint32) * (max - prev_max));
764 			if (cfg->cbb->code || (cfg->cbb != bb_opt)) {
765 				MonoInst *saved_prev = ins->prev;
767 				/* If we have code in cbb, we need to replace ins with the decomposition */
768 				mono_replace_ins (cfg, bb, ins, &ins->prev, bb_opt, cfg->cbb);
769 				bb_opt->code = bb_opt->last_ins = NULL;
770 				bb_opt->in_count = bb_opt->out_count = 0;
773 				/* ins is hanging, continue scanning the emitted code */
/* record this instruction as the latest definition of its dreg */
778 			if (spec [MONO_INST_DEST] != ' ') {
779 				MonoInst *def = defs [ins->dreg];
781 				if (def && (def->opcode == OP_ADD_IMM) && (def->sreg1 == cfg->frame_reg) && (MONO_IS_STORE_MEMBASE (ins))) {
782 					/* ADD_IMM is created by spill_global_vars */
783 					/* cfg->frame_reg is assumed to remain constant */
784 					ins->inst_destbasereg = def->sreg1;
785 					ins->inst_offset += def->inst_imm;
788 			if (!MONO_IS_STORE_MEMBASE (ins) && !vreg_is_volatile (cfg, ins->dreg)) {
789 				defs [ins->dreg] = ins;
790 				def_index [ins->dreg] = ins_index;
794 			if (MONO_IS_CALL (ins))
795 				last_call_index = ins_index;
/*
 * reg_is_softreg_no_fpstack:
 * TRUE if REG (with regtype SPEC from INS_INFO) is a virtual register,
 * i.e. numbered above the hard-register range for its class, excluding
 * float vregs when the architecture uses an fp stack (x87).
 */
static inline gboolean
803 reg_is_softreg_no_fpstack (int reg, const char spec)
805 	return (spec == 'i' && reg >= MONO_MAX_IREGS)
806 		|| ((spec == 'f' && reg >= MONO_MAX_FREGS) && !MONO_ARCH_USE_FPSTACK)
807 #ifdef MONO_ARCH_SIMD_INTRINSICS
808 		|| (spec == 'x' && reg >= MONO_MAX_XREGS)
/*
 * reg_is_softreg:
 * TRUE if REG (with regtype SPEC) is a virtual register of any class,
 * including float vregs regardless of the fp-stack setting (contrast
 * with reg_is_softreg_no_fpstack above).
 */
static inline gboolean
814 reg_is_softreg (int reg, const char spec)
816 	return (spec == 'i' && reg >= MONO_MAX_IREGS)
817 		|| (spec == 'f' && reg >= MONO_MAX_FREGS)
818 #ifdef MONO_ARCH_SIMD_INTRINSICS
819 		|| (spec == 'x' && reg >= MONO_MAX_XREGS)
/*
 * mono_is_simd_accessor:
 * TRUE if INS is one of the slow SIMD insert opcodes, which read-modify-
 * write their destination; such instructions must not have their dreg
 * rewritten by reverse copy propagation (see mono_local_deadce).
 * NOTE(review): additional case labels and the default branch are elided
 * in this extraction.
 */
static inline gboolean
825 mono_is_simd_accessor (MonoInst *ins)
827 	switch (ins->opcode) {
828 #ifdef MONO_ARCH_SIMD_INTRINSICS
836 	case OP_INSERTX_U1_SLOW:
837 	case OP_INSERTX_I4_SLOW:
838 	case OP_INSERTX_R4_SLOW:
839 	case OP_INSERTX_R8_SLOW:
840 	case OP_INSERTX_I8_SLOW:
/*
 * mono_local_deadce:
 * Local (per-bblock) dead code elimination plus a limited reverse copy
 * propagation. Uses two bitsets ('used', 'defined') over vregs, filled
 * by a forward pre-pass and consumed by a reverse scan of each bblock.
 * Must run after handle_global_vregs () (see comment below).
 * NOTE(review): several lines (bb/loop declarations, braces, some
 * conditions) are elided in this extraction; comments cover only the
 * visible statements.
 */
851  * Get rid of the dead assignments to local vregs like the ones created by the
855 mono_local_deadce (MonoCompile *cfg)
858 	MonoInst *ins, *prev;
859 	MonoBitSet *used, *defined;
861 	//mono_print_code (cfg, "BEFORE LOCAL-DEADCE");
864 	 * Assignments to global vregs can't be eliminated so this pass must come
865 	 * after the handle_global_vregs () pass.
/* +1: noinit bitsets sized to hold every vreg; entries are cleared manually below */
868 	used = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
869 	defined = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
871 	/* First pass: collect liveness info */
872 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
873 		/* Manually init the defs entries used by the bblock */
874 		MONO_BB_FOR_EACH_INS (bb, ins) {
875 			const char *spec = INS_INFO (ins->opcode);
876 			int sregs [MONO_MAX_SRC_REGS];
879 			if (spec [MONO_INST_DEST] != ' ') {
880 				mono_bitset_clear_fast (used, ins->dreg);
881 				mono_bitset_clear_fast (defined, ins->dreg);
882 #if SIZEOF_REGISTER == 4
/* long vregs span two slots on 32-bit targets */
884 				mono_bitset_clear_fast (used, ins->dreg + 1);
885 				mono_bitset_clear_fast (defined, ins->dreg + 1);
888 			num_sregs = mono_inst_get_src_registers (ins, sregs);
889 			for (i = 0; i < num_sregs; ++i) {
890 				mono_bitset_clear_fast (used, sregs [i]);
891 #if SIZEOF_REGISTER == 4
892 				mono_bitset_clear_fast (used, sregs [i] + 1);
898 		 * Make a reverse pass over the instruction list
900 		MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) {
901 			const char *spec = INS_INFO (ins->opcode);
902 			int sregs [MONO_MAX_SRC_REGS];
904 			MonoInst *prev_f = mono_inst_prev (ins, FILTER_NOP | FILTER_IL_SEQ_POINT);
906 			if (ins->opcode == OP_NOP) {
907 				MONO_DELETE_INS (bb, ins);
911 			g_assert (ins->opcode > MONO_CEE_LAST);
913 			if (MONO_IS_NON_FP_MOVE (ins) && prev_f) {
918 				spec2 = INS_INFO (def->opcode);
921 				 * Perform a limited kind of reverse copy propagation, i.e.
922 				 * transform B <- FOO; A <- B into A <- FOO
923 				 * This isn't copyprop, not deadce, but it can only be performed
924 				 * after handle_global_vregs () has run.
926 				if (!get_vreg_to_inst (cfg, ins->sreg1) && (spec2 [MONO_INST_DEST] != ' ') && (def->dreg == ins->sreg1) && !mono_bitset_test_fast (used, ins->sreg1) && !MONO_IS_STORE_MEMBASE (def) && reg_is_softreg (ins->sreg1, spec [MONO_INST_DEST]) && !mono_is_simd_accessor (def)) {
927 					if (cfg->verbose_level > 2) {
928 						printf ("\tReverse copyprop in BB%d on ", bb->block_num);
929 						mono_print_ins (ins);
/* retarget FOO's dreg and delete the now-redundant move */
932 					def->dreg = ins->dreg;
933 					MONO_DELETE_INS (bb, ins);
934 					spec = INS_INFO (ins->opcode);
938 			/* Enabling this on x86 could screw up the fp stack */
939 			if (reg_is_softreg_no_fpstack (ins->dreg, spec [MONO_INST_DEST])) {
941 				 * Assignments to global vregs can only be eliminated if there is another
942 				 * assignment to the same vreg later in the same bblock.
944 				if (!mono_bitset_test_fast (used, ins->dreg) &&
945 					(!get_vreg_to_inst (cfg, ins->dreg) || (!bb->extended && !vreg_is_volatile (cfg, ins->dreg) && mono_bitset_test_fast (defined, ins->dreg))) &&
946 					MONO_INS_HAS_NO_SIDE_EFFECT (ins)) {
947 					/* Happens with CMOV instructions */
948 					if (prev_f && prev_f->opcode == OP_ICOMPARE_IMM) {
949 						MonoInst *prev = prev_f;
951 						 * Can't use DELETE_INS since that would interfere with the
956 					//printf ("DEADCE: "); mono_print_ins (ins);
957 					MONO_DELETE_INS (bb, ins);
958 					spec = INS_INFO (ins->opcode);
/* the definition killed here also kills any pending use of its dreg */
961 			if (spec [MONO_INST_DEST] != ' ')
962 				mono_bitset_clear_fast (used, ins->dreg);
/* update liveness: mark def, then mark all sources used */
965 			if (spec [MONO_INST_DEST] != ' ')
966 				mono_bitset_set_fast (defined, ins->dreg);
967 			num_sregs = mono_inst_get_src_registers (ins, sregs);
968 			for (i = 0; i < num_sregs; ++i)
969 				mono_bitset_set_fast (used, sregs [i]);
/* for store-membase, dreg is the base address register, i.e. a use, not a def */
970 			if (MONO_IS_STORE_MEMBASE (ins))
971 				mono_bitset_set_fast (used, ins->dreg);
973 			if (MONO_IS_CALL (ins)) {
974 				MonoCallInst *call = (MonoCallInst*)ins;
/* hard regs carrying outgoing arguments are implicitly used by the call */
977 				if (call->out_ireg_args) {
978 					for (l = call->out_ireg_args; l; l = l->next) {
979 						guint32 regpair, reg;
981 						regpair = (guint32)(gssize)(l->data);
982 						reg = regpair & 0xffffff;
984 						mono_bitset_set_fast (used, reg);
988 				if (call->out_freg_args) {
989 					for (l = call->out_freg_args; l; l = l->next) {
990 						guint32 regpair, reg;
992 						regpair = (guint32)(gssize)(l->data);
993 						reg = regpair & 0xffffff;
995 						mono_bitset_set_fast (used, reg);
1002 	//mono_print_code (cfg, "AFTER LOCAL-DEADCE");
1005 #endif /* DISABLE_JIT */