2 * local-propagation.c: Local constant, copy and tree propagation.
4 * To make some sense of the tree mover, read mono/docs/tree-mover.txt
7 * Paolo Molaro (lupus@ximian.com)
8 * Dietmar Maurer (dietmar@ximian.com)
9 * Massimiliano Mantione (massi@ximian.com)
11 * (C) 2006 Novell, Inc. http://www.novell.com
12 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
13 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
25 #include <mono/metadata/debug-helpers.h>
26 #include <mono/metadata/mempool.h>
27 #include <mono/metadata/opcodes.h>
31 #ifndef MONO_ARCH_IS_OP_MEMBASE
32 #define MONO_ARCH_IS_OP_MEMBASE(opcode) FALSE
/*
 * mono_bitset_mp_new_noinit:
 * Allocate an uninitialized MonoBitSet of MAX_SIZE bits from mempool MP.
 * The backing memory comes from the mempool, so MONO_BITSET_DONT_FREE is
 * passed — the bitset must not be freed individually.
 * NOTE(review): this excerpt elides interior lines; `mem` is used below
 * without a visible declaration — presumably declared on an elided line.
 */
35 static inline MonoBitSet*
36 mono_bitset_mp_new_noinit (MonoMemPool *mp, guint32 max_size)
38 	int size = mono_bitset_alloc_size (max_size, 0);
41 	mem = mono_mempool_alloc (mp, size);
42 	return mono_bitset_mem_new (mem, max_size, MONO_BITSET_DONT_FREE);
/* Result of the unsigned magic-number computation: multiplier + shift
 * (field list elided in this excerpt; usage below shows at least
 * `magic_number` and `shift`). */
45 struct magic_unsigned {
56 /* http://www.hackersdelight.org/hdcodetxt/magicu.c.txt */
/*
 * compute_magic_unsigned:
 * Compute the "magic" multiplier and shift that let an unsigned 32-bit
 * division by DIVISOR be replaced with a multiply + shifts
 * (Hacker's Delight, magicu algorithm).
 * NOTE(review): the do-loop head, the `gt`/`shift` bookkeeping, and the
 * q1 initialization are on elided lines — verify against the original file.
 */
57 static struct magic_unsigned
58 compute_magic_unsigned (guint32 divisor) {
59 	guint32 nc, delta, q1, r1, q2, r2;
60 	struct magic_unsigned magu;
	/* nc = largest value such that nc % divisor == divisor - 1 */
65 	nc = -1 - (-divisor) % divisor;
68 	r1 = 0x80000000 - q1 * nc;
	/* q2/r2 = quotient/remainder of (2^31 - 1) / divisor */
69 	q2 = 0x7FFFFFFF / divisor;
70 	r2 = 0x7FFFFFFF - q2 * divisor;
82 		if (r2 + 1 >= divisor - r2) {
86 			r2 = 2 * r2 + 1 - divisor;
93 		delta = divisor - 1 - r2;
94 	} while (!gt && (q1 < delta || (q1 == delta && r1 == 0)));
96 	magu.magic_number = q2 + 1;
101 /* http://www.hackersdelight.org/hdcodetxt/magic.c.txt */
/*
 * compute_magic_signed:
 * Compute the "magic" multiplier and shift for replacing a signed 32-bit
 * division by DIVISOR with a multiply + shifts (Hacker's Delight, magic
 * algorithm).
 * NOTE(review): `ad` (|divisor|) assignment, q1/q2 initialization and the
 * do-loop head are on elided lines in this excerpt.
 */
102 static struct magic_signed
103 compute_magic_signed (gint32 divisor) {
105 	guint32 ad, anc, delta, q1, r1, q2, r2, t;
106 	const guint32 two31 = 0x80000000;
107 	struct magic_signed mag;
	/* t = 2^31 + sign bit of divisor */
110 	t = two31 + ((unsigned)divisor >> 31);
	/* anc = absolute value of nc from the unsigned algorithm */
111 	anc = t - 1 - t % ad;
114 	r1 = two31 - q1 * anc;
116 	r2 = two31 - q2 * ad;
135 	} while (q1 < delta || (q1 == delta && r1 == 0));
137 	mag.magic_number = q2 + 1;
	/* negative divisor: negate the multiplier */
139 		mag.magic_number = -mag.magic_number;
/*
 * mono_strength_reduction_division:
 * Replace integer division-by-constant opcodes (visible cases:
 * OP_IDIV_UN_IMM and a signed variant) with cheaper sequences: a shift for
 * power-of-two divisors, otherwise a magic-number multiply + shifts per
 * Hacker's Delight ch. 10. Emits replacement instructions into cfg->cbb.
 * Returns TRUE if additional vregs were allocated (caller must grow its
 * per-vreg tables).
 * NOTE(review): many lines (case labels, #else/#endif, closing braces) are
 * elided in this excerpt; comments below only describe visible code.
 */
145 mono_strength_reduction_division (MonoCompile *cfg, MonoInst *ins)
147 	gboolean allocated_vregs = FALSE;
149 	 * We don't use it on 32bit systems because on those
150 	 * platforms we emulate long multiplication, driving the
151 	 * performance back down.
153 	switch (ins->opcode) {
154 	case OP_IDIV_UN_IMM: {
156 #if SIZEOF_REGISTER == 8
157 		guint32 dividend_reg;
161 		struct magic_unsigned mag;
162 		int power2 = mono_is_power_of_two (ins->inst_imm);
164 		/* The decomposition doesn't handle exception throwing */
		/* Division by zero must keep its exception semantics — skip */
165 		if (ins->inst_imm == 0)
		/* Power-of-two divisor: unsigned division is a plain shift */
169 			ins->opcode = OP_ISHR_UN_IMM;
171 			ins->inst_imm = power2;
174 		allocated_vregs = TRUE;
176 		 * Replacement of unsigned division with multiplication,
177 		 * shifts and additions Hacker's Delight, chapter 10-10.
179 		mag = compute_magic_unsigned (ins->inst_imm);
180 		tmp_regl = alloc_lreg (cfg);
181 #if SIZEOF_REGISTER == 8
		/* 64-bit target: zero-extend dividend and do a full 64-bit multiply */
182 		dividend_reg = alloc_lreg (cfg);
183 		MONO_EMIT_NEW_I8CONST (cfg, tmp_regl, mag.magic_number);
184 		MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, dividend_reg, ins->sreg1);
185 		MONO_EMIT_NEW_BIALU (cfg, OP_LMUL, tmp_regl, dividend_reg, tmp_regl);
187 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32);
188 			MONO_EMIT_NEW_BIALU (cfg, OP_LADD, tmp_regl, tmp_regl, dividend_reg);
189 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, mag.shift);
191 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, 32 + mag.shift);
		/* 32-bit target: use the widening multiply (BIGMUL) into a reg pair */
194 		tmp_regi = alloc_ireg (cfg);
195 		MONO_EMIT_NEW_ICONST (cfg, tmp_regi, mag.magic_number);
196 		MONO_EMIT_NEW_BIALU (cfg, OP_BIGMUL_UN, tmp_regl, ins->sreg1, tmp_regi);
197 		/* Long shifts below will be decomposed during cprop */
199 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32);
200 			MONO_EMIT_NEW_BIALU (cfg, OP_IADDCC, MONO_LVREG_LS (tmp_regl), MONO_LVREG_LS (tmp_regl), ins->sreg1);
201 			/* MONO_LVREG_MS (tmp_reg) is 0, save in it the carry */
202 			MONO_EMIT_NEW_BIALU (cfg, OP_IADC, MONO_LVREG_MS (tmp_regl), MONO_LVREG_MS (tmp_regl), MONO_LVREG_MS (tmp_regl));
203 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, mag.shift);
205 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, tmp_regl, tmp_regl, 32 + mag.shift);
207 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, MONO_LVREG_LS (tmp_regl));
209 		mono_jit_stats.optimized_divisions++;
	/* Signed division case (case label elided in this excerpt) */
214 #if SIZEOF_REGISTER == 8
215 		guint32 dividend_reg;
219 		struct magic_signed mag;
220 		int power2 = mono_is_power_of_two (ins->inst_imm);
221 		/* The decomposition doesn't handle exception throwing */
222 		/* Optimization with MUL does not apply for -1, 0 and 1 divisors */
223 		if (ins->inst_imm == 0 || ins->inst_imm == -1) {
225 		} else if (ins->inst_imm == 1) {
			/* x / 1 == x */
226 			ins->opcode = OP_MOVE;
230 		allocated_vregs = TRUE;
		/* Divide by 2: add the sign bit then arithmetic-shift right by 1 */
232 			guint32 r1 = alloc_ireg (cfg);
233 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, r1, ins->sreg1, 31);
234 			MONO_EMIT_NEW_BIALU (cfg, OP_IADD, r1, r1, ins->sreg1);
235 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, ins->dreg, r1, 1);
		/* Other power-of-two divisors: bias negative dividends then shift */
237 		} else if (power2 > 0 && power2 < 31) {
238 			guint32 r1 = alloc_ireg (cfg);
239 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, r1, ins->sreg1, 31);
240 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, r1, r1, (32 - power2));
241 			MONO_EMIT_NEW_BIALU (cfg, OP_IADD, r1, r1, ins->sreg1);
242 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, ins->dreg, r1, power2);
247 			 * Replacement of signed division with multiplication,
248 			 * shifts and additions Hacker's Delight, chapter 10-6.
250 			mag = compute_magic_signed (ins->inst_imm);
251 			tmp_regl = alloc_lreg (cfg);
252 #if SIZEOF_REGISTER == 8
253 			dividend_reg = alloc_lreg (cfg);
254 			MONO_EMIT_NEW_I8CONST (cfg, tmp_regl, mag.magic_number);
255 			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, dividend_reg, ins->sreg1);
256 			MONO_EMIT_NEW_BIALU (cfg, OP_LMUL, tmp_regl, dividend_reg, tmp_regl);
			/* Correct for divisor/magic-number sign mismatch */
257 			if ((ins->inst_imm > 0 && mag.magic_number < 0) || (ins->inst_imm < 0 && mag.magic_number > 0)) {
258 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, 32);
259 				if (ins->inst_imm > 0 && mag.magic_number < 0) {
260 					MONO_EMIT_NEW_BIALU (cfg, OP_LADD, tmp_regl, tmp_regl, dividend_reg);
261 				} else if (ins->inst_imm < 0 && mag.magic_number > 0) {
262 					MONO_EMIT_NEW_BIALU (cfg, OP_LSUB, tmp_regl, tmp_regl, dividend_reg);
264 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, mag.shift);
266 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_IMM, tmp_regl, tmp_regl, 32 + mag.shift);
			/* Add 1 if the quotient is negative (extract the sign bit) */
268 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_LSHR_UN_IMM, ins->dreg, tmp_regl, SIZEOF_REGISTER * 8 - 1);
269 			MONO_EMIT_NEW_BIALU (cfg, OP_LADD, ins->dreg, ins->dreg, tmp_regl);
			/* 32-bit target variant using the widening signed multiply */
271 			tmp_regi = alloc_ireg (cfg);
272 			MONO_EMIT_NEW_ICONST (cfg, tmp_regi, mag.magic_number);
273 			MONO_EMIT_NEW_BIALU (cfg, OP_BIGMUL, tmp_regl, ins->sreg1, tmp_regi);
274 			if ((ins->inst_imm > 0 && mag.magic_number < 0) || (ins->inst_imm < 0 && mag.magic_number > 0)) {
275 				if (ins->inst_imm > 0 && mag.magic_number < 0) {
276 					/* Opposite sign, cannot overflow */
277 					MONO_EMIT_NEW_BIALU (cfg, OP_IADD, tmp_regi, MONO_LVREG_MS (tmp_regl), ins->sreg1);
278 				} else if (ins->inst_imm < 0 && mag.magic_number > 0) {
279 					/* Same sign, cannot overflow */
280 					MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, tmp_regi, MONO_LVREG_MS (tmp_regl), ins->sreg1);
282 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, tmp_regi, tmp_regi, mag.shift);
284 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, tmp_regi, MONO_LVREG_MS (tmp_regl), mag.shift);
286 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, ins->dreg, tmp_regi, SIZEOF_REGISTER * 8 - 1);
287 			MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->dreg, tmp_regi);
289 			mono_jit_stats.optimized_divisions++;
293 	return allocated_vregs;
297  * Replaces ins with optimized opcodes.
299  * We can emit to cbb the equivalent instructions which will be used as
300  * replacement for ins, or simply change the fields of ins. Spec needs to
301  * be updated if we silently change the opcode of ins.
303  * Returns TRUE if additional vregs were allocated.
/*
 * NOTE(review): many case labels, #else/#endif lines and closing braces are
 * elided in this excerpt; the comments below describe only visible code.
 */
306 mono_strength_reduction_ins (MonoCompile *cfg, MonoInst *ins, const char **spec)
308 	gboolean allocated_vregs = FALSE;
310 	/* FIXME: Add long/float */
311 	switch (ins->opcode) {
	/* Self-move: dreg == sreg1 makes the move redundant */
314 		if (ins->dreg == ins->sreg1) {
322 #if SIZEOF_REGISTER == 8
	/* x op 0 reduces to a move for this (elided) opcode */
326 		if (ins->inst_imm == 0) {
327 			ins->opcode = OP_MOVE;
332 #if SIZEOF_REGISTER == 8
		/* Multiplication by constant: fold trivial multipliers */
335 		if (ins->inst_imm == 0) {
336 			ins->opcode = (ins->opcode == OP_LMUL_IMM) ? OP_I8CONST : OP_ICONST;
339 		} else if (ins->inst_imm == 1) {
340 			ins->opcode = OP_MOVE;
341 		} else if ((ins->opcode == OP_IMUL_IMM) && (ins->inst_imm == -1)) {
342 			ins->opcode = OP_INEG;
343 		} else if ((ins->opcode == OP_LMUL_IMM) && (ins->inst_imm == -1)) {
344 			ins->opcode = OP_LNEG;
			/* Power-of-two multiplier: becomes a left shift */
346 			int power2 = mono_is_power_of_two (ins->inst_imm);
348 				ins->opcode = (ins->opcode == OP_MUL_IMM) ? OP_SHL_IMM : ((ins->opcode == OP_LMUL_IMM) ? OP_LSHL_IMM : OP_ISHL_IMM);
349 				ins->inst_imm = power2;
353 	case OP_IREM_UN_IMM: {
		/* Unsigned remainder by power of two: mask with (2^k - 1) */
354 		int power2 = mono_is_power_of_two (ins->inst_imm);
357 			ins->opcode = OP_IAND_IMM;
359 			ins->inst_imm = (1 << power2) - 1;
	/* Division cases are handled by the dedicated helper */
365 		allocated_vregs = mono_strength_reduction_division (cfg, ins);
368 #if SIZEOF_REGISTER == 8
	/* Signed remainder by constant (case label elided) */
372 		int power = mono_is_power_of_two (ins->inst_imm);
373 		if (ins->inst_imm == 1) {
			/* x % 1 == 0 */
374 			ins->opcode = OP_ICONST;
375 			MONO_INST_NULLIFY_SREGS (ins);
380 		} else if ((ins->inst_imm > 0) && (ins->inst_imm < (1LL << 32)) && (power != -1)) {
381 			gboolean is_long = ins->opcode == OP_LREM_IMM;
382 			int compensator_reg = alloc_ireg (cfg);
383 			int intermediate_reg;
385 			/* Based on gcc code */
387 			/* Add compensation for negative numerators */
390 				intermediate_reg = compensator_reg;
391 				MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LSHR_IMM : OP_ISHR_IMM, intermediate_reg, ins->sreg1, is_long ? 63 : 31);
393 				intermediate_reg = ins->sreg1;
396 			MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LSHR_UN_IMM : OP_ISHR_UN_IMM, compensator_reg, intermediate_reg, (is_long ? 64 : 32) - power);
397 			MONO_EMIT_NEW_BIALU (cfg, is_long ? OP_LADD : OP_IADD, ins->dreg, ins->sreg1, compensator_reg);
398 			/* Compute remainder */
399 			MONO_EMIT_NEW_BIALU_IMM (cfg, is_long ? OP_LAND_IMM : OP_AND_IMM, ins->dreg, ins->dreg, (1 << power) - 1);
400 			/* Remove compensation */
401 			MONO_EMIT_NEW_BIALU (cfg, is_long ? OP_LSUB : OP_ISUB, ins->dreg, ins->dreg, compensator_reg);
403 			allocated_vregs = TRUE;
	/* 32-bit only: decompose 64-bit shift-by-immediate into 32-bit halves
	 * (LS = least-significant word, MS = most-significant word) */
408 #if SIZEOF_REGISTER == 4
410 		if (COMPILE_LLVM (cfg))
		/* Arithmetic shift right (signed) */
412 		if (ins->inst_c1 == 32) {
413 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
414 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 31);
415 		} else if (ins->inst_c1 == 0) {
416 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
417 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
418 		} else if (ins->inst_c1 > 32) {
419 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1 - 32);
420 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 31);
			/* 0 < c1 < 32: combine bits crossing the word boundary via OR */
422 			guint32 tmpreg = alloc_ireg (cfg);
423 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, tmpreg, MONO_LVREG_MS (ins->sreg1), 32 - ins->inst_c1);
424 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
425 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
426 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->dreg), tmpreg);
427 			allocated_vregs = TRUE;
	/* Logical shift right: like above but the upper word fills with zero */
431 	case OP_LSHR_UN_IMM: {
432 		if (COMPILE_LLVM (cfg))
434 		if (ins->inst_c1 == 32) {
435 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
436 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_MS (ins->dreg), 0);
437 		} else if (ins->inst_c1 == 0) {
438 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
439 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
440 		} else if (ins->inst_c1 > 32) {
441 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1 - 32);
442 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_MS (ins->dreg), 0);
444 			guint32 tmpreg = alloc_ireg (cfg);
445 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, tmpreg, MONO_LVREG_MS (ins->sreg1), 32 - ins->inst_c1);
446 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
447 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
448 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->dreg), tmpreg);
449 			allocated_vregs = TRUE;
	/* Left shift (case label elided): lower word shifts into the upper */
454 		if (COMPILE_LLVM (cfg))
456 		if (ins->inst_c1 == 32) {
457 			/* just move the lower half to the upper and zero the lower word */
458 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
459 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_LS (ins->dreg), 0);
460 		} else if (ins->inst_c1 == 0) {
461 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
462 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
463 		} else if (ins->inst_c1 > 32) {
464 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1 - 32);
465 			MONO_EMIT_NEW_ICONST (cfg, MONO_LVREG_LS (ins->dreg), 0);
467 			guint32 tmpreg = alloc_ireg (cfg);
468 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, tmpreg, MONO_LVREG_LS (ins->sreg1), 32 - ins->inst_c1);
469 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), ins->inst_c1);
470 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), ins->inst_c1);
471 			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->dreg), tmpreg);
472 			allocated_vregs = TRUE;
	/* Keep the caller's spec in sync with any opcode change made above */
482 	*spec = INS_INFO (ins->opcode);
483 	return allocated_vregs;
489  * A combined local copy and constant propagation pass.
/*
 * Walks each basic block once, tracking the defining instruction of every
 * vreg (`defs`) and the index at which it was defined (`def_index`), and
 * uses that to perform copy propagation, constant propagation/folding and
 * strength reduction — all local to a single bblock.
 * NOTE(review): this excerpt elides many lines (declarations, braces,
 * several conditions); comments below describe only the visible code.
 */
492 mono_local_cprop (MonoCompile *cfg)
494 	MonoBasicBlock *bb, *bb_opt;
498 	int filter = FILTER_IL_SEQ_POINT;
499 	int initial_max_vregs = cfg->next_vreg;
	/* Per-vreg tables sized to the current vreg count; grown later if
	 * strength reduction allocates more vregs */
501 	max = cfg->next_vreg;
502 	defs = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * cfg->next_vreg);
503 	def_index = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	/* Scratch bblock used to collect replacement code emitted by
	 * strength reduction */
504 	cfg->cbb = bb_opt = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
506 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
511 		/* Manually init the defs entries used by the bblock */
512 		MONO_BB_FOR_EACH_INS (bb, ins) {
513 			int sregs [MONO_MAX_SRC_REGS];
516 			if (ins->dreg != -1) {
517 #if SIZEOF_REGISTER == 4
				/* 'l' (long) vregs occupy a pair of entries on 32-bit */
518 				const char *spec = INS_INFO (ins->opcode);
519 				if (spec [MONO_INST_DEST] == 'l') {
520 					defs [ins->dreg + 1] = NULL;
521 					defs [ins->dreg + 2] = NULL;
524 				defs [ins->dreg] = NULL;
527 			num_sregs = mono_inst_get_src_registers (ins, sregs);
528 			for (i = 0; i < num_sregs; ++i) {
529 				int sreg = sregs [i];
530 #if SIZEOF_REGISTER == 4
531 				const char *spec = INS_INFO (ins->opcode);
532 				if (spec [MONO_INST_SRC1 + i] == 'l') {
533 					defs [sreg + 1] = NULL;
534 					defs [sreg + 2] = NULL;
		/* Main scan over the bblock's instructions */
542 		last_call_index = -1;
543 		MONO_BB_FOR_EACH_INS (bb, ins) {
544 			const char *spec = INS_INFO (ins->opcode);
545 			int regtype, srcindex, sreg;
547 			int sregs [MONO_MAX_SRC_REGS];
549 			if (ins->opcode == OP_NOP) {
550 				MONO_DELETE_INS (bb, ins);
554 			g_assert (ins->opcode > MONO_CEE_LAST);
556 			/* FIXME: Optimize this */
			/* Taking the address of a var invalidates its tracked def */
557 			if (ins->opcode == OP_LDADDR) {
558 				MonoInst *var = (MonoInst *)ins->inst_p0;
560 				defs [var->dreg] = NULL;
562 				if (!MONO_TYPE_ISSTRUCT (var->inst_vtype))
			/* Copy propagation through the value stored by a membase store */
567 			if (MONO_IS_STORE_MEMBASE (ins)) {
571 				if ((regtype == 'i') && (sreg != -1) && defs [sreg]) {
572 					MonoInst *def = defs [sreg];
574 					if ((def->opcode == OP_MOVE) && (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) && !vreg_is_volatile (cfg, def->sreg1)) {
575 						int vreg = def->sreg1;
576 						if (cfg->verbose_level > 2) printf ("CCOPY: R%d -> R%d\n", sreg, vreg);
			/* Now consider every source register of the instruction */
582 			num_sregs = mono_inst_get_src_registers (ins, sregs);
583 			for (srcindex = 0; srcindex < num_sregs; ++srcindex) {
586 					mono_inst_get_src_registers (ins, sregs);
588 				regtype = spec [MONO_INST_SRC1 + srcindex];
589 				sreg = sregs [srcindex];
591 				if ((regtype == ' ') || (sreg == -1) || (!defs [sreg]))
596 				/* Copy propagation */
598 				 * The first check makes sure the source of the copy did not change since
600 				 * The second check avoids volatile variables.
601 				 * The third check avoids copy propagating local vregs through a call,
602 				 * since the lvreg will be spilled
603 				 * The fourth check avoids copy propagating a vreg in cases where
604 				 * it would be eliminated anyway by reverse copy propagation later,
605 				 * because propagating it would create another use for it, thus making
606 				 * it impossible to use reverse copy propagation.
608 				/* Enabling this for floats trips up the fp stack */
610 				 * Enabling this for floats on amd64 seems to cause a failure in
611 				 * basic-math.cs, most likely because it gets rid of some r8->r4
614 				if (MONO_IS_MOVE (def) &&
615 				    (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) &&
616 				    !vreg_is_volatile (cfg, def->sreg1) &&
617 				    /* This avoids propagating local vregs across calls */
618 				    ((get_vreg_to_inst (cfg, def->sreg1) || !defs [def->sreg1] || (def_index [def->sreg1] >= last_call_index) || (def->opcode == OP_VMOVE))) &&
619 				    !(defs [def->sreg1] && mono_inst_next (defs [def->sreg1], filter) == def) &&
620 				    (!MONO_ARCH_USE_FPSTACK || (def->opcode != OP_FMOVE)) &&
621 				    (def->opcode != OP_FMOVE)) {
622 					int vreg = def->sreg1;
624 					if (cfg->verbose_level > 2) printf ("CCOPY/2: R%d -> R%d\n", sreg, vreg);
625 					sregs [srcindex] = vreg;
626 					mono_inst_set_src_registers (ins, sregs);
628 					/* Allow further iterations */
633 				/* Constant propagation */
634 				/* FIXME: Make is_inst_imm a macro */
635 				/* FIXME: Make is_inst_imm take an opcode argument */
636 				/* is_inst_imm is only needed for binops */
637 				if ((((def->opcode == OP_ICONST) || ((sizeof (gpointer) == 8) && (def->opcode == OP_I8CONST))) &&
638 				     (((srcindex == 0) && (ins->sreg2 == -1)) || mono_arch_is_inst_imm (def->inst_c0))) ||
639 				    (!MONO_ARCH_USE_FPSTACK && (def->opcode == OP_R8CONST))) {
642 					/* srcindex == 1 -> binop, ins->sreg2 == -1 -> unop */
643 					if ((srcindex == 1) && (ins->sreg1 != -1) && defs [ins->sreg1] && (defs [ins->sreg1]->opcode == OP_ICONST) && defs [ins->sreg2]) {
644 						/* Both arguments are constants, perform cfold */
645 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
646 					} else if ((srcindex == 0) && (ins->sreg2 != -1) && defs [ins->sreg2]) {
647 						/* Arg 1 is constant, swap arguments if possible */
648 						int opcode = ins->opcode;
649 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
650 						if (ins->opcode != opcode) {
651 							/* Allow further iterations */
655 					} else if ((srcindex == 0) && (ins->sreg2 == -1)) {
656 						/* Constant unop, perform cfold */
657 						mono_constant_fold_ins (cfg, ins, defs [ins->sreg1], NULL, TRUE);
					/* Fold the constant into an _IMM form of the opcode */
660 					opcode2 = mono_op_to_op_imm (ins->opcode);
661 					if ((opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0) && ((srcindex == 1) || (ins->sreg2 == -1))) {
662 						ins->opcode = opcode2;
663 						if ((def->opcode == OP_I8CONST) && (sizeof (gpointer) == 4)) {
664 							ins->inst_ls_word = def->inst_ls_word;
665 							ins->inst_ms_word = def->inst_ms_word;
667 							ins->inst_imm = def->inst_c0;
669 						sregs [srcindex] = -1;
670 						mono_inst_set_src_registers (ins, sregs);
672 						if ((opcode2 == OP_VOIDCALL) || (opcode2 == OP_CALL) || (opcode2 == OP_LCALL) || (opcode2 == OP_FCALL))
673 							((MonoCallInst*)ins)->fptr = (gpointer)ins->inst_imm;
675 						/* Allow further iterations */
681 #if defined(TARGET_X86) || defined(TARGET_AMD64)
					/* Fold a constant index into an x86 LEA's immediate */
682 					if ((ins->opcode == OP_X86_LEA) && (srcindex == 1)) {
683 #if SIZEOF_REGISTER == 8
684 						/* FIXME: Use OP_PADD_IMM when the new JIT is done */
685 						ins->opcode = OP_LADD_IMM;
687 						ins->opcode = OP_ADD_IMM;
689 						ins->inst_imm += def->inst_c0 << ins->backend.shift_amount;
					/* Fold a constant base address into a load_mem opcode */
693 					opcode2 = mono_load_membase_to_load_mem (ins->opcode);
694 					if ((srcindex == 0) && (opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0)) {
695 						ins->opcode = opcode2;
696 						ins->inst_imm = def->inst_c0 + ins->inst_offset;
701 				else if (((def->opcode == OP_ADD_IMM) || (def->opcode == OP_LADD_IMM)) && (MONO_IS_LOAD_MEMBASE (ins) || MONO_ARCH_IS_OP_MEMBASE (ins->opcode))) {
702 					/* ADD_IMM is created by spill_global_vars */
704 					 * We have to guarantee that def->sreg1 haven't changed since def->dreg
705 					 * was defined. cfg->frame_reg is assumed to remain constant.
707 					if ((def->sreg1 == cfg->frame_reg) || ((mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1))) {
708 						ins->inst_basereg = def->sreg1;
709 						ins->inst_offset += def->inst_imm;
711 				} else if ((ins->opcode == OP_ISUB_IMM) && (def->opcode == OP_IADD_IMM) && (mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1)) {
712 					ins->sreg1 = def->sreg1;
713 					ins->inst_imm -= def->inst_imm;
714 				} else if ((ins->opcode == OP_IADD_IMM) && (def->opcode == OP_ISUB_IMM) && (mono_inst_next (def, filter) == ins) && (def->dreg != def->sreg1)) {
715 					ins->sreg1 = def->sreg1;
716 					ins->inst_imm -= def->inst_imm;
717 				} else if (ins->opcode == OP_STOREI1_MEMBASE_REG &&
718 				           (def->opcode == OP_ICONV_TO_U1 || def->opcode == OP_ICONV_TO_I1 || def->opcode == OP_SEXT_I4 || (SIZEOF_REGISTER == 8 && def->opcode == OP_LCONV_TO_U1)) &&
719 				           (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
720 					/* Avoid needless sign extension */
721 					ins->sreg1 = def->sreg1;
722 				} else if (ins->opcode == OP_STOREI2_MEMBASE_REG &&
723 				           (def->opcode == OP_ICONV_TO_U2 || def->opcode == OP_ICONV_TO_I2 || def->opcode == OP_SEXT_I4 || (SIZEOF_REGISTER == 8 && def->opcode == OP_LCONV_TO_I2)) &&
724 				           (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
725 					/* Avoid needless sign extension */
726 					ins->sreg1 = def->sreg1;
727 				} else if (ins->opcode == OP_COMPARE_IMM && def->opcode == OP_LDADDR && ins->inst_imm == 0) {
					/* Address of a local is never 0: fold compare against
					 * a synthetic non-zero constant */
730 					memset (&dummy_arg1, 0, sizeof (MonoInst));
731 					dummy_arg1.opcode = OP_ICONST;
732 					dummy_arg1.inst_c0 = 1;
734 					mono_constant_fold_ins (cfg, ins, &dummy_arg1, NULL, TRUE);
738 			g_assert (cfg->cbb == bb_opt);
739 			g_assert (!bb_opt->code);
740 			/* Do strength reduction here */
741 			if (mono_strength_reduction_ins (cfg, ins, &spec) && max < cfg->next_vreg) {
742 				MonoInst **defs_prev = defs;
743 				gint32 *def_index_prev = def_index;
744 				guint32 prev_max = max;
745 				guint32 additional_vregs = cfg->next_vreg - initial_max_vregs;
747 				/* We have more vregs so we need to reallocate defs and def_index arrays */
				/* Over-allocate (x2) to amortize future growth */
748 				max = initial_max_vregs + additional_vregs * 2;
749 				defs = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * max);
750 				def_index = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * max);
752 				/* Keep the entries for the previous vregs, zero the rest */
753 				memcpy (defs, defs_prev, sizeof (MonoInst*) * prev_max);
754 				memset (defs + prev_max, 0, sizeof (MonoInst*) * (max - prev_max));
755 				memcpy (def_index, def_index_prev, sizeof (guint32) * prev_max);
756 				memset (def_index + prev_max, 0, sizeof (guint32) * (max - prev_max));
759 			if (cfg->cbb->code || (cfg->cbb != bb_opt)) {
760 				MonoInst *saved_prev = ins->prev;
762 				/* If we have code in cbb, we need to replace ins with the decomposition */
763 				mono_replace_ins (cfg, bb, ins, &ins->prev, bb_opt, cfg->cbb);
764 				bb_opt->code = bb_opt->last_ins = NULL;
765 				bb_opt->in_count = bb_opt->out_count = 0;
768 				/* ins is hanging, continue scanning the emitted code */
			/* Record this instruction as the def of its dreg */
773 			if (spec [MONO_INST_DEST] != ' ') {
774 				MonoInst *def = defs [ins->dreg];
776 				if (def && (def->opcode == OP_ADD_IMM) && (def->sreg1 == cfg->frame_reg) && (MONO_IS_STORE_MEMBASE (ins))) {
777 					/* ADD_IMM is created by spill_global_vars */
778 					/* cfg->frame_reg is assumed to remain constant */
779 					ins->inst_destbasereg = def->sreg1;
780 					ins->inst_offset += def->inst_imm;
783 			if (!MONO_IS_STORE_MEMBASE (ins) && !vreg_is_volatile (cfg, ins->dreg)) {
784 				defs [ins->dreg] = ins;
785 				def_index [ins->dreg] = ins_index;
			/* Remember where the last call was, to block propagation of
			 * local vregs across it */
789 			if (MONO_IS_CALL (ins))
790 				last_call_index = ins_index;
/*
 * reg_is_softreg_no_fpstack:
 * TRUE if REG (with register-type character SPEC: 'i' int, 'f' float,
 * 'x' SIMD) is a virtual (non-hardware) register, excluding float vregs
 * on fp-stack architectures.
 * NOTE(review): the closing of the expression/#endif is elided here.
 */
797 static inline gboolean
798 reg_is_softreg_no_fpstack (int reg, const char spec)
800 	return (spec == 'i' && reg >= MONO_MAX_IREGS)
801 		|| ((spec == 'f' && reg >= MONO_MAX_FREGS) && !MONO_ARCH_USE_FPSTACK)
802 #ifdef MONO_ARCH_SIMD_INTRINSICS
803 		|| (spec == 'x' && reg >= MONO_MAX_XREGS)
/*
 * reg_is_softreg:
 * Like reg_is_softreg_no_fpstack () but also counts float vregs on
 * fp-stack architectures as soft registers.
 * NOTE(review): the closing of the expression/#endif is elided here.
 */
808 static inline gboolean
809 reg_is_softreg (int reg, const char spec)
811 	return (spec == 'i' && reg >= MONO_MAX_IREGS)
812 		|| (spec == 'f' && reg >= MONO_MAX_FREGS)
813 #ifdef MONO_ARCH_SIMD_INTRINSICS
814 		|| (spec == 'x' && reg >= MONO_MAX_XREGS)
/*
 * mono_is_simd_accessor:
 * TRUE if INS is one of the SIMD element-insert opcodes (used to prevent
 * reverse copy propagation from rewriting their dreg).
 * NOTE(review): additional case labels, the default branch and the return
 * statements are on elided lines.
 */
819 static inline gboolean
820 mono_is_simd_accessor (MonoInst *ins)
822 	switch (ins->opcode) {
823 #ifdef MONO_ARCH_SIMD_INTRINSICS
831 	case OP_INSERTX_U1_SLOW:
832 	case OP_INSERTX_I4_SLOW:
833 	case OP_INSERTX_R4_SLOW:
834 	case OP_INSERTX_R8_SLOW:
835 	case OP_INSERTX_I8_SLOW:
846  * Get rid of the dead assignments to local vregs like the ones created by the
/*
 * Local (per-bblock) dead code elimination plus a limited reverse copy
 * propagation. Works with two bitsets: `used` (vreg is read later in the
 * bblock) and `defined` (vreg has a later redefinition in the bblock),
 * filled during a backward walk of each bblock.
 * NOTE(review): several lines (loop heads, braces, #endif closers) are
 * elided in this excerpt.
 */
850 mono_local_deadce (MonoCompile *cfg)
853 	MonoInst *ins, *prev;
854 	MonoBitSet *used, *defined;
856 	//mono_print_code (cfg, "BEFORE LOCAL-DEADCE");
859 	 * Assignments to global vregs can't be eliminated so this pass must come
860 	 * after the handle_global_vregs () pass.
863 	used = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
864 	defined = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
866 	/* First pass: collect liveness info */
867 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
868 		/* Manually init the defs entries used by the bblock */
869 		MONO_BB_FOR_EACH_INS (bb, ins) {
870 			const char *spec = INS_INFO (ins->opcode);
871 			int sregs [MONO_MAX_SRC_REGS];
874 			if (spec [MONO_INST_DEST] != ' ') {
875 				mono_bitset_clear_fast (used, ins->dreg);
876 				mono_bitset_clear_fast (defined, ins->dreg);
877 #if SIZEOF_REGISTER == 4
				/* On 32-bit, long vregs also occupy the next slot */
879 					mono_bitset_clear_fast (used, ins->dreg + 1);
880 					mono_bitset_clear_fast (defined, ins->dreg + 1);
883 			num_sregs = mono_inst_get_src_registers (ins, sregs);
884 			for (i = 0; i < num_sregs; ++i) {
885 				mono_bitset_clear_fast (used, sregs [i]);
886 #if SIZEOF_REGISTER == 4
887 					mono_bitset_clear_fast (used, sregs [i] + 1);
893 		 * Make a reverse pass over the instruction list
895 		MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) {
896 			const char *spec = INS_INFO (ins->opcode);
897 			int sregs [MONO_MAX_SRC_REGS];
899 			MonoInst *prev_f = mono_inst_prev (ins, FILTER_NOP | FILTER_IL_SEQ_POINT);
901 			if (ins->opcode == OP_NOP) {
902 				MONO_DELETE_INS (bb, ins);
906 			g_assert (ins->opcode > MONO_CEE_LAST);
908 			if (MONO_IS_NON_FP_MOVE (ins) && prev_f) {
913 				spec2 = INS_INFO (def->opcode);
916 				 * Perform a limited kind of reverse copy propagation, i.e.
917 				 * transform B <- FOO; A <- B into A <- FOO
918 				 * This isn't copyprop, not deadce, but it can only be performed
919 				 * after handle_global_vregs () has run.
921 				if (!get_vreg_to_inst (cfg, ins->sreg1) && (spec2 [MONO_INST_DEST] != ' ') && (def->dreg == ins->sreg1) && !mono_bitset_test_fast (used, ins->sreg1) && !MONO_IS_STORE_MEMBASE (def) && reg_is_softreg (ins->sreg1, spec [MONO_INST_DEST]) && !mono_is_simd_accessor (def)) {
922 					if (cfg->verbose_level > 2) {
923 						printf ("\tReverse copyprop in BB%d on ", bb->block_num);
924 						mono_print_ins (ins);
					/* Retarget FOO's dreg and delete the move */
927 					def->dreg = ins->dreg;
928 					MONO_DELETE_INS (bb, ins);
929 					spec = INS_INFO (ins->opcode);
933 			/* Enabling this on x86 could screw up the fp stack */
934 			if (reg_is_softreg_no_fpstack (ins->dreg, spec [MONO_INST_DEST])) {
936 				 * Assignments to global vregs can only be eliminated if there is another
937 				 * assignment to the same vreg later in the same bblock.
939 				if (!mono_bitset_test_fast (used, ins->dreg) &&
940 				    (!get_vreg_to_inst (cfg, ins->dreg) || (!bb->extended && !vreg_is_volatile (cfg, ins->dreg) && mono_bitset_test_fast (defined, ins->dreg))) &&
941 				    MONO_INS_HAS_NO_SIDE_EFFECT (ins)) {
942 					/* Happens with CMOV instructions */
943 					if (prev_f && prev_f->opcode == OP_ICOMPARE_IMM) {
944 						MonoInst *prev = prev_f;
946 						 * Can't use DELETE_INS since that would interfere with the
951 					//printf ("DEADCE: "); mono_print_ins (ins);
952 					MONO_DELETE_INS (bb, ins);
953 					spec = INS_INFO (ins->opcode);
			/* Update liveness: a def kills `used`, sets `defined`;
			 * each source sets `used` */
956 			if (spec [MONO_INST_DEST] != ' ')
957 				mono_bitset_clear_fast (used, ins->dreg);
960 			if (spec [MONO_INST_DEST] != ' ')
961 				mono_bitset_set_fast (defined, ins->dreg);
962 			num_sregs = mono_inst_get_src_registers (ins, sregs);
963 			for (i = 0; i < num_sregs; ++i)
964 				mono_bitset_set_fast (used, sregs [i]);
			/* For membase stores dreg is the base register — a use */
965 			if (MONO_IS_STORE_MEMBASE (ins))
966 				mono_bitset_set_fast (used, ins->dreg);
			/* Calls implicitly use their outgoing argument registers */
968 			if (MONO_IS_CALL (ins)) {
969 				MonoCallInst *call = (MonoCallInst*)ins;
972 				if (call->out_ireg_args) {
973 					for (l = call->out_ireg_args; l; l = l->next) {
974 						guint32 regpair, reg;
976 						regpair = (guint32)(gssize)(l->data);
977 						reg = regpair & 0xffffff;
979 						mono_bitset_set_fast (used, reg);
983 				if (call->out_freg_args) {
984 					for (l = call->out_freg_args; l; l = l->next) {
985 						guint32 regpair, reg;
987 						regpair = (guint32)(gssize)(l->data);
988 						reg = regpair & 0xffffff;
990 						mono_bitset_set_fast (used, reg);
997 	//mono_print_code (cfg, "AFTER LOCAL-DEADCE");
1000 #endif /* DISABLE_JIT */