From: Marcos Henrich
Date: Fri, 28 Aug 2015 12:21:00 +0000 (+0100)
Subject: Merge pull request #2003 from esdrubal/seq_test_fix2
X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=commitdiff_plain;h=0db4b09607843d097a6f60f5c8101bb7ef8f3866;hp=d6866aa8dd3adf69fc02b605cd3e70c24411edc5;p=mono.git

Merge pull request #2003 from esdrubal/seq_test_fix2

[runtime] Fix test_op_il_seq_point in amd64.
---

diff --git a/mono/mini/mini-amd64.c b/mono/mini/mini-amd64.c
index 7ac0615d9e1..f0ca62d8d97 100644
--- a/mono/mini/mini-amd64.c
+++ b/mono/mini/mini-amd64.c
@@ -179,6 +179,15 @@ amd64_is_near_call (guint8 *code)
 	return code [0] == 0xe8;
 }
 
+static inline gboolean
+amd64_use_imm32 (gint64 val)
+{
+	if (mini_get_debug_options()->single_imm_size)
+		return FALSE;
+
+	return amd64_is_imm32 (val);
+}
+
 #ifdef __native_client_codegen__
 
 /* Keep track of instruction "depth", that is, the level of sub-instruction */
@@ -3182,7 +3191,7 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
 			break;
 		case OP_COMPARE_IMM:
 		case OP_LCOMPARE_IMM:
-			if (!amd64_is_imm32 (ins->inst_imm)) {
+			if (!amd64_use_imm32 (ins->inst_imm)) {
 				NEW_INS (cfg, ins, temp, OP_I8CONST);
 				temp->inst_c0 = ins->inst_imm;
 				temp->dreg = mono_alloc_ireg (cfg);
@@ -3197,7 +3206,7 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
 #ifndef __native_client_codegen__
 			/* Don't generate memindex opcodes (to simplify */
 			/* read sandboxing) */
-			if (!amd64_is_imm32 (ins->inst_offset)) {
+			if (!amd64_use_imm32 (ins->inst_offset)) {
 				NEW_INS (cfg, ins, temp, OP_I8CONST);
 				temp->inst_c0 = ins->inst_offset;
 				temp->dreg = mono_alloc_ireg (cfg);
@@ -3210,7 +3219,7 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
 		case OP_STORE_MEMBASE_IMM:
 #endif
 		case OP_STOREI8_MEMBASE_IMM:
-			if (!amd64_is_imm32 (ins->inst_imm)) {
+			if (!amd64_use_imm32 (ins->inst_imm)) {
 				NEW_INS (cfg, ins, temp, OP_I8CONST);
 				temp->inst_c0 = ins->inst_imm;
 				temp->dreg = mono_alloc_ireg (cfg);
@@ -3843,10 +3852,10 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 #endif
 	case OP_LOADI8_MEM:
 		// FIXME: Decompose this earlier
-		if (amd64_is_imm32 (ins->inst_imm))
+		if (amd64_use_imm32 (ins->inst_imm))
 			amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
 		else {
-			amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
 			amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
 		}
 		break;
@@ -3856,10 +3865,10 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 		break;
 	case OP_LOADU4_MEM:
 		// FIXME: Decompose this earlier
-		if (amd64_is_imm32 (ins->inst_imm))
+		if (amd64_use_imm32 (ins->inst_imm))
 			amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
 		else {
-			amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
 			amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
 		}
 		break;
@@ -4590,7 +4599,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
 	case OP_ICONST:
 	case OP_I8CONST:
-		if ((((guint64)ins->inst_c0) >> 32) == 0)
+		if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_get_debug_options()->single_imm_size)
 			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
 		else
 			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
@@ -7676,7 +7685,7 @@ mono_arch_flush_register_windows (void)
 gboolean
 mono_arch_is_inst_imm (gint64 imm)
 {
-	return amd64_is_imm32 (imm);
+	return amd64_use_imm32 (imm);
 }
 
 /*
@@ -8078,7 +8087,7 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
 		if (item->is_equals) {
 			if (item->check_target_idx) {
 				if (!item->compare_done) {
-					if (amd64_is_imm32 (item->key))
+					if (amd64_use_imm32 ((gint64)item->key))
 						item->chunk_size += CMP_SIZE;
 					else
 						item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
@@ -8114,7 +8123,7 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
 				}
 			}
 		} else {
-			if (amd64_is_imm32 (item->key))
+			if (amd64_use_imm32 ((gint64)item->key))
 				item->chunk_size += CMP_SIZE;
 			else
 				item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
@@ -8145,10 +8154,10 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
 
 		if (item->check_target_idx || fail_case) {
 			if (!item->compare_done || fail_case) {
-				if (amd64_is_imm32 (item->key))
+				if (amd64_use_imm32 ((gint64)item->key))
 					amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
 				else {
-					amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+					amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
 					amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
 				}
 			}
@@ -8196,10 +8205,10 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
 #endif
 				}
 			} else {
-				if (amd64_is_imm32 (item->key))
+				if (amd64_use_imm32 ((gint64)item->key))
 					amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (gpointer));
 				else {
-					amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+					amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (gpointer));
 					amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
 				}
 				item->jmp_code = code;
diff --git a/mono/mini/mini-runtime.c b/mono/mini/mini-runtime.c
index 6bb8d259bca..8b454087638 100644
--- a/mono/mini/mini-runtime.c
+++ b/mono/mini/mini-runtime.c
@@ -2658,6 +2658,8 @@ mini_parse_debug_options (void)
 			debug_options.gen_sdb_seq_points = TRUE;
 		else if (!strcmp (arg, "gen-compact-seq-points"))
 			debug_options.gen_seq_points_compact_data = TRUE;
+		else if (!strcmp (arg, "single-imm-size"))
+			debug_options.single_imm_size = TRUE;
 		else if (!strcmp (arg, "init-stacks"))
 			debug_options.init_stacks = TRUE;
 		else if (!strcmp (arg, "casts"))
diff --git a/mono/mini/mini.h b/mono/mini/mini.h
index 956db1d0db8..b47a0ab2f87 100644
--- a/mono/mini/mini.h
+++ b/mono/mini/mini.h
@@ -1916,6 +1916,11 @@ typedef struct {
 	 */
 	gboolean gen_sdb_seq_points;
 	gboolean gen_seq_points_compact_data;
+	/*
+	 * Setting single_imm_size should guarantee that each time managed code is compiled
+	 * the same instructions and registers are used, regardless of the size of used values.
+	 */
+	gboolean single_imm_size;
 	gboolean explicit_null_checks;
 	/*
 	 * Fill stack frames with 0x2a in method prologs. This helps with the
diff --git a/mono/mini/test_op_il_seq_point.sh b/mono/mini/test_op_il_seq_point.sh
index a5c7a3fb36c..5df176e4755 100755
--- a/mono/mini/test_op_il_seq_point.sh
+++ b/mono/mini/test_op_il_seq_point.sh
@@ -19,6 +19,10 @@ clean_aot () {
 	rm -rf *.exe..so *.exe.dylib *.exe.dylib.dSYM
 }
 
+# The test compares the generated native code size between compilations with and without seq points.
+# On some architectures, e.g. amd64, 32-bit instructions and registers are used instead of 64-bit ones when possible.
+# Using MONO_DEBUG=single-imm-size disables these 32-bit optimizations, thus maintaining the same native code size across compilations.
+
 get_methods () {
 	if [ -z $4 ]; then
 		MONO_PATH=$1 $2 -v --compile-all=1 $3 | grep '^Method .*code length' | sed 's/emitted[^()]*//' | sort
@@ -39,14 +43,14 @@ get_method () {
 
 diff_methods () {
 	TMP_FILE=$(tmp_file)
-	echo "$(get_methods $1 $2 $3 $4)" >$TMP_FILE
-	diff <(cat $TMP_FILE) <(echo "$(MONO_DEBUG=gen-compact-seq-points get_methods $1 $2 $3 $4)")
+	echo "$(MONO_DEBUG=single-imm-size get_methods $1 $2 $3 $4)" >$TMP_FILE
+	diff <(cat $TMP_FILE) <(echo "$(MONO_DEBUG=gen-compact-seq-points,single-imm-size get_methods $1 $2 $3 $4)")
 }
 
 diff_method () {
 	TMP_FILE=$(tmp_file)
-	echo "$(get_method $1 $2 $3 $4 $5)" >$TMP_FILE
-	sdiff -w 150 <(cat $TMP_FILE) <(echo "$(MONO_DEBUG=gen-compact-seq-points get_method $1 $2 $3 $4 $5 | grep -Ev il_seq_point)")
+	echo "$(MONO_DEBUG=single-imm-size get_method $1 $2 $3 $4 $5)" >$TMP_FILE
+	sdiff -w 150 <(cat $TMP_FILE) <(echo "$(MONO_DEBUG=gen-compact-seq-points,single-imm-size get_method $1 $2 $3 $4 $5 | grep -Ev il_seq_point)")
 }
 
 get_method_name () {
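
The new amd64_use_imm32 helper in the patch is a thin wrapper around the existing 32-bit immediate check: when the single-imm-size debug option is set it always reports "does not fit", so the JIT emits the same 64-bit encoding regardless of the value and code size stays stable across compilations. The sketch below is a minimal standalone illustration of that idea, not Mono code; single_imm_size, is_imm32 and use_imm32 are hypothetical stand-ins for mini_get_debug_options()->single_imm_size, amd64_is_imm32 and amd64_use_imm32.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for mini_get_debug_options ()->single_imm_size. */
static bool single_imm_size = false;

/* A value fits in a 32-bit immediate if it survives a round trip
 * through a sign-extended 32-bit integer. */
static bool
is_imm32 (int64_t val)
{
	return val == (int64_t) (int32_t) val;
}

/* With the debug option set, always report "does not fit", so the
 * same (64-bit) instruction sequence is chosen for every value. */
static bool
use_imm32 (int64_t val)
{
	if (single_imm_size)
		return false;
	return is_imm32 (val);
}

int
main (void)
{
	printf ("%d %d\n", use_imm32 (42), use_imm32 (1LL << 40));	/* 1 0 */
	single_imm_size = true;
	printf ("%d %d\n", use_imm32 (42), use_imm32 (1LL << 40));	/* 0 0 */
	return 0;
}

With the flag forcing a single immediate size, the test script above can diff method code sizes with and without sequence points under MONO_DEBUG=single-imm-size, so any remaining size difference is attributable to seq points rather than to immediate-size optimizations.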