/*
 * C prologue for the AMD64 burg instruction selector: helper macros used by
 * the rule actions below, plus prototypes for the opcode-translation helpers
 * defined after the trailing %% marker.
 */

/* Emit "cmp [basereg + offset], operand" (32-bit compare against memory). */
#define MONO_EMIT_NEW_AMD64_ICOMPARE_MEMBASE_REG(cfg,basereg,offset,operand) do { \
	MonoInst *inst; \
	inst = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
	inst->opcode = OP_AMD64_ICOMPARE_MEMBASE_REG; \
	inst->inst_basereg = basereg; \
	inst->inst_offset = offset; \
	inst->sreg2 = operand; \
	mono_bblock_add_inst (cfg->cbb, inst); \
	} while (0)

/* Emit "cmp [basereg + offset], imm" (32-bit compare against an immediate). */
#define MONO_EMIT_NEW_AMD64_ICOMPARE_MEMBASE_IMM(cfg,basereg,offset,operand) do { \
	MonoInst *inst; \
	inst = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
	inst->opcode = OP_AMD64_ICOMPARE_MEMBASE_IMM; \
	inst->inst_basereg = basereg; \
	inst->inst_offset = offset; \
	inst->inst_imm = operand; \
	mono_bblock_add_inst (cfg->cbb, inst); \
	} while (0)

/* override the arch independent versions with fast x86 versions */
#undef MONO_EMIT_BOUNDS_CHECK
#undef MONO_EMIT_BOUNDS_CHECK_IMM

/*
 * Array bounds check: compare the array length field directly from memory
 * against the index, throwing IndexOutOfRangeException on index >= length
 * (unsigned compare, so negative indices are caught too).
 * NOTE(review): these expand `state->tree`, so they may only be used inside
 * burg rule actions where `state` is in scope.
 */
#define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
	if (! (state->tree->flags & MONO_INST_NORANGECHECK)) { \
		MONO_EMIT_NEW_AMD64_ICOMPARE_MEMBASE_REG (cfg, array_reg, G_STRUCT_OFFSET (array_type, array_length_field), index_reg); \
		MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
	} \
	} while (0)

/* Same as above for a constant index. */
#define MONO_EMIT_BOUNDS_CHECK_IMM(cfg, array_reg, array_type, array_length_field, index_imm) do { \
	if (! (state->tree->flags & MONO_INST_NORANGECHECK)) { \
		MONO_EMIT_NEW_AMD64_ICOMPARE_MEMBASE_IMM (cfg, array_reg, G_STRUCT_OFFSET (array_type, array_length_field), index_imm); \
		MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
	} \
	} while (0)

/* Opcode-translation helpers, defined in the C epilogue after the rules. */
int cbranch_to_fcbranch (int opcode);
int bialu_to_bialu_imm (int opcode);
int ceq_to_fceq (int opcode);
int call_reg_to_call_membase (int opcode);

%%
#
# inssel-amd64.brg: burg file for special AMD64 instructions
#
# Author:
#   Dietmar Maurer (dietmar@ximian.com)
#   Paolo Molaro (lupus@ximian.com)
#
# (C) 2002 Ximian, Inc.
# stmt: OP_START_HANDLER { MonoInst *spvar = mono_find_spvar_for_region (s, s->cbb->region); MONO_EMIT_NEW_STORE_MEMBASE (s, OP_STORE_MEMBASE_REG, spvar->inst_basereg, spvar->inst_offset, X86_ESP); } stmt: CEE_ENDFINALLY { MonoInst *spvar = mono_find_spvar_for_region (s, s->cbb->region); MONO_EMIT_NEW_LOAD_MEMBASE (s, X86_ESP, spvar->inst_basereg, spvar->inst_offset); tree->opcode = CEE_RET; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_ENDFILTER (reg) { MonoInst *spvar = mono_find_spvar_for_region (s, s->cbb->region); MONO_EMIT_NEW_UNALU (s, OP_MOVE, X86_EAX, state->left->reg1); MONO_EMIT_NEW_LOAD_MEMBASE (s, X86_ESP, spvar->inst_basereg, spvar->inst_offset); tree->opcode = CEE_RET; mono_bblock_add_inst (s->cbb, tree); } freg: OP_LCONV_TO_R8 (reg) { tree->sreg1 = state->left->reg1; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } freg: OP_LCONV_TO_R4 (reg) { tree->sreg1 = state->left->reg1; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } cflags: OP_COMPARE (CEE_LDIND_I4 (base), reg) { tree->opcode = OP_AMD64_ICOMPARE_MEMBASE_REG; tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; tree->sreg2 = state->right->reg1; mono_bblock_add_inst (s->cbb, tree); } cflags: OP_COMPARE (CEE_LDIND_I4 (base), OP_ICONST) { tree->opcode = OP_AMD64_ICOMPARE_MEMBASE_IMM; tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; tree->inst_imm = state->right->tree->inst_c0; mono_bblock_add_inst (s->cbb, tree); } cflags: OP_COMPARE (reg, CEE_LDIND_I4 (base)) { tree->opcode = OP_AMD64_ICOMPARE_REG_MEMBASE; tree->sreg2 = state->right->left->tree->inst_basereg; tree->inst_offset = state->right->left->tree->inst_offset; tree->sreg1 = state->left->reg1; mono_bblock_add_inst (s->cbb, tree); } stmt: CEE_STIND_I1 (base, OP_CEQ (cflags)) { tree->opcode = OP_X86_SETEQ_MEMBASE; tree->inst_offset = state->left->tree->inst_offset; 
tree->inst_basereg = state->left->tree->inst_basereg; mono_bblock_add_inst (s->cbb, tree); } reg: OP_LOCALLOC (OP_ICONST) { if (tree->flags & MONO_INST_INIT) { /* microcoded in mini-x86.c */ tree->sreg1 = mono_regstate_next_int (s->rs); tree->dreg = state->reg1; MONO_EMIT_NEW_ICONST (s, tree->sreg1, state->left->tree->inst_c0); mono_bblock_add_inst (s->cbb, tree); } else { guint32 size = state->left->tree->inst_c0; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, size); MONO_EMIT_UNALU (s, tree, OP_MOVE, state->reg1, X86_ESP); } } reg: OP_LOCALLOC (reg) { tree->sreg1 = state->left->tree->dreg; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_SETRET (reg) { tree->opcode = OP_MOVE; tree->sreg1 = state->left->reg1; tree->dreg = X86_EAX; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_SETRET (reg) { MONO_EMIT_NEW_UNALU (s, OP_MOVE, X86_EDX, state->left->reg2); tree->opcode = OP_MOVE; tree->sreg1 = state->left->reg1; tree->dreg = X86_EAX; mono_bblock_add_inst (s->cbb, tree); } reg: CEE_LDIND_REF (OP_REGVAR), reg: CEE_LDIND_I (OP_REGVAR), reg: CEE_LDIND_I4 (OP_REGVAR), reg: CEE_LDIND_U4 (OP_REGVAR) "0" { /* This rule might not work on all archs */ state->reg1 = state->left->tree->dreg; tree->dreg = state->reg1; } stmt: OP_SETRET (CEE_LDIND_REF (OP_REGVAR)), stmt: OP_SETRET (CEE_LDIND_I4 (OP_REGVAR)), stmt: OP_SETRET (CEE_LDIND_U4 (OP_REGVAR)), stmt: OP_SETRET (CEE_LDIND_I (OP_REGVAR)) { tree->opcode = OP_MOVE; tree->sreg1 = state->left->left->tree->dreg; tree->dreg = X86_EAX; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_SETRET (freg) { if (mono_method_signature (s->method)->ret->type == MONO_TYPE_R4) tree->opcode = OP_AMD64_SET_XMMREG_R4; else tree->opcode = OP_AMD64_SET_XMMREG_R8; tree->sreg1 = state->left->reg1; tree->dreg = 0; /* %xmm0 */ mono_bblock_add_inst (s->cbb, tree); /* nothing to do */ } stmt: OP_SETRET (OP_ICONST) { if 
(state->left->tree->inst_c0 == 0) { MONO_EMIT_BIALU (s, tree, CEE_XOR, AMD64_RAX, AMD64_RAX, AMD64_RAX); } else { tree->opcode = OP_ICONST; tree->inst_c0 = state->left->tree->inst_c0; tree->dreg = X86_EAX; mono_bblock_add_inst (s->cbb, tree); } } stmt: OP_OUTARG (reg) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->reg1; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG_REG (reg) { MonoCallInst *call = (MonoCallInst*)tree->inst_right; tree->opcode = OP_MOVE; tree->sreg1 = state->left->reg1; tree->dreg = mono_regstate_next_int (s->rs); mono_bblock_add_inst (s->cbb, tree); mono_call_inst_add_outarg_reg (s, call, tree->dreg, tree->unused, FALSE); } # we need to reduce this code duplication with some burg syntax extension stmt: OP_OUTARG (CEE_LDIND_REF (OP_REGVAR)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_I4 (OP_REGVAR)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_U4 (OP_REGVAR)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_I (OP_REGVAR)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_I (base)) { tree->opcode = OP_X86_PUSH_MEMBASE; tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_REF (base)) { tree->opcode = OP_X86_PUSH_MEMBASE; tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (CEE_LDIND_REF (OP_REGVAR)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: 
OP_OUTARG (CEE_LDOBJ (reg)) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->reg1; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG (freg) { MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, 8); tree->opcode = OP_STORER8_MEMBASE_REG; tree->sreg1 = state->left->reg1; tree->inst_destbasereg = X86_ESP; tree->inst_offset = 0; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG_R4 (freg) { MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, 8); tree->opcode = OP_STORER4_MEMBASE_REG; tree->sreg1 = state->left->reg1; tree->inst_destbasereg = X86_ESP; tree->inst_offset = 0; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG_R8 (freg) { MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, 8); tree->opcode = OP_STORER8_MEMBASE_REG; tree->sreg1 = state->left->reg1; tree->inst_destbasereg = X86_ESP; tree->inst_offset = 0; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_AMD64_OUTARG_XMMREG_R4 (freg) { MonoCallInst *call = (MonoCallInst*)tree->inst_right; tree->opcode = OP_AMD64_SET_XMMREG_R4; tree->sreg1 = state->left->reg1; tree->dreg = mono_regstate_next_float (s->rs); mono_bblock_add_inst (s->cbb, tree); mono_call_inst_add_outarg_reg (s, call, tree->dreg, tree->unused, TRUE); } stmt: OP_AMD64_OUTARG_XMMREG_R8 (freg) { MonoCallInst *call = (MonoCallInst*)tree->inst_right; tree->opcode = OP_AMD64_SET_XMMREG_R8; tree->sreg1 = state->left->reg1; tree->dreg = mono_regstate_next_float (s->rs); mono_bblock_add_inst (s->cbb, tree); mono_call_inst_add_outarg_reg (s, call, tree->dreg, tree->unused, TRUE); } stmt: OP_OUTARG_VT (CEE_LDOBJ (base)) { MonoInst *vt = state->left->left->tree; //g_print ("vt size: %d at R%d + %d\n", tree->inst_imm, vt->inst_basereg, vt->inst_offset); if (!tree->inst_imm) return; if (tree->inst_imm == 8) { /* Can't use this for < 8 since it does an 8 byte memory load */ tree->opcode = OP_X86_PUSH_MEMBASE; tree->inst_basereg = vt->inst_basereg; tree->inst_offset = vt->inst_offset; mono_bblock_add_inst (s->cbb, tree); } 
else if (tree->inst_imm <= 20) { int sz = tree->inst_imm; sz += 7; sz &= ~7; MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, sz); mini_emit_memcpy (s, X86_ESP, 0, vt->inst_basereg, vt->inst_offset, tree->inst_imm, 0); } else { tree->opcode = OP_X86_PUSH_OBJ; tree->inst_basereg = vt->inst_basereg; tree->inst_offset = vt->inst_offset; mono_bblock_add_inst (s->cbb, tree); } } stmt: OP_OUTARG_VT (OP_ICONST) { tree->opcode = OP_X86_PUSH_IMM; tree->inst_imm = state->left->tree->inst_c0; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_OUTARG_VT (reg) { tree->opcode = OP_X86_PUSH; tree->sreg1 = state->left->tree->dreg; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_AMD64_OUTARG_ALIGN_STACK { MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, 8); } base: OP_INARG_VT (base) { MONO_EMIT_NEW_LOAD_MEMBASE (s, state->reg1, state->left->tree->inst_basereg, state->left->tree->inst_offset); } reg: OP_LDADDR (OP_INARG_VT (base)) { MONO_EMIT_NEW_LOAD_MEMBASE (s, state->reg1, state->left->left->tree->inst_basereg, state->left->left->tree->inst_offset); } reg: CEE_LDOBJ (OP_INARG_VT (base)) { MONO_EMIT_NEW_LOAD_MEMBASE (s, state->reg1, state->left->left->tree->inst_basereg, state->left->left->tree->inst_offset); } reg: OP_LDADDR (OP_REGOFFSET) "1" { if (state->left->tree->inst_offset) { tree->opcode = OP_X86_LEA_MEMBASE; tree->sreg1 = state->left->tree->inst_basereg; tree->inst_imm = state->left->tree->inst_offset; tree->dreg = state->reg1; } else { tree->opcode = OP_MOVE; tree->sreg1 = state->left->tree->inst_basereg; tree->dreg = state->reg1; } mono_bblock_add_inst (s->cbb, tree); } reg: CEE_LDOBJ (OP_REGOFFSET) "1" { if (state->left->tree->inst_offset) { tree->opcode = OP_X86_LEA_MEMBASE; tree->sreg1 = state->left->tree->inst_basereg; tree->inst_imm = state->left->tree->inst_offset; tree->dreg = state->reg1; } else { tree->opcode = OP_MOVE; tree->sreg1 = state->left->tree->inst_basereg; tree->dreg = state->reg1; } mono_bblock_add_inst (s->cbb, tree); } reg: 
CEE_LDELEMA (reg, reg) "15" { guint32 size = mono_class_array_element_size (tree->klass); MONO_EMIT_BOUNDS_CHECK (s, state->left->reg1, MonoArray, max_length, state->right->reg1); if (size == 1 || size == 2 || size == 4 || size == 8) { static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 }; int reg; /* The array reg is 64 bits but the index reg is only 32 */ tree->dreg = mono_regstate_next_float (s->rs); reg = mono_regstate_next_int (s->rs); MONO_EMIT_NEW_UNALU (s, OP_SEXT_I4, reg, state->right->reg1); tree->opcode = OP_X86_LEA; tree->dreg = state->reg1; tree->sreg1 = state->left->reg1; tree->sreg2 = reg; tree->inst_imm = G_STRUCT_OFFSET (MonoArray, vector); tree->unused = fast_log2 [size]; mono_bblock_add_inst (s->cbb, tree); } else { int mult_reg = mono_regstate_next_int (s->rs); int add_reg = mono_regstate_next_int (s->rs); MONO_EMIT_NEW_BIALU_IMM (s, OP_MUL_IMM, mult_reg, state->right->reg1, size); MONO_EMIT_NEW_BIALU (s, CEE_ADD, add_reg, mult_reg, state->left->reg1); MONO_EMIT_NEW_BIALU_IMM (s, OP_ADD_IMM, state->reg1, add_reg, G_STRUCT_OFFSET (MonoArray, vector)); } } stmt: CEE_STIND_R8 (OP_REGVAR, freg) { /* nothing to do: the value is already on the FP stack */ } stmt: CEE_STIND_I4 (base, CEE_ADD (CEE_LDIND_I4 (base), OP_ICONST)) { int con = state->right->right->tree->inst_c0; if (con == 1) { tree->opcode = OP_X86_INC_MEMBASE; } else { tree->opcode = OP_X86_ADD_MEMBASE_IMM; tree->inst_imm = con; } tree->inst_basereg = state->left->tree->inst_basereg; tree->inst_offset = state->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } cost { MBTREE_TYPE *t1 = state->right->left->left->tree; MBTREE_TYPE *t2 = state->left->tree; MBCOND (t1->inst_basereg == t2->inst_basereg && t1->inst_offset == t2->inst_offset); return 2; } stmt: CEE_STIND_I4 (base, CEE_SUB (CEE_LDIND_I4 (base), OP_ICONST)) { int con = state->right->right->tree->inst_c0; if (con == 1) { tree->opcode = OP_X86_DEC_MEMBASE; } else { tree->opcode = OP_X86_SUB_MEMBASE_IMM; 
tree->inst_imm = con; } tree->inst_basereg = state->left->tree->inst_basereg; tree->inst_offset = state->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } cost { MBTREE_TYPE *t1 = state->right->left->left->tree; MBTREE_TYPE *t2 = state->left->tree; MBCOND (t1->inst_basereg == t2->inst_basereg && t1->inst_offset == t2->inst_offset); return 2; } stmt: CEE_STIND_I4 (OP_REGVAR, CEE_SUB (CEE_LDIND_I4 (OP_REGVAR), OP_ICONST)) { int con = state->right->right->tree->inst_c0; int dreg = state->left->tree->dreg; int sreg = state->right->left->left->tree->dreg; if (con == 1) { if (dreg != sreg) MONO_EMIT_NEW_UNALU (s, OP_MOVE, dreg, sreg); tree->opcode = OP_X86_DEC_REG; tree->dreg = tree->sreg1 = dreg; } else if (con == -1) { if (dreg != sreg) MONO_EMIT_NEW_UNALU (s, OP_MOVE, dreg, sreg); tree->opcode = OP_X86_INC_REG; tree->dreg = tree->sreg1 = dreg; } else { tree->opcode = OP_SUB_IMM; tree->inst_imm = con; tree->sreg1 = sreg; tree->dreg = dreg; } mono_bblock_add_inst (s->cbb, tree); } stmt: CEE_STIND_I (OP_REGVAR, CEE_ADD (CEE_LDIND_I (OP_REGVAR), OP_ICONST)), stmt: CEE_STIND_I4 (OP_REGVAR, CEE_ADD (CEE_LDIND_I4 (OP_REGVAR), OP_ICONST)) { int con = state->right->right->tree->inst_c0; int dreg = state->left->tree->dreg; int sreg = state->right->left->left->tree->dreg; if (con == 1) { if (dreg != sreg) MONO_EMIT_NEW_UNALU (s, OP_MOVE, dreg, sreg); tree->opcode = OP_X86_INC_REG; tree->dreg = tree->sreg1 = dreg; } else if (con == -1) { if (dreg != sreg) MONO_EMIT_NEW_UNALU (s, OP_MOVE, dreg, sreg); tree->opcode = OP_X86_DEC_REG; tree->dreg = tree->sreg1 = dreg; } else { tree->opcode = OP_ADD_IMM; tree->inst_imm = con; tree->sreg1 = sreg; tree->dreg = dreg; } mono_bblock_add_inst (s->cbb, tree); } reg: CEE_LDIND_I2 (OP_REGVAR) { MONO_EMIT_UNALU (s, tree, OP_SEXT_I2, state->reg1, state->left->tree->dreg); } # The XOR rule stmt: CEE_STIND_I8 (OP_REGVAR, OP_ICONST), stmt: CEE_STIND_I4 (OP_REGVAR, OP_ICONST), stmt: CEE_STIND_I2 (OP_REGVAR, OP_ICONST), stmt: 
CEE_STIND_I1 (OP_REGVAR, OP_ICONST), stmt: CEE_STIND_REF (OP_REGVAR, OP_ICONST), stmt: CEE_STIND_I (OP_REGVAR, OP_ICONST), stmt: CEE_STIND_I8 (OP_REGVAR, OP_I8CONST), stmt: CEE_STIND_I4 (OP_REGVAR, OP_I8CONST), stmt: CEE_STIND_I2 (OP_REGVAR, OP_I8CONST), stmt: CEE_STIND_I1 (OP_REGVAR, OP_I8CONST), stmt: CEE_STIND_REF (OP_REGVAR, OP_I8CONST), stmt: CEE_STIND_I (OP_REGVAR, OP_I8CONST) { int r = state->left->tree->dreg; MONO_EMIT_BIALU (s, tree, CEE_XOR, r, r, r); } cost { MBCOND (!state->right->tree->inst_c0); return 0; } # on x86, fp compare overwrites EAX, so we must # either improve the local register allocator or # emit coarse opcodes which saves EAX for us. reg: OP_CEQ (OP_COMPARE (freg, freg)), reg: OP_CLT (OP_COMPARE (freg, freg)), reg: OP_CGT (OP_COMPARE (freg, freg)), reg: OP_CLT_UN (OP_COMPARE (freg, freg)), reg: OP_CGT_UN (OP_COMPARE (freg, freg)) { MONO_EMIT_BIALU (s, tree, ceq_to_fceq (tree->opcode), state->reg1, state->left->left->reg1, state->left->right->reg1); } # fpcflags overwrites EAX, but this does not matter for statements # because we are the last operation in the tree. 
stmt: CEE_BNE_UN (fpcflags), stmt: CEE_BEQ (fpcflags), stmt: CEE_BLT (fpcflags), stmt: CEE_BLT_UN (fpcflags), stmt: CEE_BGT (fpcflags), stmt: CEE_BGT_UN (fpcflags), stmt: CEE_BGE (fpcflags), stmt: CEE_BGE_UN (fpcflags), stmt: CEE_BLE (fpcflags), stmt: CEE_BLE_UN (fpcflags) { tree->opcode = cbranch_to_fcbranch (tree->opcode); mono_bblock_add_inst (s->cbb, tree); } stmt: CEE_POP (freg) "0" { /* we need to pop the value from the x86 FP stack */ MONO_EMIT_UNALU (s, tree, OP_X86_FPOP, -1, state->left->reg1); } # override the rules in inssel-float.brg that work for machines with FP registers freg: OP_FCONV_TO_R8 (freg) "0" { tree->opcode = OP_FMOVE; tree->sreg1 = state->left->reg1; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } freg: OP_FCONV_TO_R4 (freg) "0" { tree->opcode = OP_FMOVE; tree->sreg1 = state->left->reg1; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } reg: CEE_ADD(reg, CEE_LDIND_I4 (base)) { MonoInst *base = state->right->left->tree; MONO_EMIT_BIALU_MEMBASE (cfg, tree, OP_X86_ADD_MEMBASE, state->reg1, state->left->reg1, base->inst_basereg, base->inst_offset); } reg: CEE_SUB(reg, CEE_LDIND_I4 (base)) { MonoInst *base = state->right->left->tree; MONO_EMIT_BIALU_MEMBASE (cfg, tree, OP_X86_SUB_MEMBASE, state->reg1, state->left->reg1, base->inst_basereg, base->inst_offset); } reg: CEE_MUL(reg, CEE_LDIND_I4 (base)) { MonoInst *base = state->right->left->tree; MONO_EMIT_BIALU_MEMBASE (cfg, tree, OP_X86_MUL_MEMBASE, state->reg1, state->left->reg1, base->inst_basereg, base->inst_offset); } reg: OP_LSHL (reg, reg), reg: OP_LSHR (reg, reg), reg: OP_LSHR_UN (reg, reg), reg: OP_LMUL (reg, reg), reg: OP_LDIV (reg, reg), reg: OP_LDIV_UN (reg, reg), reg: OP_LREM (reg, reg), reg: OP_LREM_UN (reg, reg), reg: OP_LMUL_OVF (reg, reg), reg: OP_LMUL_OVF_UN (reg, reg) "0" { MONO_EMIT_BIALU (s, tree, tree->opcode, state->reg1, state->left->reg1, state->right->reg1); } reg: OP_LMUL (reg, OP_I8CONST), reg: OP_LSHL (reg, OP_ICONST), reg: OP_LSHR 
(reg, OP_ICONST), reg: OP_LSHR_UN (reg, OP_ICONST) { MONO_EMIT_BIALU_IMM (s, tree, bialu_to_bialu_imm (tree->opcode), state->reg1, state->left->reg1, state->right->tree->inst_c0); } cost { MBCOND (mono_arch_is_inst_imm (state->right->tree->inst_c0)); return 0; } reg: OP_ATOMIC_ADD_NEW_I4 (base, reg), reg: OP_ATOMIC_ADD_NEW_I8 (base, reg), reg: OP_ATOMIC_ADD_I4 (base, reg), reg: OP_ATOMIC_ADD_I8 (base, reg), reg: OP_ATOMIC_EXCHANGE_I4 (base, reg), reg: OP_ATOMIC_EXCHANGE_I8 (base, reg) { tree->opcode = tree->opcode; tree->dreg = state->reg1; tree->sreg2 = state->right->reg1; tree->inst_basereg = state->left->tree->inst_basereg; tree->inst_offset = state->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } # Optimized call instructions # mono_arch_patch_delegate_trampoline depends on these reg: OP_CALL_REG (CEE_LDIND_I (base)), freg: OP_FCALL_REG (CEE_LDIND_I (base)), reg: OP_LCALL_REG (CEE_LDIND_I (base)) { tree->opcode = call_reg_to_call_membase (tree->opcode); tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_VOIDCALL_REG (CEE_LDIND_I (base)) { tree->opcode = call_reg_to_call_membase (tree->opcode); tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; mono_bblock_add_inst (s->cbb, tree); } stmt: OP_VCALL_REG (CEE_LDIND_I (base), reg) { mono_arch_emit_this_vret_args (s, (MonoCallInst*)tree, -1, -1, state->right->reg1); tree->opcode = call_reg_to_call_membase (tree->opcode); tree->inst_basereg = state->left->left->tree->inst_basereg; tree->inst_offset = state->left->left->tree->inst_offset; tree->dreg = state->reg1; mono_bblock_add_inst (s->cbb, tree); } %% int bialu_to_bialu_imm (int opcode) { switch (opcode) { case OP_LMUL: return OP_LMUL_IMM; case OP_LSHL: return OP_LSHL_IMM; case OP_LSHR: return OP_LSHR_IMM; case OP_LSHR_UN: return OP_LSHR_UN_IMM; 
default: g_assert_not_reached (); } return -1; } int cbranch_to_fcbranch (int opcode) { switch (opcode) { case CEE_BNE_UN: return OP_FBNE_UN; case CEE_BEQ: return OP_FBEQ; case CEE_BLT: return OP_FBLT; case CEE_BLT_UN: return OP_FBLT_UN; case CEE_BGT: return OP_FBGT; case CEE_BGT_UN: return OP_FBGT_UN; case CEE_BGE: return OP_FBGE; case CEE_BGE_UN: return OP_FBGE_UN; case CEE_BLE: return OP_FBLE; case CEE_BLE_UN: return OP_FBLE_UN; default: g_assert_not_reached (); } return -1; } int ceq_to_fceq (int opcode) { switch (opcode) { case OP_CEQ: return OP_FCEQ; case OP_CLT: return OP_FCLT; case OP_CGT: return OP_FCGT; case OP_CLT_UN: return OP_FCLT_UN; case OP_CGT_UN: return OP_FCGT_UN; default: g_assert_not_reached (); } return -1; } int call_reg_to_call_membase (int opcode) { switch (opcode) { case OP_CALL_REG: return OP_CALL_MEMBASE; case OP_FCALL_REG: return OP_FCALL_MEMBASE; case OP_VCALL_REG: return OP_VCALL_MEMBASE; case OP_LCALL_REG: return OP_LCALL_MEMBASE; case OP_VOIDCALL_REG: return OP_VOIDCALL_MEMBASE; default: g_assert_not_reached (); } return -1; }