/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
#define ARM_MUL_COND(p, rd, rm, rs, cond) \
ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
/* {rdhi,rdlo} := Rm * Rs; 32 x 32 -> 64, unsigned. rdhi goes in the slot
 * MUL uses for rd, rdlo in the slot MUL passes 0 for (the accumulate/Rn
 * position of ARM_DEF_MUL_COND).
 * NOTE(review): the leading '+'/'-' on some lines are diff markers carried
 * in from the patch this chunk was extracted from; they are preserved
 * byte-for-byte and are not C source. */
+#define ARM_UMULL_COND(p, rdhi, rdlo, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_UMULL, rdhi, rm, rs, rdlo, 0, cond))
/* Signed variant of the 64-bit multiply-long above; identical operand
 * routing, only the opcode differs. */
+#define ARM_SMULL_COND(p, rdhi, rdlo, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_SMULL, rdhi, rm, rs, rdlo, 0, cond))
/* Unconditional (ARMCOND_AL) convenience wrappers. */
#define ARM_MUL(p, rd, rm, rs) \
ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_UMULL(p, rdhi, rdlo, rm, rs) \
+ ARM_UMULL_COND(p, rdhi, rdlo, rm, rs, ARMCOND_AL)
+#define ARM_SMULL(p, rdhi, rdlo, rm, rs) \
+ ARM_SMULL_COND(p, rdhi, rdlo, rm, rs, ARMCOND_AL)
/* Flag-setting MUL: same encoding as ARM_MUL_COND but with the sixth
 * ARM_DEF_MUL_COND argument (presumably the S bit -- TODO confirm against
 * the ARM_DEF_MUL_COND definition, not visible here) set to 1. */
#define ARM_MULS_COND(p, rd, rm, rs, cond) \
ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
#define ARM_MULS(p, rd, rm, rs) \
ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
/* _REG_REG aliases -- presumably kept so multiply matches the naming of
 * the other register-register emitters; verify against callers. */
#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+#define ARM_UMULL_REG_REG(p, rdhi, rdlo, rm, rs) ARM_UMULL(p, rdhi, rdlo, rm, rs)
+#define ARM_SMULL_REG_REG(p, rdhi, rdlo, rm, rs) ARM_SMULL(p, rdhi, rdlo, rm, rs)
/* NOTE(review): chunk splice -- the ARM_IASM_MUL_COND definition below is
 * truncated; the ARM_DMB line that follows its continuation backslash
 * appears to come from a different part of the file (an opcode switch in
 * the code emitter). Nothing may be inserted between the two lines: the
 * '\' continuation would otherwise be broken. */
/* inline */
#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
ARM_DMB (code, ARM_DMB_SY);
break;
}
/* OP_BIGMUL / OP_BIGMUL_UN: full 64-bit product of two 32-bit operands.
 * The old PowerPC sequence (mullw/mulhw[u] into r4/r3) survives only on
 * the deleted '-' side of the diff; the ARM replacement emits a single
 * SMULL/UMULL. Per the emitter signature ARM_xMULL_REG_REG(p, rdhi, rdlo,
 * rm, rs): high word -> ins->backend.reg3, low word -> ins->dreg.
 * NOTE(review): leading '+'/'-' are diff markers from the source patch,
 * preserved byte-for-byte. Do not add text between the '-' lines below --
 * they carry an unbalanced block comment opened at 'case OP_BIGMUL' and
 * closed at 'break;'. */
- /*case OP_BIGMUL:
- ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
- ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
+ case OP_BIGMUL:
+ ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_BIGMUL_UN:
- ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
- ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
- break;*/
+ ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
/* Store-byte-immediate: materialize (inst_imm & 0xFF) in LR, then (past
 * the end of this chunk) store it; the assert requires the offset to fit
 * an ARM 12-bit addressing immediate. */
case OP_STOREI1_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
g_assert (arm_is_imm12 (ins->inst_offset));