case OP_DIV_IMM:
case OP_REM_IMM:
case OP_IDIV_IMM:
- case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
+ case OP_IREM_IMM:
+ /* Keep the opcode if we can implement it efficiently */
+ if (!((ins->inst_imm > 0) && (mono_is_power_of_two (ins->inst_imm) != -1)))
+ mono_decompose_op_imm (cfg, bb, ins);
+ break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if (!amd64_is_imm32 (ins->inst_imm)) {
}
/*
- * emit_tls_get:
+ * mono_amd64_emit_tls_get:
* @code: buffer to store code to
* @dreg: hard register where to place the result
* @tls_offset: offset info
*
- * emit_tls_get emits in @code the native code that puts in the dreg register
- * the item in the thread local storage identified by tls_offset.
+ * mono_amd64_emit_tls_get emits in @code the native code that puts in
+ * the dreg register the item in the thread local storage identified
+ * by tls_offset.
*
* Returns: a pointer to the end of the stored code
*/
-static guint8*
-emit_tls_get (guint8* code, int dreg, int tls_offset)
+guint8*
+mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef PLATFORM_WIN32
g_assert (tls_offset < 64);
amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
}
break;
+ case OP_IREM_IMM: {
+ int power = mono_is_power_of_two (ins->inst_imm);
+
+ g_assert (ins->sreg1 == X86_EAX);
+ g_assert (ins->dreg == X86_EAX);
+ g_assert (power >= 0);
+
+ if (power == 0) {
+ amd64_mov_reg_imm (code, ins->dreg, 0);
+ break;
+ }
+
+ /* Based on gcc code */
+
+ /* Add compensation for negative dividends */
+ amd64_mov_reg_reg_size (code, AMD64_RDX, AMD64_RAX, 4);
+ if (power > 1)
+ amd64_shift_reg_imm_size (code, X86_SAR, AMD64_RDX, 31, 4);
+ amd64_shift_reg_imm_size (code, X86_SHR, AMD64_RDX, 32 - power, 4);
+ amd64_alu_reg_reg_size (code, X86_ADD, AMD64_RAX, AMD64_RDX, 4);
+ /* Compute remainder */
+ amd64_alu_reg_imm_size (code, X86_AND, AMD64_RAX, (1 << power) - 1, 4);
+ /* Remove compensation */
+ amd64_alu_reg_reg_size (code, X86_SUB, AMD64_RAX, AMD64_RDX, 4);
+ break;
+ }
case OP_LMUL_OVF:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
break;
case OP_TLS_GET: {
- code = emit_tls_get (code, ins->dreg, ins->inst_offset);
+ code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
}
case OP_MEMORY_BARRIER: {
if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
guint8 *buf, *no_domain_branch;
- code = emit_tls_get (code, AMD64_RAX, appdomain_tls_offset);
+ code = mono_amd64_emit_tls_get (code, AMD64_RAX, appdomain_tls_offset);
if ((domain >> 32) == 0)
amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
else
amd64_alu_reg_reg (code, X86_CMP, AMD64_RAX, AMD64_ARG_REG1);
no_domain_branch = code;
x86_branch8 (code, X86_CC_NE, 0, 0);
- code = emit_tls_get ( code, AMD64_RAX, lmf_addr_tls_offset);
+ code = mono_amd64_emit_tls_get ( code, AMD64_RAX, lmf_addr_tls_offset);
amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
buf = code;
x86_branch8 (code, X86_CC_NE, 0, 0);
} else {
if (lmf_addr_tls_offset != -1) {
/* Load lmf quickly using the FS register */
- code = emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
+ code = mono_amd64_emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
#ifdef PLATFORM_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
/* check if we need to restore protection of the stack after a stack overflow */
if (mono_get_jit_tls_offset () != -1) {
guint8 *patch;
- code = emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
+ code = mono_amd64_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
/* we load the value in a separate instruction: this mechanism may be
* used later as a safer way to do thread interruption
*/