/* Forward declarations. mono_op_to_op_imm_noemul takes only the opcode: the
 * emulated-opcode decision is made with arch #ifdefs, not per-MonoCompile state. */
static int stind_to_store_membase (int opcode);
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/*
 * mono_get_got_var:
 *
 *   Return the method-local variable holding the GOT address, creating it
 * lazily on first use. Only meaningful on architectures that define
 * MONO_ARCH_NEED_GOT_VAR and only when compiling AOT; in every other
 * configuration the GOT var is not needed and NULL is returned.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	/* The GOT var is only required for AOT-compiled code. */
	if (!cfg->compile_aot)
		return NULL;
	if (!cfg->got_var) {
		/* Lazily allocate a native-int local to hold the GOT address. */
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	}
	return cfg->got_var;
#else
	return NULL;
#endif
}
static MonoInst *
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
return;
}
MonoInst *dummy_use;
int nursery_shift_bits;
size_t nursery_size;
+ gboolean has_card_table_wb = FALSE;
if (!cfg->gen_write_barriers)
return;
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
- if (cfg->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
+#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
+ has_card_table_wb = TRUE;
+#endif
+
+ if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
{
MonoInst *vtable_arg;
int context_used;
+ gboolean use_op_generic_class_init = FALSE;
context_used = mini_class_check_context_used (cfg, klass);
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
- if (!COMPILE_LLVM (cfg) && cfg->have_op_generic_class_init) {
+#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
+ if (!COMPILE_LLVM (cfg))
+ use_op_generic_class_init = TRUE;
+#endif
+
+ if (use_op_generic_class_init) {
MonoInst *ins;
/*
return ins;
}
-static G_GNUC_UNUSED MonoInst*
+#ifndef MONO_ARCH_EMULATE_MUL_DIV
+static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
int bounds_reg = alloc_preg (cfg);
return ins;
}
+#endif
static MonoInst*
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
if (rank == 1)
return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
+#ifndef MONO_ARCH_EMULATE_MUL_DIV
/* emit_ldelema_2 depends on OP_LMUL */
- if (!cfg->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
+ if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
}
+#endif
if (mini_is_gsharedvt_variable_klass (eclass))
element_size = 0;
type_from_op (cfg, ins, NULL, NULL);
return ins;
- } else if (!cfg->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
+#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
+ } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
ins->type = STACK_I4;
return ins;
+#endif
} else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
!strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector"))
) {
- if (cfg->have_objc_get_selector &&
- !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
+#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
+ if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
+#endif
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
gboolean supported_tail_call;
int i;
+#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
+#else
+ supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
+#endif
for (i = 0; i < fsig->param_count; ++i) {
if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
!(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))) {
MonoInst *this_temp, *this_arg_temp, *store;
MonoInst *iargs [4];
+ gboolean use_imt = FALSE;
g_assert (fsig->is_inflated);
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (*ip);
- if (cfg->have_generalized_imt_thunk && cfg->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
+#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
+ use_imt = TRUE;
+#endif
+
+ if (use_imt) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
/* Use the immediate opcodes if possible */
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
- int imm_opcode = mono_op_to_op_imm_noemul (cfg, ins->opcode);
+ int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
int imm_opcode;
- imm_opcode = mono_op_to_op_imm_noemul (cfg, ins->opcode);
- if (cfg->emulate_mul_div || cfg->emulate_div) {
- /* Keep emulated opcodes which are optimized away later */
- if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
- imm_opcode = mono_op_to_op_imm (ins->opcode);
- }
+ imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
+#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
+ /* Keep emulated opcodes which are optimized away later */
+ if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
+ imm_opcode = mono_op_to_op_imm (ins->opcode);
}
+#endif
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
ad_ins = mono_get_domain_intrinsic (cfg);
jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
- if (cfg->have_tls_get && ad_ins && jit_tls_ins) {
+ if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
NEW_BBLOCK (cfg, next_bb);
NEW_BBLOCK (cfg, call_bb);
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Map OPCODE to its immediate-operand variant, but return -1 for opcodes
 * which are emulated (by a runtime helper call) on the current architecture,
 * since those have no usable immediate form. The emulation decision is a
 * compile-time property of the architecture, expressed with #ifdefs below.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* On 32-bit targets, long shifts are emulated unless the arch opts out. */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Division/remainder are emulated, so no immediate variant exists. */
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		/* Not emulated here: defer to the generic immediate mapping. */
		return mono_op_to_op_imm (opcode);
	}
}
/**
* sregs could use it. So set a flag, and do it after
* the sregs.
*/
- if ((!cfg->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
+ if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
dest_has_lvreg = TRUE;
}
}
sreg = alloc_dreg (cfg, stacktypes [regtype]);
- if ((!cfg->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
+ if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
if (var->dreg == prev_dreg) {
/*
* sreg refers to the value loaded by the load
}
}
+#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
/*
* Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
* by storing the current native offset into MonoMethodVar->live_range_start/end.
*/
- if (cfg->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
+ if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
for (i = 0; i < cfg->num_varinfo; ++i) {
int vreg = MONO_VARINFO (cfg, i)->vreg;
MonoInst *ins;
}
}
}
+#endif
if (cfg->gsharedvt_locals_var_ins) {
/* Nullify if unused */