#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-debug-debugger.h>
-#include <mono/metadata/gc-internal.h>
+#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include "jit.h"
#include "debugger-agent.h"
#include "seq-points.h"
+#include "aot-compiler.h"
+#include "mini-llvm.h"
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
CHECK_TYPE (ins); \
/* Have to insert a widening op */ \
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
- ins->dreg = alloc_dreg ((cfg), (ins)->type); \
+ ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
CHECK_TYPE (ins); \
- (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
+ (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
CHECK_TYPE (cmp); \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, ins, sp [0], sp [1]); \
- ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
+ ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
}
}
if (!found) {
- newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
+ newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
}
}
}
if (!found) {
- newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
+ newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
}
{
MonoInst *var;
- var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
+ var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
if (var)
return;
+/*
+ * mono_find_exvar_for_offset:
+ *
+ *   Look up in cfg->exvars the variable previously registered for OFFSET
+ * (keyed via GINT_TO_POINTER). Returns NULL if none exists. The explicit
+ * cast is needed because g_hash_table_lookup () returns a gpointer and the
+ * file must also compile as C++.
+ */
MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
- return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
+ return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
static MonoInst*
{
MonoInst *var;
- var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
+ var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
if (var)
return var;
+/*
+ * mono_get_got_var:
+ *
+ *   Return cfg->got_var, creating it on first use as a pointer-sized
+ * OP_LOCAL. Returns NULL unless compiling AOT on a backend which sets
+ * backend->need_got_var. (This replaces the old compile-time
+ * MONO_ARCH_NEED_GOT_VAR ifdef with a runtime backend flag.)
+ */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
-#ifdef MONO_ARCH_NEED_GOT_VAR
- if (!cfg->compile_aot)
+ if (!cfg->compile_aot || !cfg->backend->need_got_var)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
}
return cfg->got_var;
-#else
- return NULL;
-#endif
}
static MonoInst *
* table == 0 means this is a reference made from a wrapper.
*/
if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
- MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
+ MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
}
//printf ("\n");
if (!found) {
- bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
+ bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
/*
* try to reuse temps already allocated for this purpouse, if they occupy the same
}
}
+/*
+ * emit_runtime_constant:
+ *
+ *   Emit IR which loads the runtime constant described by PATCH_TYPE/DATA.
+ * When compiling AOT, an AOT constant is emitted so the value is patched at
+ * load time; otherwise the patch is resolved immediately through
+ * mono_resolve_patch_target () and the resulting address is embedded as a
+ * PCONST. Returns the instruction whose dreg holds the constant.
+ */
+static MonoInst*
+emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
+{
+ MonoInst *ins;
+
+ if (cfg->compile_aot) {
+ EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
+ } else {
+ MonoJumpInfo ji;
+ gpointer target;
+
+ ji.type = patch_type;
+ ji.data.target = data;
+ /* Resolve eagerly: NULL method/code since this is a global constant, not per-method */
+ target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
+
+ EMIT_NEW_PCONST (cfg, ins, target);
+ }
+ return ins;
+}
+
+
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
MONO_ADD_INS (cfg->cbb, ins);
args [0] = ins;
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
- else
- EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
+ args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
{
if (klass_inst) {
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
- } else if (cfg->compile_aot) {
- int const_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
} else {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
+ MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
+ MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
}
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
return;
}
-#if !NO_UNALIGNED_ACCESS
- if (SIZEOF_REGISTER == 8) {
+ if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
if (offset % 8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
offset += 4;
size -= 8;
}
}
-#endif
while (size >= 4) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
}
}
-#if !NO_UNALIGNED_ACCESS
- if (SIZEOF_REGISTER == 8) {
+ if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
while (size >= 8) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
size -= 8;
}
}
-#endif
while (size >= 4) {
cur_reg = alloc_preg (cfg);
}
static void
-emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
+emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
{
MonoInst *ins, *c;
if (target->byref) {
/* FIXME: check that the pointed to types match */
- if (arg->type == STACK_MP)
- return arg->klass != mono_class_from_mono_type (target);
+ if (arg->type == STACK_MP) {
+ MonoClass *base_class = mono_class_from_mono_type (target);
+ /* This is needed to handle gshared types + ldaddr */
+ simple_type = mini_get_underlying_type (&base_class->byval_arg);
+ return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
+ }
if (arg->type == STACK_PTR)
return 0;
return 1;
return 0;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (simple_type)) {
+ MonoClass *target_class;
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type (simple_type);
+ target_class = mono_class_from_mono_type (target);
/* The second cases is needed when doing partial sharing */
- if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
+ if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
return 1;
return 0;
} else {
return -1;
}
+/*
+ * callvirt_to_call_reg:
+ *
+ *   Map a *CALL_MEMBASE opcode to the corresponding *CALL_REG opcode, for
+ * when the (indirect) call target is already in a register — e.g. virtual
+ * or interface calls in llvm-only mode whose target was resolved through a
+ * runtime helper. Asserts on any opcode without a _REG counterpart.
+ */
+static int
+callvirt_to_call_reg (int opcode)
+{
+ switch (opcode) {
+ case OP_CALL_MEMBASE:
+ return OP_CALL_REG;
+ case OP_VOIDCALL_MEMBASE:
+ return OP_VOIDCALL_REG;
+ case OP_FCALL_MEMBASE:
+ return OP_FCALL_REG;
+ case OP_RCALL_MEMBASE:
+ return OP_RCALL_REG;
+ case OP_VCALL_MEMBASE:
+ return OP_VCALL_REG;
+ case OP_LCALL_MEMBASE:
+ return OP_LCALL_REG;
+ default:
+ g_assert_not_reached ();
+ }
+
+ /* not reached; keeps non-asserting builds well-formed */
+ return -1;
+}
+
+
/* Either METHOD or IMT_ARG needs to be set */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
int method_reg;
if (COMPILE_LLVM (cfg)) {
- method_reg = alloc_preg (cfg);
-
if (imt_arg) {
+ method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
- } else if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
} else {
- MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = method;
- ins->dreg = method_reg;
- MONO_ADD_INS (cfg->cbb, ins);
+ MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ method_reg = ins->dreg;
}
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
- mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
return;
}
- method_reg = alloc_preg (cfg);
-
if (imt_arg) {
+ method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
- } else if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
} else {
- MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = method;
- ins->dreg = method_reg;
- MONO_ADD_INS (cfg->cbb, ins);
+ MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
+ method_reg = ins->dreg;
}
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
- MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
+ MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
+ MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
{
MonoType *sig_ret;
MonoCallInst *call;
int i;
#endif
+ if (cfg->llvm_only)
+ tail = FALSE;
+
if (tail) {
emit_instrumentation_call (cfg, mono_profiler_method_leave);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
} else
- MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
+ MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
call->args = args;
call->signature = sig;
call->vret_var = loada;
} else if (!MONO_TYPE_IS_VOID (sig_ret))
- call->inst.dreg = alloc_dreg (cfg, call->inst.type);
+ call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg)) {
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
-#ifdef MONO_ARCH_RGCTX_REG
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
call->rgctx_arg_reg = rgctx_reg;
#endif
-#else
- NOT_IMPLEMENTED;
-#endif
}
inline static MonoInst*
#ifndef DISABLE_REMOTING
gboolean might_be_remote = FALSE;
#endif
- gboolean virtual = this_ins != NULL;
+ gboolean virtual_ = this_ins != NULL;
gboolean enable_for_aot = TRUE;
int context_used;
MonoCallInst *call;
+ MonoInst *call_target = NULL;
int rgctx_reg = 0;
gboolean need_unbox_trampoline;
if (!sig)
sig = mono_method_signature (method);
+ if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
+ MonoInst *icall_args [16];
+ MonoInst *ins;
+
+ // FIXME: Optimize this
+
+ guint32 imt_slot = mono_method_get_imt_slot (method);
+
+ icall_args [0] = this_ins;
+ EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
+ if (imt_arg) {
+ icall_args [2] = imt_arg;
+ } else {
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
+ icall_args [2] = ins;
+ }
+ EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
+
+ call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
+ }
+
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
#endif
+ if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
+ // FIXME: Vcall optimizations below
+ MonoInst *icall_args [16];
+ MonoInst *ins;
+
+ if (sig->generic_param_count) {
+ /*
+ * Generic virtual call, pass the concrete method as the imt argument.
+ */
+ imt_arg = emit_get_rgctx_method (cfg, context_used,
+ method, MONO_RGCTX_INFO_METHOD);
+ }
+
+ // FIXME: Optimize this
+
+ int slot = mono_method_get_vtable_index (method);
+
+ icall_args [0] = this_ins;
+ EMIT_NEW_ICONST (cfg, icall_args [1], slot);
+ if (imt_arg) {
+ icall_args [2] = imt_arg;
+ } else {
+ EMIT_NEW_PCONST (cfg, ins, NULL);
+ icall_args [2] = ins;
+ }
+ call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
+ }
+
need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
#ifndef DISABLE_REMOTING
if (might_be_remote)
call->inst.inst_left = this_ins;
call->tail_call = tail;
- if (virtual) {
+ if (virtual_) {
int vtable_reg, slot_reg, this_reg;
int offset;
this_reg = this_ins->dreg;
- if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
+ if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg);
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
call->inst.opcode = callvirt_to_call (call->inst.opcode);
+ } else if (call_target) {
+ vtable_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
+
+ call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
+ call->inst.sreg1 = call_target->dreg;
+ call->inst.flags &= !MONO_INST_HAS_METHOD;
} else {
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
call->inst.sreg1 = slot_reg;
call->inst.inst_offset = offset;
- call->virtual = TRUE;
+ call->is_virtual = TRUE;
}
}
MonoInst *dummy_use;
int nursery_shift_bits;
size_t nursery_size;
- gboolean has_card_table_wb = FALSE;
if (!cfg->gen_write_barriers)
return;
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
-#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
- has_card_table_wb = TRUE;
-#endif
-
- if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
+ if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
MONO_ADD_INS (cfg->cbb, wbarrier);
} else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
int offset_reg = alloc_preg (cfg);
- int card_reg = alloc_preg (cfg);
+ int card_reg;
MonoInst *ins;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
* IMM's larger than 32bits.
*/
- if (cfg->compile_aot) {
- MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
- } else {
- MONO_INST_NEW (cfg, ins, OP_PCONST);
- ins->inst_p0 = card_table;
- ins->dreg = card_reg;
- MONO_ADD_INS (cfg->cbb, ins);
- }
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
} else if (context_used) {
iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
} else {
- if (cfg->compile_aot) {
- EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
- } else {
- EMIT_NEW_PCONST (cfg, iargs [2], klass);
+ iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
+ if (!cfg->compile_aot)
mono_class_compute_gc_descriptor (klass);
- }
}
if (size_ins)
+/*
+ * mono_patch_info_rgctx_entry_new:
+ *
+ *   Allocate from MP a MonoJumpInfoRgctxEntry describing an rgctx slot for
+ * METHOD, wrapping an inner MonoJumpInfo of PATCH_TYPE / PATCH_DATA with
+ * result kind INFO_TYPE. Both allocations are zero-initialized mempool
+ * memory; the explicit casts keep the file compilable as C++.
+ */
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
{
- MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
+ MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
res->method = method;
res->in_mrgctx = in_mrgctx;
- res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
+ res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
res->data->type = patch_type;
res->data->data.target = patch_data;
res->info_type = info_type;
return res;
}
-/*
- * emit_rgctx_fetch:
- *
- * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
- * given by RGCTX.
- */
+/*
+ * emit_rgctx_fetch_inline:
+ *
+ *   Fetch the rgctx entry ENTRY from RGCTX without going through a
+ * trampoline, by calling the mono_fill_method_rgctx / mono_fill_class_rgctx
+ * JIT icalls (chosen by entry->in_mrgctx). There is no inline fastpath
+ * because the slot index is not a compile-time constant — it is obtained
+ * through a MONO_PATCH_INFO_RGCTX_SLOT_INDEX AOT constant. The #if 0 block
+ * below is a disabled fastpath kept for reference.
+ */
static inline MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
+ MonoInst *args [16];
+ MonoInst *call;
+
+ // FIXME: No fastpath since the slot is not a compile time constant
+ args [0] = rgctx;
+ EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
+ if (entry->in_mrgctx)
+ call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
+ else
+ call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
+ return call;
#if 0
+ /*
+ * FIXME: This can be called during decompose, which is a problem since it creates
+ * new bblocks.
+ * Also, the fastpath doesn't work since the slot number is dynamically allocated.
+ */
int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
gboolean mrgctx;
MonoBasicBlock *is_null_bb, *end_bb;
MONO_START_BB (cfg, end_bb);
return res;
#endif
}
+/*
+ * emit_rgctx_fetch:
+ *
+ * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
+ * given by RGCTX.
+ *
+ * In llvm-only mode the fetch is inlined as JIT icall invocations
+ * (emit_rgctx_fetch_inline); otherwise it is emitted as an absolute call
+ * through the rgctx lazy-fetch trampoline.
+ */
+static inline MonoInst*
+emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
+{
+ if (cfg->llvm_only)
+ return emit_rgctx_fetch_inline (cfg, rgctx, entry);
+ else
+ return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
+}
+
+
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoRgctxInfoType rgctx_type)
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
- call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
+ call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
call_info->sig = sig;
call_info->method = cmethod;
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
- info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
+ info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
info->klass = klass;
info->method = virt_method;
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
- MonoRuntimeGenericContextInfoTemplate *template;
+ MonoRuntimeGenericContextInfoTemplate *template_;
int i, idx;
g_assert (info);
MonoRuntimeGenericContextInfoTemplate *new_entries;
int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
- new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
+ new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
info->entries = new_entries;
}
idx = info->num_entries;
- template = &info->entries [idx];
- template->info_type = rgctx_type;
- template->data = data;
+ template_ = &info->entries [idx];
+ template_->info_type = rgctx_type;
+ template_->data = data;
info->num_entries ++;
{
MonoInst *vtable_arg;
int context_used;
- gboolean use_op_generic_class_init = FALSE;
context_used = mini_class_check_context_used (cfg, klass);
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
-#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
- if (!COMPILE_LLVM (cfg))
- use_op_generic_class_init = TRUE;
-#endif
-
- if (use_op_generic_class_init) {
+ if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
MonoInst *ins;
/*
if (cfg->opt & MONO_OPT_SHARED) {
int class_reg = alloc_preg (cfg);
+ MonoInst *ins;
+
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
- if (cfg->compile_aot) {
- int klass_reg = alloc_preg (cfg);
- MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
- MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
- } else {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
- }
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
+ MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
} else if (context_used) {
MonoInst *vtable_ins;
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
- unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
+ unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = &klass->byval_arg;
unbox_sig->param_count = 1;
unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
if (context_used) {
MonoInst *data;
- int rgctx_info;
+ MonoRgctxInfoType rgctx_info;
MonoInst *iargs [2];
gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Non-ref case */
* klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
* construct that method at JIT time, so have to do things by hand.
*/
- box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
+ box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
box_sig->ret = &mono_defaults.object_class->byval_arg;
box_sig->param_count = 1;
box_sig->params [0] = &klass->byval_arg;
EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* inline cache*/
- if (cfg->compile_aot) {
- idx = get_castclass_cache_idx (cfg);
- EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
- } else {
- EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
- }
+ idx = get_castclass_cache_idx (cfg);
+ args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
return emit_castclass_with_cache (cfg, klass, args);
}
{
- MonoInst *load, *and, *cmp, *ceq;
+ MonoInst *load, *and_, *cmp, *ceq;
int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int dest_reg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
- EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
+ EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
if (!is_i4) {
load = mono_decompose_opcode (cfg, load);
- and = mono_decompose_opcode (cfg, and);
+ and_ = mono_decompose_opcode (cfg, and_);
cmp = mono_decompose_opcode (cfg, cmp);
ceq = mono_decompose_opcode (cfg, ceq);
}
* Returns NULL and set the cfg exception on error.
*/
static G_GNUC_UNUSED MonoInst*
-handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
+handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
{
MonoInst *ptr;
int dreg;
MonoDomain *domain;
guint8 **code_slot;
- if (virtual) {
+ if (virtual_ && !cfg->llvm_only) {
MonoMethod *invoke = mono_get_delegate_invoke (klass);
g_assert (invoke);
return NULL;
}
- obj = handle_alloc (cfg, klass, FALSE, 0);
+ obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
if (!obj)
return NULL;
+ if (cfg->llvm_only) {
+ MonoInst *args [16];
+
+ /*
+ * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
+ * the address of a gshared method. So use a JIT icall.
+ * FIXME: Optimize this.
+ */
+ args [0] = obj;
+ args [1] = target;
+ args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
+ mono_emit_jit_icall (cfg, virtual_ ? mono_init_delegate_virtual : mono_init_delegate, args);
+
+ return obj;
+ }
+
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
mono_domain_lock (domain);
if (!domain_jit_info (domain)->method_code_hash)
domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
- code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
+ code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
if (!code_slot) {
- code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
+ code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
}
mono_domain_unlock (domain);
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
- else
- EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
+ code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
if (cfg->compile_aot) {
MonoDelegateClassMethodPair *del_tramp;
- del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
+ del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
del_tramp->klass = klass;
del_tramp->method = context_used ? NULL : method;
- del_tramp->virtual = virtual;
+ del_tramp->is_virtual = virtual_;
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
} else {
- if (virtual)
+ if (virtual_)
trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
else
trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
}
/* Set invoke_impl field */
- if (virtual) {
+ if (virtual_) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
} else {
dreg = alloc_preg (cfg);
}
dreg = alloc_preg (cfg);
- MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
+ MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
args [4] = ins;
if (mini_is_gsharedvt_type (fsig->params [0])) {
- int addr_reg;
+ int addr_reg, deref_arg_reg;
- args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
+ deref_arg_reg = alloc_preg (cfg);
+ /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
+ EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
addr_reg = ins->dreg;
return ins;
}
-#ifndef MONO_ARCH_EMULATE_MUL_DIV
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
return ins;
}
-#endif
static MonoInst*
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
if (rank == 1)
return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
-#ifndef MONO_ARCH_EMULATE_MUL_DIV
/* emit_ldelema_2 depends on OP_LMUL */
- if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
+ if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
}
-#endif
if (mini_is_gsharedvt_variable_klass (eclass))
element_size = 0;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
+ if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
+ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
+
if (safety_checks)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
{
uint32_t align;
+ int param_size, return_size;
param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
+ return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
- //Only allow for valuetypes
- if (!param_klass->valuetype || !return_klass->valuetype)
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
+
+ //Don't allow mixing reference types with value types
+ if (param_klass->valuetype != return_klass->valuetype) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
return FALSE;
+ }
+
+ if (!param_klass->valuetype) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
+ return TRUE;
+ }
//That are blitable
if (param_klass->has_references || return_klass->has_references)
/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
- (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
+ (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
return FALSE;
+ }
if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
- return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
+ return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
return FALSE;
+ }
+
+ param_size = mono_class_value_size (param_klass, &align);
+ return_size = mono_class_value_size (return_klass, &align);
+
+ //We can do it if sizes match
+ if (param_size == return_size) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
+ return TRUE;
+ }
- //And have the same size
- if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
+ //No simple way to handle struct if sizes don't match
+ if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
return FALSE;
- return TRUE;
+ }
+
+ /*
+ * Same reg size category.
+ * A quick note on why we don't require widening here.
+ * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
+ *
+ * Since the source value comes from a function argument, the JIT will already have
+ * the value in a VREG and performed any widening needed before (say, when loading from a field).
+ */
+ if (param_size <= 4 && return_size <= 4) {
+ if (cfg->verbose_level > 3)
+ printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
+ return TRUE;
+ }
+
+ return FALSE;
}
static MonoInst*
MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
- //Valuetypes that are semantically equivalent
+ if (mini_is_gsharedvt_variable_type (fsig->ret))
+ return NULL;
+
+ //Valuetypes that are semantically equivalent or numbers that can be widened to
if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
return args [0];
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
- /* The array reg is 64 bits but the index reg is only 32 */
- MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
+ if (COMPILE_LLVM (cfg)) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
+ } else {
+ /* The array reg is 64 bits but the index reg is only 32 */
+ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
+ }
#else
index_reg = args [1]->dreg;
#endif
type_from_op (cfg, ins, NULL, NULL);
return ins;
-#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
- } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
+ } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
ins->type = STACK_I4;
return ins;
-#endif
} else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
ins = NULL;
#if SIZEOF_REGISTER == 8
- if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
- if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
+ if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
+ if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
ins->dreg = mono_alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
(strcmp (cmethod->klass->name, "Volatile") == 0)) {
ins = NULL;
- if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
+ if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
guint32 opcode = 0;
gboolean is_ref = mini_type_is_reference (fsig->params [0]);
gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
}
}
- if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
+ if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
guint32 opcode = 0;
gboolean is_ref = mini_type_is_reference (fsig->params [0]);
EMIT_NEW_ICONST (cfg, ins, 0);
#endif
}
+ } else if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
+ (strcmp (cmethod->klass->name, "Assembly") == 0)) {
+ if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
+ /* No stack walks are currently available, so implement this as an intrinsic */
+ MonoInst *assembly_ins;
+
+ EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
+ ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
+ return ins;
+ }
} else if (cmethod->klass == mono_defaults.math_class) {
/*
* There is general branchless code for Min/Max, but it does not work for
!strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector"))
) {
-#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
- if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
+ if (cfg->backend->have_objc_get_selector &&
+ !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
cfg->disable_llvm = TRUE;
if (args [0]->opcode == OP_GOT_ENTRY) {
- pi = args [0]->inst_p1;
+ pi = (MonoInst *)args [0]->inst_p1;
g_assert (pi->opcode == OP_PATCH_INFO);
g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
- ji = pi->inst_p0;
+ ji = (MonoJumpInfoToken *)pi->inst_p0;
} else {
g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
- ji = args [0]->inst_p0;
+ ji = (MonoJumpInfoToken *)args [0]->inst_p0;
}
NULLIFY_INS (args [0]);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
-#endif
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
MonoInst *var = cfg->locals [local];
if (COMPILE_SOFT_FLOAT (cfg)) {
MonoInst *store;
- int reg = alloc_dreg (cfg, var->type);
+ int reg = alloc_dreg (cfg, (MonoStackType)var->type);
emit_init_rvar (cfg, reg, type);
EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
} else {
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
- gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
+ gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
}
prev_locals = cfg->locals;
- cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
+ cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
for (i = 0; i < cheader->num_locals; ++i)
cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
prev_disable_inline = cfg->disable_inline;
if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
- virtual = TRUE;
+ virtual_ = TRUE;
- costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
+ costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
ret_var_set = cfg->ret_var_set;
MonoMethod *method;
if (m->wrapper_type != MONO_WRAPPER_NONE) {
- method = mono_method_get_wrapper_data (m, token);
+ method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
if (context) {
MonoError error;
method = mono_class_inflate_generic_method_checked (method, context, &error);
MonoClass *klass;
if (method->wrapper_type != MONO_WRAPPER_NONE) {
- klass = mono_method_get_wrapper_data (method, token);
+ klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
if (context)
klass = mono_class_inflate_generic_class (klass, context);
} else {
/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
/* for aot code we do the lookup on load */
if (aot && data_ptr)
- return GUINT_TO_POINTER (rva);
+ return (const char *)GUINT_TO_POINTER (rva);
} else {
/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
g_assert (!aot);
gboolean supported_tail_call;
int i;
-#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
-#else
- supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
-#endif
for (i = 0; i < fsig->param_count; ++i) {
if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
return;
}
+/*
+ * emit_setret:
+ *
+ *   Emit IR which sets the return value of the current method to VAL.
+ * Valuetype returns requiring a CEE_STOBJ store go either through the
+ * return variable (when there is no vret_addr) or through an indirect
+ * OP_STOREV_MEMBASE to the hidden valuetype-return address. All other
+ * returns are delegated to the arch-specific mono_arch_emit_setret (),
+ * converting soft-float R4 values through the mono_fload_r4_arg icall
+ * first when the soft-float fallback is active.
+ */
+static void
+emit_setret (MonoCompile *cfg, MonoInst *val)
+{
+ MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
+ MonoInst *ins;
+
+ if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
+ MonoInst *ret_addr;
+
+ if (!cfg->vret_addr) {
+ /* No hidden return-address argument: store into the return var */
+ EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
+ } else {
+ /* Store the valuetype through the hidden return address */
+ EMIT_NEW_RETLOADA (cfg, ret_addr);
+
+ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
+ ins->klass = mono_class_from_mono_type (ret_type);
+ }
+ } else {
+#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
+ if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
+ MonoInst *iargs [1];
+ MonoInst *conv;
+
+ /* Soft float: convert the R4 value via an icall before setting the return reg */
+ iargs [0] = val;
+ conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
+ mono_arch_emit_setret (cfg, cfg->method, conv);
+ } else {
+ mono_arch_emit_setret (cfg, cfg->method, val);
+ }
+#else
+ mono_arch_emit_setret (cfg, cfg->method, val);
+#endif
+ }
+}
+
+/*
+ * sig_to_rgctx_sig:
+ *
+ *   Return a copy of SIG with one extra trailing native-int parameter
+ * appended, used to pass a hidden rgctx argument to the callee.
+ * The copy is allocated with g_malloc () and is currently never freed
+ * (see the FIXME below).
+ */
+static MonoMethodSignature*
+sig_to_rgctx_sig (MonoMethodSignature *sig)
+{
+ // FIXME: memory allocation
+ MonoMethodSignature *res;
+ int i;
+
+ /* Allocate room for the fixed header plus param_count + 1 parameter slots */
+ res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
+ memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
+ res->param_count = sig->param_count + 1;
+ for (i = 0; i < sig->param_count; ++i)
+ res->params [i] = sig->params [i];
+ /* The extra argument is pointer-sized (the rgctx) */
+ res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
+ return res;
+}
+
/*
* mono_method_to_ir:
*
* (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
* on some platforms.
*/
- if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
+ if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
init_locals = header->init_locals;
else
init_locals = TRUE;
cfg->real_offset = inline_offset;
}
- cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
+ cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
cfg->cil_offset_to_bb_len = header->code_size;
cfg->current_method = method;
if (cfg->verbose_level > 2)
printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
- param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
+ param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
if (sig->hasthis)
param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
for (n = 0; n < sig->param_count; ++n)
MonoBasicBlock *try_bb;
MonoExceptionClause *clause = &header->clauses [i];
GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
+
try_bb->real_offset = clause->try_offset;
try_bb->try_start = TRUE;
try_bb->region = ((i + 1) << 8) | clause->flags;
/* mostly like handle_stack_args (), but just sets the input args */
/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
tblock->in_scount = 1;
- tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
+ tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
cfg->cbb = tblock;
tblock->flags |= BB_EXCEPTION_HANDLER;
tblock->real_offset = clause->data.filter_offset;
tblock->in_scount = 1;
- tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
+ tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
/* The filter block shares the exvar with the handler block */
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
MonoInst *var, *locals_var;
int dreg;
- info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
+ info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
info->method = cfg->method;
info->count_entries = 16;
- info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
+ info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
cfg->gsharedvt_info = info;
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
}
/* we use a spare stack slot in SWITCH and NEWOBJ and others */
- stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
+ stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
ins_flag = 0;
start_new_bblock = 0;
#endif
/* FIXME: we should really allocate this only late in the compilation process */
- f = mono_domain_alloc (cfg->domain, sizeof (float));
+ f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
CHECK_OPSIZE (5);
CHECK_STACK_OVF (1);
#endif
/* FIXME: we should really allocate this only late in the compilation process */
- d = mono_domain_alloc (cfg->domain, sizeof (double));
+ d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
CHECK_OPSIZE (9);
CHECK_STACK_OVF (1);
break;
case CEE_JMP: {
MonoCallInst *call;
+ MonoMethodSignature *fsig;
+ int i, n;
INLINE_FAILURE ("jmp");
GSHAREDVT_FAILURE (*ip);
emit_instrumentation_call (cfg, mono_profiler_method_leave);
- if (ARCH_HAVE_OP_TAIL_CALL) {
- MonoMethodSignature *fsig = mono_method_signature (cmethod);
- int i, n;
+ fsig = mono_method_signature (cmethod);
+ n = fsig->param_count + fsig->hasthis;
+ if (cfg->llvm_only) {
+ MonoInst **args;
+ args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
+ for (i = 0; i < n; ++i)
+ EMIT_NEW_ARGLOAD (cfg, args [i], i);
+ ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
+ /*
+ * The code in mono-basic-block.c treats the rest of the code as dead, but we
+ * have to emit a normal return since llvm expects it.
+ */
+ if (cfg->ret)
+ emit_setret (cfg, ins);
+ MONO_INST_NEW (cfg, ins, OP_BR);
+ ins->inst_target_bb = end_bblock;
+ MONO_ADD_INS (cfg->cbb, ins);
+ link_bblock (cfg, cfg->cbb, end_bblock);
+ ip += 5;
+ break;
+ } else if (cfg->backend->have_op_tail_call) {
/* Handle tail calls similarly to calls */
- n = fsig->param_count + fsig->hasthis;
-
DISABLE_AOT (cfg);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
call->method = cmethod;
call->tail_call = TRUE;
call->signature = mono_method_signature (cmethod);
- call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
+ call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
call->inst.inst_p0 = cmethod;
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
CHECK_STACK (n);
- //g_assert (!virtual || fsig->hasthis);
+ //g_assert (!virtual_ || fsig->hasthis);
sp -= n;
INLINE_FAILURE ("indirect call");
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
- int info_type;
+ MonoJumpInfoType info_type;
gpointer info_data;
/*
* with the contents of the aotconst as the patch info.
*/
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
- info_type = addr->inst_c1;
+ info_type = (MonoJumpInfoType)addr->inst_c1;
info_data = addr->inst_p0;
} else {
- info_type = addr->inst_right->inst_c1;
+ info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
info_data = addr->inst_right->inst_left;
}
MonoInst *addr = NULL;
MonoMethodSignature *fsig = NULL;
int array_rank = 0;
- int virtual = *ip == CEE_CALLVIRT;
+ int virtual_ = *ip == CEE_CALLVIRT;
gboolean pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg = NULL;
MonoInst *keep_this_alive = NULL;
if (mono_security_core_clr_enabled ())
ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
- if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
+ if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
/* MS.NET seems to silently convert this to a callvirt */
- virtual = 1;
+ virtual_ = 1;
{
/*
*/
const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
- if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
- virtual = 1;
+ if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
+ virtual_ = 1;
}
if (!cmethod->klass->inited)
CHECK_STACK (n);
- //g_assert (!virtual || fsig->hasthis);
+ //g_assert (!virtual_ || fsig->hasthis);
sp -= n;
+ /*
+ * We have the `constrained.' prefix opcode.
+ */
if (constrained_class) {
if (mini_is_gsharedvt_klass (constrained_class)) {
if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
}
}
- /*
- * We have the `constrained.' prefix opcode.
- */
if (constrained_partial_call) {
gboolean need_box = TRUE;
NEW_BBLOCK (cfg, end_bb);
box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref case */
CHECK_CFG_EXCEPTION;
}
}
- virtual = 0;
+ virtual_ = 0;
}
constrained_class = NULL;
}
if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
MONO_METHOD_IS_FINAL (cmethod)) &&
!mono_class_is_marshalbyref (cmethod->klass)) {
- if (virtual)
+ if (virtual_)
check_this = TRUE;
- virtual = 0;
+ virtual_ = 0;
}
}
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
/* Calling virtual generic methods */
- if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
+ if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!(MONO_METHOD_IS_FINAL (cmethod) &&
cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
fsig->generic_param_count &&
- !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))) {
+ !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
+ !cfg->llvm_only) {
MonoInst *this_temp, *this_arg_temp, *store;
MonoInst *iargs [4];
- gboolean use_imt = FALSE;
g_assert (fsig->is_inflated);
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (*ip);
-#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
- use_imt = TRUE;
-#endif
-
- if (use_imt) {
+ if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) &&
- (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
+ (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod)) {
int costs;
gboolean always = FALSE;
!(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
MonoRgctxInfoType info_type;
- if (virtual) {
+ if (virtual_) {
//if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
//GSHAREDVT_FAILURE (*ip);
// disable for possible remoting calls
if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
keep_this_alive = sp [0];
- if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
+ if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
else
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
(!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) &&
- (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
+ (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
INLINE_FAILURE ("gshared");
goto call_end;
}
- ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
+ ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
if (ins)
goto call_end;
//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
- if (ARCH_HAVE_OP_TAIL_CALL) {
+ if (cfg->backend->have_op_tail_call) {
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
}
+ /*
+ * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg
+ * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
+ * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
+ * based on whether there is an rgctx or not.
+ */
+ if (cfg->llvm_only && virtual_ && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
+ MonoInst *args_buf [16], *icall_args [16];
+ MonoInst **args;
+ MonoBasicBlock *rgctx_bb, *end_bb;
+ MonoInst *call1, *call2, *call_target;
+ MonoMethodSignature *rgctx_sig;
+ int rgctx_reg, tmp_reg;
+
+ MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+
+ NEW_BBLOCK (cfg, rgctx_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ // FIXME: Optimize this
+
+ guint32 imt_slot = mono_method_get_imt_slot (cmethod);
+
+ icall_args [0] = sp [0];
+ EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
+ if (imt_arg) {
+ icall_args [2] = imt_arg;
+ } else {
+ EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
+ icall_args [2] = ins;
+ }
+
+ rgctx_reg = alloc_preg (cfg);
+ MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
+ EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
+ //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
+
+ call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
+
+ // FIXME: Only do this if needed (generic calls)
+
+ // Check whether to pass an rgctx
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
+ /* Non rgctx case */
+ call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+ /* Rgctx case */
+ MONO_START_BB (cfg, rgctx_bb);
+ /* Make a call with an rgctx */
+ if (fsig->param_count + 2 < 16)
+ args = args_buf;
+ else
+ args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
+ args [0] = sp [0];
+ for (i = 0; i < fsig->param_count; ++i)
+ args [i + 1] = sp [i + 1];
+ tmp_reg = alloc_preg (cfg);
+ EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
+ rgctx_sig = sig_to_rgctx_sig (fsig);
+ call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
+ call2->dreg = call1->dreg;
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+ /* End */
+ MONO_START_BB (cfg, end_bb);
+ ins = call1;
+ goto call_end;
+ }
+
/* Common call */
INLINE_FAILURE ("call");
- ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
+ ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
imt_arg, vtable_arg);
- if (tail_call) {
+ if (tail_call && !cfg->llvm_only) {
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
} else {
emit_instrumentation_call (cfg, mono_profiler_method_leave);
- if (cfg->lmf_var && cfg->cbb->in_count)
+ if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
emit_pop_lmf (cfg);
if (cfg->ret) {
if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
UNVERIFIED;
- if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
- MonoInst *ret_addr;
-
- if (!cfg->vret_addr) {
- MonoInst *ins;
-
- EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
- } else {
- EMIT_NEW_RETLOADA (cfg, ret_addr);
-
- EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
- ins->klass = mono_class_from_mono_type (ret_type);
- }
- } else {
-#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
- if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
- MonoInst *iargs [1];
- MonoInst *conv;
-
- iargs [0] = *sp;
- conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
- mono_arch_emit_setret (cfg, method, conv);
- } else {
- mono_arch_emit_setret (cfg, method, *sp);
- }
-#else
- mono_arch_emit_setret (cfg, method, *sp);
-#endif
- }
+ emit_setret (cfg, *sp);
}
}
if (sp != stack_start)
MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
type_from_op (cfg, ins, sp [0], NULL);
MONO_ADD_INS (cfg->cbb, ins);
- ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
+ ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
GET_BBLOCK (cfg, tblock, target);
ins->inst_true_bb = tblock;
GET_BBLOCK (cfg, tblock, ip);
GET_BBLOCK (cfg, default_bblock, target);
default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
- targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
+ targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
for (i = 0; i < n; ++i) {
GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
targets [i] = tblock;
for (i = 0; i < n; ++i)
link_bblock (cfg, cfg->cbb, targets [i]);
- table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
+ table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = targets;
table->table_size = n;
ins->sreg1 = src1->dreg;
ins->inst_p0 = table;
ins->inst_many_bb = targets;
- ins->klass = GUINT_TO_POINTER (n);
+ ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
MONO_ADD_INS (cfg->cbb, ins);
} else {
if (sizeof (gpointer) == 8)
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
- ins->dreg = alloc_dreg ((cfg), (ins)->type);
+ ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* Use the immediate opcodes if possible */
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
add_widen_op (cfg, ins, &sp [0], &sp [1]);
- ins->dreg = alloc_dreg ((cfg), (ins)->type);
+ ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* FIXME: Pass opcode to is_inst_imm */
}
else if (method->wrapper_type != MONO_WRAPPER_NONE) {
MonoInst *iargs [1];
- char *str = mono_method_get_wrapper_data (method, n);
+ char *str = (char *)mono_method_get_wrapper_data (method, n);
if (cfg->compile_aot)
EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* inline cache*/
- if (cfg->compile_aot) {
- idx = get_castclass_cache_idx (cfg);
- EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
- } else {
- EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
- }
+ idx = get_castclass_cache_idx (cfg);
+ args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
*sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
ip += 5;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
- field = mono_method_get_wrapper_data (method, token);
+ field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
klass = field->parent;
}
else {
context_used = mini_class_check_context_used (cfg, klass);
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ /* The value is offset by 1 */
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
/* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ /* The value is offset by 1 */
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
} else {
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ /* The value is offset by 1 */
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
MonoInst *offset_ins;
offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
+ /* The value is offset by 1 */
+ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
} else if (field->offset == 0) {
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
+ if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
+ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
+
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
} else {
CHECK_STACK (1);
--sp;
- MONO_INST_NEW (cfg, ins, OP_CKFINITE);
- ins->sreg1 = sp [0]->dreg;
- ins->dreg = alloc_freg (cfg);
- ins->type = STACK_R8;
- MONO_ADD_INS (cfg->cbb, ins);
+ if (cfg->llvm_only) {
+ MonoInst *iargs [1];
- *sp++ = mono_decompose_opcode (cfg, ins);
+ iargs [0] = sp [0];
+ *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_CKFINITE);
+ ins->sreg1 = sp [0]->dreg;
+ ins->dreg = alloc_freg (cfg);
+ ins->type = STACK_R8;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ *sp++ = mono_decompose_opcode (cfg, ins);
+ }
++ip;
break;
if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
handle = mono_method_get_wrapper_data (method, n);
- handle_class = mono_method_get_wrapper_data (method, n + 1);
+ handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
if (handle_class == mono_defaults.typehandle_class)
handle = &((MonoClass*)handle)->byval_arg;
}
typeof(Gen<>). */
context_used = 0;
} else if (handle_class == mono_defaults.typehandle_class) {
- context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
+ context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
} else if (handle_class == mono_defaults.fieldhandle_class)
context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
else if (handle_class == mono_defaults.methodhandle_class)
- context_used = mini_method_check_context_used (cfg, handle);
+ context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
else
g_assert_not_reached ();
}
(cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
(cmethod->klass == mono_defaults.systemtype_class) &&
(strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
- MonoClass *tclass = mono_class_from_mono_type (handle);
+ MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
mono_class_init (tclass);
if (context_used) {
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
}
} else {
- EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
+ EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, (MonoType *)handle));
}
ins->type = STACK_OBJ;
ins->klass = cmethod->klass;
if (context_used) {
if (handle_class == mono_defaults.typehandle_class) {
ins = emit_get_rgctx_klass (cfg, context_used,
- mono_class_from_mono_type (handle),
+ mono_class_from_mono_type ((MonoType *)handle),
MONO_RGCTX_INFO_TYPE);
} else if (handle_class == mono_defaults.methodhandle_class) {
ins = emit_get_rgctx_method (cfg, context_used,
- handle, MONO_RGCTX_INFO_METHOD);
+ (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
} else if (handle_class == mono_defaults.fieldhandle_class) {
ins = emit_get_rgctx_field (cfg, context_used,
- handle, MONO_RGCTX_INFO_CLASS_FIELD);
+ (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
} else {
g_assert_not_reached ();
}
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
+ /* This can complicate code generation for llvm since the return value might not be defined */
+ if (COMPILE_LLVM (cfg))
+ INLINE_FAILURE ("throw");
break;
case CEE_ENDFINALLY:
/* mono_save_seq_point_info () depends on this */
}
}
+#ifdef ENABLE_LLVM
+ cfg->cbb->try_end = (intptr_t)(ip - header->code);
+#endif
+
if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
GList *tmp;
MonoExceptionClause *clause;
for (tmp = handlers; tmp; tmp = tmp->next) {
- clause = tmp->data;
+ clause = (MonoExceptionClause *)tmp->data;
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
+
start_new_bblock = 1;
if (*ip == CEE_LEAVE)
break;
}
- case CEE_MONO_LDPTR_CARD_TABLE: {
- int shift_bits;
- gpointer card_mask;
- CHECK_STACK_OVF (1);
-
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
- else
- EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
-
- *sp++ = ins;
- ip += 2;
- inline_costs += 10 * num_calls++;
- break;
- }
- case CEE_MONO_LDPTR_NURSERY_START: {
- int shift_bits;
- size_t size;
- CHECK_STACK_OVF (1);
-
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
- else
- EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
-
- *sp++ = ins;
- ip += 2;
- inline_costs += 10 * num_calls++;
- break;
- }
+ case CEE_MONO_LDPTR_CARD_TABLE:
+ case CEE_MONO_LDPTR_NURSERY_START:
+ case CEE_MONO_LDPTR_NURSERY_BITS:
case CEE_MONO_LDPTR_INT_REQ_FLAG: {
CHECK_STACK_OVF (1);
- if (cfg->compile_aot)
- EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
- else
- EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
+ switch (ip [1]) {
+ case CEE_MONO_LDPTR_CARD_TABLE:
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_START:
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
+ break;
+ case CEE_MONO_LDPTR_NURSERY_BITS:
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
+ break;
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ break;
+ }
*sp++ = ins;
ip += 2;
CHECK_OPSIZE (6);
token = read32 (ip + 2);
- cmethod = mono_method_get_wrapper_data (method, token);
+ cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
CHECK_OPSIZE (6);
--sp;
token = read32 (ip + 2);
- klass = mono_method_get_wrapper_data (method, token);
+ klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
g_assert (klass->valuetype);
mono_class_init (klass);
ip += 2;
break;
case CEE_MONO_TLS: {
- int key;
+ MonoTlsKey key;
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
- key = (gint32)read32 (ip + 2);
+ key = (MonoTlsKey)read32 (ip + 2);
g_assert (key < TLS_KEY_NUM);
ins = mono_create_tls_get (cfg, key);
ins->sreg2 = sp [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
- cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
+ cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
ip += 2;
inline_costs += 10 * num_calls++;
ad_ins = mono_get_domain_intrinsic (cfg);
jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
- if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
+ if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
NEW_BBLOCK (cfg, next_bb);
NEW_BBLOCK (cfg, call_bb);
cmp->opcode = OP_ICOMPARE;
MONO_ADD_INS (cfg->cbb, cmp);
ins->type = STACK_I4;
- ins->dreg = alloc_dreg (cfg, ins->type);
+ ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
type_from_op (cfg, ins, arg1, arg2);
if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
/* FIXME: SGEN support */
- if (invoke_context_used == 0) {
+ if (invoke_context_used == 0 || cfg->llvm_only) {
ip += 6;
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
}
}
- if (cfg->lmf_var && cfg->method == method) {
+ if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
cfg->cbb = init_localsbb;
emit_push_lmf (cfg);
}
}
static inline int
-op_to_op_src1_membase (int load_opcode, int opcode)
+op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
/* FIXME: This has sign extension issues */
switch (opcode) {
case OP_X86_PUSH:
-#ifdef __mono_ilp32__
- if (load_opcode == OP_LOADI8_MEMBASE)
-#else
- if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
-#endif
+ if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_X86_PUSH_MEMBASE;
break;
/* FIXME: This only works for 32 bit immediates
break;
case OP_COMPARE:
case OP_LCOMPARE:
-#ifdef __mono_ilp32__
- if (load_opcode == OP_LOAD_MEMBASE)
+ if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
return OP_AMD64_ICOMPARE_MEMBASE_REG;
- if (load_opcode == OP_LOADI8_MEMBASE)
-#else
- if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
-#endif
+ if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_REG;
break;
case OP_ICOMPARE:
}
static inline int
-op_to_op_src2_membase (int load_opcode, int opcode)
+op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
#endif
#ifdef TARGET_AMD64
-#ifdef __mono_ilp32__
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
-#else
- if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
-#endif
+ if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
switch (opcode) {
case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
-#ifdef __mono_ilp32__
- } else if (load_opcode == OP_LOADI8_MEMBASE) {
-#else
- } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
-#endif
+ } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
switch (opcode) {
case OP_COMPARE:
case OP_LCOMPARE:
MonoBasicBlock *bb;
int i, pos;
- vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
+ vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->uses_simd_intrinsics)
guint32 *lvregs;
guint32 i, lvregs_len;
gboolean dest_has_lvreg = FALSE;
- guint32 stacktypes [128];
+ MonoStackType stacktypes [128];
MonoInst **live_range_start, **live_range_end;
MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
int *gsharedvt_vreg_to_idx = NULL;
}
if (cfg->gsharedvt) {
- gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
+ gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
* the variable again.
*/
orig_next_vreg = cfg->next_vreg;
- vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
- lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
+ vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
+ lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
lvregs_len = 0;
/*
* when variable addresses are known.
*/
if (ins->opcode == OP_LDADDR) {
- MonoInst *var = ins->inst_p0;
+ MonoInst *var = (MonoInst *)ins->inst_p0;
if (var->opcode == OP_VTARG_ADDR) {
/* Happens on SPARC/S390 where vtypes are passed by reference */
* sregs could use it. So set a flag, and do it after
* the sregs.
*/
- if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
+ if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
dest_has_lvreg = TRUE;
}
}
}
/* Try to fuse the load into the instruction */
- if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
- ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
+ if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
+ ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
sregs [0] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
- } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
- ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
+ } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
+ ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
sregs [1] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
sreg = alloc_dreg (cfg, stacktypes [regtype]);
- if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
+ if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
if (var->dreg == prev_dreg) {
/*
* sreg refers to the value loaded by the load
}
}
-#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
/*
* Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
* by storing the current native offset into MonoMethodVar->live_range_start/end.
*/
- if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
+ if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
for (i = 0; i < cfg->num_varinfo; ++i) {
int vreg = MONO_VARINFO (cfg, i)->vreg;
MonoInst *ins;
}
}
}
-#endif
if (cfg->gsharedvt_locals_var_ins) {
/* Nullify if unused */