#ifdef MINI_OP
#undef MINI_OP
#endif
-#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
+#ifdef MINI_OP3
+#undef MINI_OP3
+#endif
+#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
+#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#include "mini-ops.h"
};
#undef MINI_OP
+#undef MINI_OP3
+
+#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
+#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
+/*
+ * This should contain the index of the last sreg + 1. This is not the same
+ * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
+ */
+const gint8 ins_sreg_counts[] = {
+#include "mini-ops.h"
+};
+#undef MINI_OP
+#undef MINI_OP3
extern GHashTable *jit_icall_name_hash;
(vi)->idx = (id); \
} while (0)
+/*
+ * mono_inst_set_src_registers:
+ *
+ *   Set the three source registers of INS from REGS [0..2].
+ * REGS must point to at least three elements; all three sregs are
+ * copied unconditionally.
+ */
+void
+mono_inst_set_src_registers (MonoInst *ins, int *regs)
+{
+	ins->sreg1 = regs [0];
+	ins->sreg2 = regs [1];
+	ins->sreg3 = regs [2];
+}
+
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
} \
} while (0)
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_preg ((cfg)); \
ADD_WIDEN_OP (ins, sp [0], sp [1]); \
ins->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = ins; \
- mono_decompose_opcode ((cfg), (ins)); \
+ *sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)
#define ADD_UNOP(op) do { \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
- *sp++ = ins; \
- mono_decompose_opcode (cfg, ins); \
+ *sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
#define ADD_BINCOND(next_block) do { \
MonoExceptionClause *clause;
int i;
- /* first search for handlers and filters */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
- }
- /* search the try blocks */
- for (i = 0; i < header->num_clauses; ++i) {
- clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
}
* The got_var contains the address of the Global Offset Table when AOT
* compiling.
*/
-inline static MonoInst *
+MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
switch (size) {
case 1:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
- break;
+ return;
case 2:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
- break;
+ return;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
- break;
+ return;
#if SIZEOF_REGISTER == 8
case 8:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
+ return;
#endif
}
- return;
}
val_reg = alloc_preg (cfg);
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
- MonoInst **args, int calli, int virtual)
+ MonoInst **args, int calli, int virtual, int tail)
{
MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
int i;
#endif
- MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
+ if (tail)
+ MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
+ else
+ MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
call->args = args;
call->signature = sig;
type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ if (tail) {
+ if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+ call->vret_var = cfg->vret_addr;
+ //g_assert_not_reached ();
+ }
+ } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
MonoInst *loada;
}
#endif
+#ifdef ENABLE_LLVM
+ if (COMPILE_LLVM (cfg))
+ mono_llvm_emit_call (cfg, call);
+ else
+ mono_arch_emit_call (cfg, call);
+#else
mono_arch_emit_call (cfg, call);
+#endif
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
{
- MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
+ MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
call->inst.sreg1 = addr->dreg;
if (rgctx_arg) {
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
+ call->rgctx_reg = TRUE;
}
return (MonoInst*)call;
#else
#endif
}
+static MonoInst*
+emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
+
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
+ gboolean might_be_remote;
gboolean virtual = this != NULL;
gboolean enable_for_aot = TRUE;
+ int context_used;
MonoCallInst *call;
if (method->string_ctor) {
sig = ctor_sig;
}
- call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
+ might_be_remote = this && sig->hasthis &&
+ (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
+ !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
+
+ context_used = mono_method_check_context_used (method);
+ if (might_be_remote && context_used) {
+ MonoInst *addr;
- if (this && sig->hasthis &&
- (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
- !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
+ g_assert (cfg->generic_sharing_context);
+
+ addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
+
+ return mono_emit_calli (cfg, sig, args, addr);
+ }
+
+ call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
+
+ if (might_be_remote)
call->method = mono_marshal_get_remoting_invoke_with_check (method);
- } else {
+ else
call->method = method;
- }
call->inst.flags |= MONO_INST_HAS_METHOD;
call->inst.inst_left = this;
#ifdef MONO_ARCH_RGCTX_REG
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
+ call->rgctx_reg = TRUE;
#else
NOT_IMPLEMENTED;
#endif
g_assert (sig);
- call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
+ call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
((MonoCallInst*)ins)->fptr_is_patch = TRUE;
return ins;
}
+
+/*
+ * mono_emit_widen_call_res:
+ *
+ *   Widen the call result INS if needed: native (pinvoke) code -- and,
+ * when LLVM_ENABLED, LLVM compiled code -- might return small integers
+ * without initializing the upper bits of the result register, so emit
+ * an explicit widening conversion for I1/U1/I2/U2 returns.
+ * Returns the (possibly new) result instruction; INS is returned
+ * unchanged when no widening is required.
+ */
+static MonoInst*
+mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
+{
+	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
+		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
+			int widen_op = -1;
+
+			/*
+			 * Native code might return non register sized integers
+			 * without initializing the upper bits.
+			 */
+			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
+			case OP_LOADI1_MEMBASE:
+				widen_op = OP_ICONV_TO_I1;
+				break;
+			case OP_LOADU1_MEMBASE:
+				widen_op = OP_ICONV_TO_U1;
+				break;
+			case OP_LOADI2_MEMBASE:
+				widen_op = OP_ICONV_TO_I2;
+				break;
+			case OP_LOADU2_MEMBASE:
+				widen_op = OP_ICONV_TO_U2;
+				break;
+			default:
+				break;
+			}
+
+			if (widen_op != -1) {
+				int dreg = alloc_preg (cfg);
+				MonoInst *widen;
+
+				/* The widened value keeps the stack type of the original result. */
+				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
+				widen->type = ins->type;
+				ins = widen;
+			}
+		}
+	}
+
+	return ins;
+}
static MonoMethod*
get_memcpy_method (void)
return emit_rgctx_fetch (cfg, rgctx, entry);
}
+/*
+ * emit_generic_class_init:
+ *
+ *   Emit a call to the generic class init trampoline for KLASS, passing
+ * its vtable in MONO_ARCH_VTABLE_REG. With generic sharing the vtable is
+ * fetched through the RGCTX at run time; otherwise a vtable constant is
+ * embedded. Silently returns if the vtable cannot be obtained.
+ */
+static void
+emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
+{
+	MonoInst *vtable_arg;
+	MonoCallInst *call;
+	int context_used = 0;
+
+	if (cfg->generic_sharing_context)
+		context_used = mono_class_check_context_used (klass);
+
+	if (context_used) {
+		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
+										   klass, MONO_RGCTX_INFO_VTABLE);
+	} else {
+		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+
+		if (!vtable)
+			return;
+		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
+	}
+
+	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
+#ifdef MONO_ARCH_VTABLE_REG
+	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
+	cfg->uses_vtable_reg = TRUE;
+#else
+	NOT_IMPLEMENTED;
+#endif
+}
+
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+#ifdef MONO_CROSS_COMPILE
+ MonoMethod *managed_alloc = NULL;
+#else
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
+#endif
gboolean pass_lw;
if (managed_alloc) {
cfg->flags |= MONO_CFG_HAS_VARARGS;
+ /* mono_array_new_va () needs a vararg calling convention */
+ cfg->disable_llvm = TRUE;
+
/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
}
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
/* Avoid a warning */
#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
if (strcmp (cmethod->name, "Exchange") == 0) {
guint32 opcode;
+ gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
- else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
- (fsig->params [0]->type == MONO_TYPE_I) ||
- (fsig->params [0]->type == MONO_TYPE_OBJECT))
+ else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
+ (fsig->params [0]->type == MONO_TYPE_I))
opcode = OP_ATOMIC_EXCHANGE_I8;
#else
- else if ((fsig->params [0]->type == MONO_TYPE_I) ||
- (fsig->params [0]->type == MONO_TYPE_OBJECT))
+ else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
else
default:
g_assert_not_reached ();
}
+
+#if HAVE_WRITE_BARRIERS
+ if (is_ref) {
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
+ }
+#endif
}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
-#ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
- /*
- * Can't implement CompareExchange methods this way since they have
- * three arguments. We can implement one of the common cases, where the new
- * value is a constant.
- */
+#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
- if ((fsig->params [1]->type == MONO_TYPE_I4 ||
- (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
- && args [2]->opcode == OP_ICONST) {
- MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
+ int size = 0;
+ gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
+ if (fsig->params [1]->type == MONO_TYPE_I4)
+ size = 4;
+ else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
+ size = sizeof (gpointer);
+ else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
+ size = 8;
+ if (size == 4) {
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
- ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
+ ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
+ } else if (size == 8) {
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
+ ins->dreg = alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ ins->sreg3 = args [2]->dreg;
+ ins->type = STACK_I8;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ /* g_assert_not_reached (); */
+ }
+#if HAVE_WRITE_BARRIERS
+ if (is_ref) {
+ MonoMethod *write_barrier = mono_gc_get_write_barrier ();
+ mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
}
- /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
+#endif
}
-#endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
+#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
if (ins)
return ins;
if (strcmp (method->name, "InternalAllocateStr") == 0) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
+#ifdef MONO_CROSS_COMPILE
+ MonoMethod *managed_alloc = NULL;
+#else
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
+#endif
if (!managed_alloc)
return NULL;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
+/*
+ * field_access_exception:
+ *
+ *   Look up (and cache in a function-level static) the two-argument
+ * SecurityManager "FieldAccessException" method used to signal illegal
+ * field accesses.
+ *
+ * NOTE(review): the static cache is written without synchronization;
+ * presumably benign because the lookup is idempotent -- confirm.
+ */
static MonoMethod*
-verification_exception (void)
+field_access_exception (void)
{
	static MonoMethod *method = NULL;
	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
-			"VerificationException", 0);
+			"FieldAccessException", 2);
	}
	g_assert (method);
	return method;
}
+/*
+ * emit_throw_field_access_exception:
+ *
+ *   Emit a call which raises a FieldAccessException identifying CALLER
+ * and the offending FIELD (see field_access_exception ()).
+ */
static void
-emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
+emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
+						MonoBasicBlock *bblock, unsigned char *ip)
{
-	MonoMethod *thrower = verification_exception ();
+	MonoMethod *thrower = field_access_exception ();
+	MonoInst *args [2];
-	mono_emit_method_call (cfg, thrower, NULL, NULL);
+	EMIT_NEW_METHODCONST (cfg, args [0], caller);
+	/* NOTE(review): args [1] embeds a MonoClassField* via EMIT_NEW_METHODCONST;
+	   presumably the macro only stores the pointer -- confirm. */
+	EMIT_NEW_METHODCONST (cfg, args [1], field);
+	mono_emit_method_call (cfg, thrower, args, NULL);
+}
+
+/*
+ * get_original_method:
+ *
+ *   Return the original method if METHOD is a wrapper. We can only access
+ * the custom attributes (and hence the coreclr security level) from the
+ * original method. Returns NULL for native-to-managed wrappers, where no
+ * managed caller can be recovered.
+ */
+static MonoMethod*
+get_original_method (MonoMethod *method)
+{
+	if (method->wrapper_type == MONO_WRAPPER_NONE)
+		return method;
+
+	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
+	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
+		return NULL;
+
+	/* in other cases we need to find the original method */
+	return mono_marshal_method_from_wrapper (method);
+}
+/*
+ * ensure_method_is_allowed_to_access_field:
+ *
+ *   CoreCLR security check: a Transparent CALLER may not access a field
+ * of a Critical class, so emit a FieldAccessException throw in that case.
+ * Transparent and SafeCritical fields are always accessible; wrapper
+ * callers whose original method cannot be found are let through.
+ */
static void
-ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
-					 MonoBasicBlock *bblock, unsigned char *ip)
+ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
+						MonoBasicBlock *bblock, unsigned char *ip)
{
-	MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
-	MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
-	gboolean is_safe = TRUE;
+	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
+	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
+		return;
-	if (!(caller_level >= callee_level ||
-			caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
-			callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
-		is_safe = FALSE;
-	}
+	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
+	caller = get_original_method (caller);
+	if (!caller)
+		return;
-	if (!is_safe)
-		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
+	/* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
+	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
+		emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
}
-static gboolean
-method_is_safe (MonoMethod *method)
+/*
+ * ensure_method_is_allowed_to_call_method:
+ *
+ *   CoreCLR security check: a Transparent CALLER may not call a Critical
+ * CALLEE, so emit a MethodAccessException throw in that case. Transparent
+ * and SafeCritical callees are always callable; wrapper callers whose
+ * original method cannot be found are let through.
+ */
+static void
+ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
+						MonoBasicBlock *bblock, unsigned char *ip)
{
-	/*
-	if (strcmp (method->name, "unsafeMethod") == 0)
-		return FALSE;
-	*/
-	return TRUE;
+	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
+	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
+		return;
+
+	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
+	caller = get_original_method (caller);
+	if (!caller)
+		return;
+
+	/* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
+	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
+		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
}
/*
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
-#if G_BYTE_ORDER == G_LITTLE_ENDIAN
+#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
+ gboolean init_locals;
/* serialization and xdomain stuff may need access to private fields and methods */
dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
cfg->cil_start = ip;
end = ip + header->code_size;
mono_jit_stats.cil_code_size += header->code_size;
+ init_locals = header->init_locals;
+
+ /*
+ * Methods without init_locals set could cause asserts in various passes
+ * (#497220).
+ */
+ init_locals = TRUE;
method_definition = method;
while (method_definition->is_inflated) {
}
}
- if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
+ if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
cfg->bb_init = init_localsbb;
}
}
}
- if (!method_is_safe (method))
- emit_throw_verification_exception (cfg, bblock, ip);
}
if (header->code_size == 0)
cfg->coverage_info->data [cil_offset].cil_code = ip;
/* TODO: Use an increment here */
-#if defined(__i386__)
+#if defined(TARGET_X86)
MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
ins->inst_imm = 1;
break;
case CEE_LDC_R4: {
float *f;
+ gboolean use_aotconst = FALSE;
+
+#ifdef TARGET_POWERPC
+ /* FIXME: Clean this up */
+ if (cfg->compile_aot)
+ use_aotconst = TRUE;
+#endif
+
/* FIXME: we should really allocate this only late in the compilation process */
f = mono_domain_alloc (cfg->domain, sizeof (float));
CHECK_OPSIZE (5);
CHECK_STACK_OVF (1);
- MONO_INST_NEW (cfg, ins, OP_R4CONST);
- ins->type = STACK_R8;
- ins->dreg = alloc_dreg (cfg, STACK_R8);
+
+ if (use_aotconst) {
+ MonoInst *cons;
+ int dreg;
+
+ EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
+
+ dreg = alloc_freg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
+ ins->type = STACK_R8;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_R4CONST);
+ ins->type = STACK_R8;
+ ins->dreg = alloc_dreg (cfg, STACK_R8);
+ ins->inst_p0 = f;
+ MONO_ADD_INS (bblock, ins);
+ }
++ip;
readr4 (ip, f);
- ins->inst_p0 = f;
- MONO_ADD_INS (bblock, ins);
-
ip += 4;
*sp++ = ins;
break;
}
case CEE_LDC_R8: {
double *d;
+ gboolean use_aotconst = FALSE;
+
+#ifdef TARGET_POWERPC
+ /* FIXME: Clean this up */
+ if (cfg->compile_aot)
+ use_aotconst = TRUE;
+#endif
+
/* FIXME: we should really allocate this only late in the compilation process */
d = mono_domain_alloc (cfg->domain, sizeof (double));
CHECK_OPSIZE (9);
CHECK_STACK_OVF (1);
- MONO_INST_NEW (cfg, ins, OP_R8CONST);
- ins->type = STACK_R8;
- ins->dreg = alloc_dreg (cfg, STACK_R8);
+
+ if (use_aotconst) {
+ MonoInst *cons;
+ int dreg;
+
+ EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
+
+ dreg = alloc_freg (cfg);
+ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
+ ins->type = STACK_R8;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_R8CONST);
+ ins->type = STACK_R8;
+ ins->dreg = alloc_dreg (cfg, STACK_R8);
+ ins->inst_p0 = d;
+ MONO_ADD_INS (bblock, ins);
+ }
++ip;
readr8 (ip, d);
- ins->inst_p0 = d;
- MONO_ADD_INS (bblock, ins);
-
ip += 8;
- *sp++ = ins;
+ *sp++ = ins;
break;
}
case CEE_DUP: {
ip++;
--sp;
-#ifdef __i386__
+#ifdef TARGET_X86
if (sp [0]->type == STACK_R8)
/* we need to pop the value from the x86 FP stack */
MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
CHECK_CFG_EXCEPTION;
-#ifdef __x86_64__
+#ifdef MONO_ARCH_USE_OP_TAIL_CALL
{
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
gboolean pass_mrgctx = FALSE;
MonoInst *vtable_arg = NULL;
gboolean check_this = FALSE;
+ gboolean supported_tail_call = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
UNVERIFIED;
+ /*
+ * If the callee is a shared method, then its static cctor
+ * might not get called after the call was patched.
+ */
+ if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
+ emit_generic_class_init (cfg, cmethod->klass);
+ }
if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
/* Prevent inlining of methods that contain indirect calls */
INLINE_FAILURE;
-#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
- if (cmethod->wrapper_type == MONO_WRAPPER_NONE) {
+#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
+ if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
g_assert (!imt_arg);
if (context_used) {
imt_arg = emit_get_rgctx_method (cfg, context_used,
}
if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = ins;
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
ip += 5;
ins_flag = 0;
break;
}
+#ifdef MONO_ARCH_USE_OP_TAIL_CALL
+ supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
+#else
+ supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
+#endif
+
/* Tail prefix */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
- if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
- (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
+ if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
MonoCallInst *call;
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
INLINE_FAILURE;
+#ifdef MONO_ARCH_USE_OP_TAIL_CALL
+ /* Handle tail calls similarly to calls */
+ call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
+#else
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
call->method = cmethod;
call->signature = mono_method_signature (cmethod);
-#ifdef __x86_64__
- /* Handle tail calls similarly to calls */
- call->inst.opcode = OP_TAILCALL;
- call->args = sp;
- mono_arch_emit_call (cfg, call);
-#else
/*
* We implement tail calls by storing the actual arguments into the
* argument variables, then emitting a CEE_JMP.
call = (MonoCallInst*)ins;
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
+ call->rgctx_reg = TRUE;
#else
NOT_IMPLEMENTED;
#endif
*/
ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
NULLIFY_INS (addr);
+ } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
+ ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
+ NULLIFY_INS (addr);
} else {
ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
}
}
- if (!MONO_TYPE_IS_VOID (fsig->ret)) {
- if (fsig->pinvoke && !fsig->ret->byref) {
- int widen_op = -1;
-
- /*
- * Native code might return non register sized integers
- * without initializing the upper bits.
- */
- switch (mono_type_to_load_membase (cfg, fsig->ret)) {
- case OP_LOADI1_MEMBASE:
- widen_op = OP_ICONV_TO_I1;
- break;
- case OP_LOADU1_MEMBASE:
- widen_op = OP_ICONV_TO_U1;
- break;
- case OP_LOADI2_MEMBASE:
- widen_op = OP_ICONV_TO_I2;
- break;
- case OP_LOADU2_MEMBASE:
- widen_op = OP_ICONV_TO_U2;
- break;
- default:
- break;
- }
-
- if (widen_op != -1) {
- int dreg = alloc_preg (cfg);
- MonoInst *widen;
-
- EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
- widen->type = ins->type;
- ins = widen;
- }
- }
-
- *sp++ = ins;
- }
+ if (!MONO_TYPE_IS_VOID (fsig->ret))
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
ip += 5;
ins_flag = 0;
ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
if (ins) {
if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = ins;
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
ip += 5;
ins_flag = 0;
}
if (!MONO_TYPE_IS_VOID (fsig->ret))
- *sp++ = ins;
+ *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
ip += 5;
ins_flag = 0;
table->table_size = n;
use_op_switch = FALSE;
-#ifdef __arm__
+#ifdef TARGET_ARM
/* ARM implements SWITCH statements differently */
/* FIXME: Make it use the generic implementation */
if (!cfg->compile_aot)
use_op_switch = TRUE;
#endif
-
+
+ if (COMPILE_LLVM (cfg))
+ use_op_switch = TRUE;
+
+ cfg->cbb->has_jump_table = 1;
+
if (use_op_switch) {
MONO_INST_NEW (cfg, ins, OP_SWITCH);
ins->sreg1 = src1->dreg;
CHECK_STACK (2);
sp -= 2;
+ NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
+ ins->flags |= ins_flag;
+ ins_flag = 0;
+ MONO_ADD_INS (bblock, ins);
+
#if HAVE_WRITE_BARRIERS
if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
/* insert call to write barrier */
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, sp, NULL);
- ins_flag = 0;
- ip++;
- break;
}
#endif
- NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
- ins->flags |= ins_flag;
- ins_flag = 0;
- MONO_ADD_INS (bblock, ins);
inline_costs += 1;
++ip;
break;
}
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = ins;
- mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins);
ip++;
break;
case CEE_ADD:
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
- *sp++ = ins;
- mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins);
ip++;
break;
case CEE_NEG:
/* Optimize the ldobj+stobj combination */
/* The reference case ends up being a load+store anyway */
- if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
+ if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
CHECK_STACK (1);
sp --;
INLINE_FAILURE;
ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
callvirt_this_arg, NULL, vtable_arg);
- if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
- GENERIC_SHARING_FAILURE (*ip);
}
}
FIELD_ACCESS_FAILURE;
mono_class_init (klass);
+ /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
+ any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+ */
+
foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
if (*ip == CEE_STFLD) {
if (target_type_is_incompatible (cfg, field->type, sp [1]))
} else {
MonoInst *store;
+ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
+
#if HAVE_WRITE_BARRIERS
if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
/* insert call to write barrier */
}
#endif
- EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
-
store->flags |= ins_flag;
}
ins_flag = 0;
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
- if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
+ if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
iargs, ip, cfg->real_offset, dont_inline, TRUE);
bblock = cfg->cbb;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
FIELD_ACCESS_FAILURE;
+ /* if the class is Critical then transparent code cannot access it's fields */
+ if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
+ ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
+
/*
* We can only support shared generic static
* field access on architectures where the
ins->dreg = alloc_freg (cfg);
ins->type = STACK_R8;
MONO_ADD_INS (bblock, ins);
- *sp++ = ins;
- mono_decompose_opcode (cfg, ins);
+ *sp++ = mono_decompose_opcode (cfg, ins);
++ip;
break;
ins = emit_get_rgctx_klass (cfg, context_used,
tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
} else if (cfg->compile_aot) {
- EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
+ if (method->wrapper_type) {
+ /* FIXME: n is not a normal token */
+ cfg->disable_aot = TRUE;
+ EMIT_NEW_PCONST (cfg, ins, NULL);
+ } else {
+ EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
+ }
} else {
EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
}
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
MONO_ADD_INS (bblock, ins);
+ bblock->has_call_handler = 1;
}
g_list_free (handlers);
}
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins;
+ MonoMethod *invoke;
+
+ invoke = mono_get_delegate_invoke (ctor_method->klass);
+ if (!invoke || !mono_method_signature (invoke))
+ goto load_error;
ip += 6;
if (cfg->verbose_level > 3)
#endif
if (context_used) {
- if (needs_static_rgctx_invoke)
- cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
-
argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
- } else if (needs_static_rgctx_invoke) {
- EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
} else {
EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
}
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
- if (header->init_locals)
+ if (init_locals)
ins->flags |= MONO_INST_INIT;
*sp++ = ins;
MONO_ADD_INS (cfg->cbb, store);
}
+#ifdef TARGET_POWERPC
+ if (cfg->compile_aot)
+ /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
+ mono_get_got_var (cfg);
+#endif
+
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
- if (header->init_locals) {
+ if (init_locals) {
MonoInst *store;
cfg->cbb = init_localsbb;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
-#if defined(__i386__) || defined (__x86_64__)
+#if defined(TARGET_X86) || defined (TARGET_AMD64)
case OP_X86_PUSH:
return OP_X86_PUSH_IMM;
case OP_X86_COMPARE_MEMBASE_REG:
return OP_X86_COMPARE_MEMBASE_IMM;
#endif
-#if defined(__x86_64__)
+#if defined(TARGET_AMD64)
case OP_AMD64_ICOMPARE_MEMBASE_REG:
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
mono_load_membase_to_load_mem (int opcode)
{
// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_LOAD_MEMBASE:
return OP_LOAD_MEM;
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
-#if defined(__i386__)
+#if defined(TARGET_X86)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
return -1;
}
#endif
-#if defined(__x86_64__)
+#if defined(TARGET_AMD64)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
return -1;
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_ICEQ:
if (store_opcode == OP_STOREI1_MEMBASE_REG)
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
-#ifdef __i386__
+#ifdef TARGET_X86
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
}
#endif
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
-#ifdef __i386__
+#ifdef TARGET_X86
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
}
#endif
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
switch (opcode) {
case OP_ICOMPARE:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
g_assert (ins->opcode >= MONO_CEE_LAST);
- for (regindex = 0; regindex < 3; regindex ++) {
+ for (regindex = 0; regindex < 4; regindex ++) {
int vreg;
if (regindex == 0) {
if (regtype == ' ')
continue;
vreg = ins->sreg1;
- } else {
+ } else if (regindex == 2) {
regtype = spec [MONO_INST_SRC2];
if (regtype == ' ')
continue;
vreg = ins->sreg2;
+ } else if (regindex == 3) {
+ regtype = spec [MONO_INST_SRC3];
+ if (regtype == ' ')
+ continue;
+ vreg = ins->sreg3;
}
#if SIZEOF_REGISTER == 4
- if (regtype == 'l') {
+ /* In the LLVM case, the long opcodes are not decomposed */
+ if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
/*
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
-#if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
+#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
/* Enabling this screws up the fp stack on x86 */
case STACK_R8:
#endif
cfg->cbb = bb;
MONO_BB_FOR_EACH_INS (bb, ins) {
const char *spec = INS_INFO (ins->opcode);
- int regtype, srcindex, sreg, tmp_reg, prev_dreg;
+ int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
gboolean store, no_lvreg;
+ int sregs [MONO_MAX_SRC_REGS];
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
+ spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (MONO_IS_STORE_MEMINDEX (ins))
g_assert_not_reached ();
store = FALSE;
no_lvreg = FALSE;
- if (G_UNLIKELY (cfg->verbose_level > 2))
- printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
+ if (G_UNLIKELY (cfg->verbose_level > 2)) {
+ printf ("\t %.3s %d", spec, ins->dreg);
+ num_sregs = mono_inst_get_src_registers (ins, sregs);
+ for (srcindex = 0; srcindex < 3; ++srcindex)
+ printf (" %d", sregs [srcindex]);
+ printf ("\n");
+ }
/***************/
/* DREG */
ins->inst_imm = ins->inst_c0;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
+ spec = INS_INFO (ins->opcode);
} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
+ spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
// FIXME: The backends expect the base reg to be in inst_basereg
/************/
/* SREGS */
/************/
- for (srcindex = 0; srcindex < 2; ++srcindex) {
- regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
- sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
+ num_sregs = mono_inst_get_src_registers (ins, sregs);
+ for (srcindex = 0; srcindex < 3; ++srcindex) {
+ regtype = spec [MONO_INST_SRC1 + srcindex];
+ sreg = sregs [srcindex];
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
guint32 load_opcode;
if (var->opcode == OP_REGVAR) {
- if (srcindex == 0)
- ins->sreg1 = var->dreg;
- else
- ins->sreg2 = var->dreg;
- live_range_end [var->dreg] = use_ins;
- live_range_end_bb [var->dreg] = bb;
+ sregs [srcindex] = var->dreg;
+ //mono_inst_set_src_registers (ins, sregs);
+ live_range_end [sreg] = use_ins;
+ live_range_end_bb [sreg] = bb;
continue;
}
g_assert (load_opcode != OP_LOADV_MEMBASE);
if (vreg_to_lvreg [sreg]) {
+ g_assert (vreg_to_lvreg [sreg] != -1);
+
/* The variable is already loaded to an lvreg */
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
- if (srcindex == 0)
- ins->sreg1 = vreg_to_lvreg [sreg];
- else
- ins->sreg2 = vreg_to_lvreg [sreg];
+ sregs [srcindex] = vreg_to_lvreg [sreg];
+ //mono_inst_set_src_registers (ins, sregs);
continue;
}
/* Try to fuse the load into the instruction */
if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
- ins->inst_basereg = var->inst_basereg;
+ sregs [0] = var->inst_basereg;
+ //mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
- ins->sreg2 = var->inst_basereg;
+ sregs [1] = var->inst_basereg;
+ //mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else {
if (MONO_IS_REAL_MOVE (ins)) {
*/
sreg = ins->dreg;
}
+ g_assert (sreg != -1);
vreg_to_lvreg [var->dreg] = sreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = var->dreg;
}
}
- if (srcindex == 0)
- ins->sreg1 = sreg;
- else
- ins->sreg2 = sreg;
+ sregs [srcindex] = sreg;
+ //mono_inst_set_src_registers (ins, sregs);
if (regtype == 'l') {
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
}
}
}
+ mono_inst_set_src_registers (ins, sregs);
if (dest_has_lvreg) {
+ g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
g_assert (lvregs_len < 1024);
lvregs [lvregs_len ++] = prev_dreg;
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
+ } else if (ins->opcode == OP_NOP) {
+ ins->dreg = -1;
+ MONO_INST_NULLIFY_SREGS (ins);
}
if (cfg->verbose_level > 2)
if (live_range_start [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
ins->inst_c0 = i;
+ ins->inst_c1 = vreg;
mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
}
if (live_range_end [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
ins->inst_c0 = i;
+ ins->inst_c1 = vreg;
mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
}
}