#endif
}
+/*
+ * alloc_ireg_ref:
+ *   Allocate an integer vreg which will hold a GC reference. When GC maps
+ *   are being computed (cfg->compute_gc_maps), the vreg is additionally
+ *   registered as reference-holding via mono_mark_vreg_as_ref ().
+ */
+static inline guint32
+alloc_ireg_ref (MonoCompile *cfg)
+{
+ int vreg = alloc_ireg (cfg);
+
+ if (cfg->compute_gc_maps)
+ mono_mark_vreg_as_ref (cfg, vreg);
+
+ return vreg;
+}
+
+/*
+ * alloc_ireg_mp:
+ *   Allocate an integer vreg which will hold a managed pointer (STACK_MP,
+ *   i.e. an interior pointer). When GC maps are being computed
+ *   (cfg->compute_gc_maps), the vreg is registered via mono_mark_vreg_as_mp ().
+ */
+static inline guint32
+alloc_ireg_mp (MonoCompile *cfg)
+{
+ int vreg = alloc_ireg (cfg);
+
+ if (cfg->compute_gc_maps)
+ mono_mark_vreg_as_mp (cfg, vreg);
+
+ return vreg;
+}
+
static inline guint32
alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
switch (stack_type) {
case STACK_I4:
case STACK_PTR:
+ return alloc_ireg (cfg);
case STACK_MP:
+ return alloc_ireg_mp (cfg);
case STACK_OBJ:
- return alloc_ireg (cfg);
+ return alloc_ireg_ref (cfg);
case STACK_R8:
return alloc_freg (cfg);
case STACK_I8:
#define NEW_TYPE_FROM_HANDLE_CONST(cfg,dest,image,token,generic_context) NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_TYPE_FROM_HANDLE, (image), (token), (generic_context), STACK_OBJ, mono_defaults.monotype_class)
-#define NEW_LDTOKENCONST(cfg,dest,image,token) NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_LDTOKEN, (image), (token), NULL, STACK_PTR, NULL)
+#define NEW_LDTOKENCONST(cfg,dest,image,token,generic_context) NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_LDTOKEN, (image), (token), (generic_context), STACK_PTR, NULL)
#define NEW_DECLSECCONST(cfg,dest,image,entry) do { \
if (cfg->compile_aot) { \
} \
} while (0)
+#define NEW_JIT_ICALL_ADDRCONST(cfg,dest,name) NEW_AOTCONST ((cfg), (dest), MONO_PATCH_INFO_JIT_ICALL_ADDR, (name))
+
#define GET_VARINFO_INST(cfg,num) ((cfg)->varinfo [(num)]->inst)
#define NEW_VARLOAD(cfg,dest,var,vartype) do { \
(dest)->klass = mono_class_from_mono_type (ltype); \
} while (0)
-#define NEW_SEQ_POINT(cfg,dest,il_offset,ss_loc) do { \
+/*
+ * Create an OP_SEQ_POINT instruction for IL offset IL_OFFSET. INTR_LOC
+ * controls whether the MONO_INST_SINGLE_STEP_LOC flag is set on it.
+ */
+#define NEW_SEQ_POINT(cfg,dest,il_offset,intr_loc) do { \
MONO_INST_NEW ((cfg), (dest), OP_SEQ_POINT); \
(dest)->inst_imm = (il_offset); \
- (dest)->flags = ss_loc ? MONO_INST_SINGLE_STEP_LOC : 0; \
+ (dest)->flags = intr_loc ? MONO_INST_SINGLE_STEP_LOC : 0; \
+ } while (0)
+
+/*
+ * Create an OP_GC_PARAM_SLOT_LIVENESS_DEF instruction recording the stack
+ * slot at OFFSET with type TYPE; presumably consumed by the GC map
+ * construction code for call parameter slots — confirm against the GC
+ * map machinery.
+ */
+#define NEW_GC_PARAM_SLOT_LIVENESS_DEF(cfg,dest,offset,type) do { \
+ MONO_INST_NEW ((cfg), (dest), OP_GC_PARAM_SLOT_LIVENESS_DEF); \
+ (dest)->inst_offset = (offset); \
+ (dest)->inst_vtype = (type); \
} while (0)
/*
#define EMIT_NEW_TYPE_FROM_HANDLE_CONST(cfg,dest,image,token,generic_context) do { NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_TYPE_FROM_HANDLE, (image), (token), (generic_context), STACK_OBJ, mono_defaults.monotype_class); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
-#define EMIT_NEW_LDTOKENCONST(cfg,dest,image,token) do { NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_LDTOKEN, (image), (token), NULL, STACK_PTR, NULL); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
+#define EMIT_NEW_LDTOKENCONST(cfg,dest,image,token,generic_context) do { NEW_AOTCONST_TOKEN ((cfg), (dest), MONO_PATCH_INFO_LDTOKEN, (image), (token), (generic_context), STACK_PTR, NULL); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
#define EMIT_NEW_DOMAINCONST(cfg,dest) do { NEW_DOMAINCONST ((cfg), (dest)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
#define EMIT_NEW_METHOD_RGCTX_CONST(cfg,dest,method) do { NEW_METHOD_RGCTX_CONST ((cfg), (dest), (method)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
+#define EMIT_NEW_JIT_ICALL_ADDRCONST(cfg,dest,name) do { NEW_JIT_ICALL_ADDRCONST ((cfg), (dest), (name)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
+
#define EMIT_NEW_VARLOAD(cfg,dest,var,vartype) do { NEW_VARLOAD ((cfg), (dest), (var), (vartype)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
#define EMIT_NEW_VARSTORE(cfg,dest,var,vartype,inst) do { NEW_VARSTORE ((cfg), (dest), (var), (vartype), (inst)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
#define EMIT_NEW_VARLOADA(cfg,dest,var,vartype) do { NEW_VARLOADA ((cfg), (dest), (var), (vartype)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
-
#ifdef MONO_ARCH_SOFT_FLOAT
/*
*/
#define EMIT_NEW_VARLOAD_SFLOAT(cfg,dest,var,vartype) do { \
- if (!(vartype)->byref && (vartype)->type == MONO_TYPE_R4) { \
+ if (COMPILE_SOFT_FLOAT ((cfg)) && !(vartype)->byref && (vartype)->type == MONO_TYPE_R4) { \
MonoInst *iargs [1]; \
EMIT_NEW_VARLOADA (cfg, iargs [0], (var), (vartype)); \
(dest) = mono_emit_jit_icall (cfg, mono_fload_r4, iargs); \
} while (0)
#define EMIT_NEW_VARSTORE_SFLOAT(cfg,dest,var,vartype,inst) do { \
- if (!(vartype)->byref && (vartype)->type == MONO_TYPE_R4) { \
+ if (COMPILE_SOFT_FLOAT ((cfg)) && !(vartype)->byref && (vartype)->type == MONO_TYPE_R4) { \
MonoInst *iargs [2]; \
iargs [0] = (inst); \
EMIT_NEW_VARLOADA (cfg, iargs [1], (var), (vartype)); \
#define EMIT_NEW_STORE_MEMBASE_TYPE(cfg,dest,ltype,base,offset,sr) do { NEW_STORE_MEMBASE_TYPE ((cfg), (dest), (ltype), (base), (offset), (sr)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
+#define EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF(cfg,dest,offset,type) do { NEW_GC_PARAM_SLOT_LIVENESS_DEF ((cfg), (dest), (offset), (type)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
/*
* Variants which do not take an dest argument, but take a dreg argument.
*/
#define MONO_EMIT_NEW_AOTCONST(cfg,dr,imm,type) do { \
MonoInst *inst; \
- MONO_INST_NEW ((cfg), (inst), OP_AOTCONST); \
+ MONO_INST_NEW ((cfg), (inst), cfg->compile_aot ? OP_AOTCONST : OP_PCONST); \
inst->dreg = dr; \
inst->inst_p0 = imm; \
inst->inst_c1 = type; \
#define MONO_EMIT_NEW_CLASSCONST(cfg,dr,imm) MONO_EMIT_NEW_AOTCONST(cfg,dr,imm,MONO_PATCH_INFO_CLASS)
#define MONO_EMIT_NEW_VTABLECONST(cfg,dest,vtable) MONO_EMIT_NEW_AOTCONST ((cfg), (dest), (cfg)->compile_aot ? (gpointer)((vtable)->klass) : (vtable), MONO_PATCH_INFO_VTABLE)
+#define MONO_EMIT_NEW_SIGNATURECONST(cfg,dr,sig) MONO_EMIT_NEW_AOTCONST ((cfg), (dr), (sig), MONO_PATCH_INFO_SIGNATURE)
#define MONO_EMIT_NEW_VZERO(cfg,dr,kl) do { \
MonoInst *inst; \
MONO_ADD_INS ((cfg)->cbb, inst); \
} while (0)
+/*
+ * Emit an OP_LCOMPARE_IMM comparing the 64 bit value in SR1 against the
+ * immediate IMM. This is used on 32 bit machines too when running with
+ * LLVM: in that configuration the 64 bit immediate is split, the low
+ * 32 bits going into inst_imm and the high 32 bits into inst_offset.
+ */
+#define MONO_EMIT_NEW_LCOMPARE_IMM(cfg,sr1,imm) do { \
+ MonoInst *inst; \
+ MONO_INST_NEW ((cfg), (inst), (OP_LCOMPARE_IMM)); \
+ inst->sreg1 = sr1; \
+ if (SIZEOF_REGISTER == 4 && COMPILE_LLVM (cfg)) { \
+ guint64 _l = (imm); \
+ inst->inst_imm = _l & 0xffffffff; \
+ inst->inst_offset = _l >> 32; \
+ } else { \
+ inst->inst_imm = (imm); \
+ } \
+ MONO_ADD_INS ((cfg)->cbb, inst); \
+ } while (0)
+
#define MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg,op,dr,base,offset) do { \
MonoInst *inst; \
MONO_INST_NEW ((cfg), (inst), (op)); \
} \
} while (0)
+/* Loads/Stores which can fault are handled correctly by the LLVM mono branch */
+/*
+ * Emit an implicit-exception marker only when compiling with stock LLVM
+ * (no mono branch), since the mono branch handles faulting loads/stores
+ * itself.
+ */
+#define MONO_EMIT_NEW_IMPLICIT_EXCEPTION_LOAD_STORE(cfg) do { \
+ if (COMPILE_LLVM (cfg) && !IS_LLVM_MONO_BRANCH) \
+ MONO_EMIT_NEW_IMPLICIT_EXCEPTION ((cfg)); \
+ } while (0)
+
/* Emit an explicit null check which doesn't depend on SIGSEGV signal handling */
#define MONO_EMIT_NULL_CHECK(cfg, reg) do { \
if (cfg->explicit_null_checks) { \
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, (reg), 0); \
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException"); \
- } else { \
- MONO_EMIT_NEW_IMPLICIT_EXCEPTION (cfg); \
+ } else { \
+ MONO_EMIT_NEW_IMPLICIT_EXCEPTION_LOAD_STORE (cfg); \
} \
} while (0)
+/*
+ * Emit a null check of the 'this' pointer in SREG: an explicit
+ * compare + NullReferenceException when cfg->explicit_null_checks is set,
+ * otherwise an OP_CHECK_THIS (which relies on fault handling) plus the
+ * LLVM load/store marker. NOTE(review): OP_NOT_NULL presumably records
+ * that SREG is now known non-null for later passes — confirm.
+ */
#define MONO_EMIT_NEW_CHECK_THIS(cfg, sreg) do { \
- cfg->flags |= MONO_CFG_HAS_CHECK_THIS; \
- if (cfg->explicit_null_checks) \
- MONO_EMIT_NULL_CHECK (cfg, sreg); \
- else \
- MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, sreg); \
- MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sreg); \
+ cfg->flags |= MONO_CFG_HAS_CHECK_THIS; \
+ if (cfg->explicit_null_checks) { \
+ MONO_EMIT_NULL_CHECK (cfg, sreg); \
+ } else { \
+ MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, sreg); \
+ MONO_EMIT_NEW_IMPLICIT_EXCEPTION_LOAD_STORE (cfg); \
+ } \
+ MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sreg); \
+ } while (0)
+
+/*
+ * Create (but do not add to the bblock) a load from BASE+OFFSET into DR,
+ * applying INS_FLAGS to the instruction. If MONO_INST_FAULT is requested,
+ * a null check of BASE is emitted first; when explicit null checks are
+ * enabled that check makes the fault flag redundant, so it is cleared.
+ */
+#define NEW_LOAD_MEMBASE_FLAGS(cfg,dest,op,dr,base,offset,ins_flags) do { \
+ int __ins_flags = ins_flags; \
+ if (__ins_flags & MONO_INST_FAULT) { \
+ MONO_EMIT_NULL_CHECK ((cfg), (base)); \
+ if (cfg->explicit_null_checks) \
+ __ins_flags &= ~MONO_INST_FAULT; \
+ } \
+ NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); \
+ (dest)->flags = (__ins_flags); \
+ } while (0)
+
+/*
+ * Like NEW_LOAD_MEMBASE_FLAGS, but also adds the created load instruction
+ * to the current bblock (cfg->cbb). A preceding null check of BASE is
+ * emitted when MONO_INST_FAULT is requested; with explicit null checks
+ * the fault flag is then cleared as redundant.
+ */
+#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS(cfg,op,dr,base,offset,ins_flags) do { \
+ MonoInst *inst; \
+ int __ins_flags = ins_flags; \
+ if (__ins_flags & MONO_INST_FAULT) { \
+ MONO_EMIT_NULL_CHECK ((cfg), (base)); \
+ if (cfg->explicit_null_checks) \
+ __ins_flags &= ~MONO_INST_FAULT; \
+ } \
+ NEW_LOAD_MEMBASE ((cfg), (inst), (op), (dr), (base), (offset)); \
+ inst->flags = (__ins_flags); \
+ MONO_ADD_INS (cfg->cbb, inst); \
+ } while (0)
+
+#define MONO_EMIT_NEW_LOAD_MEMBASE_FLAGS(cfg,dr,base,offset,ins_flags) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS ((cfg), (OP_LOAD_MEMBASE), (dr), (base), (offset),(ins_flags))
+
+/* A load which can cause a nullref */
+#define NEW_LOAD_MEMBASE_FAULT(cfg,dest,op,dr,base,offset) NEW_LOAD_MEMBASE_FLAGS ((cfg), (dest), (op), (dr), (base), (offset), MONO_INST_FAULT)
+
+#define EMIT_NEW_LOAD_MEMBASE_FAULT(cfg,dest,op,dr,base,offset) do { \
+ NEW_LOAD_MEMBASE_FAULT ((cfg), (dest), (op), (dr), (base), (offset)); \
+ MONO_ADD_INS ((cfg)->cbb, (dest)); \
} while (0)
+#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT(cfg,op,dr,base,offset) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS ((cfg), (op), (dr), (base), (offset), MONO_INST_FAULT)
+
+#define MONO_EMIT_NEW_LOAD_MEMBASE_FAULT(cfg,dr,base,offset) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT ((cfg), (OP_LOAD_MEMBASE), (dr), (base), (offset))
+
/*Object Model related macros*/
-#ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
-#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
+/* Default bounds check implementation for most architectures + llvm */
+/*
+ * FAULT selects whether the array length load may fault (i.e. ARRAY_REG
+ * may be null). Otherwise the load is marked MONO_INST_CONSTANT_LOAD —
+ * presumably because an array's length field never changes, confirm.
+ * The check itself is an unsigned length <= index compare throwing
+ * IndexOutOfRangeException.
+ */
+#define MONO_EMIT_DEFAULT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg, fault) do { \
int _length_reg = alloc_ireg (cfg); \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
+ if (fault) \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
+ else \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset, MONO_INST_CONSTANT_LOAD); \
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
} while (0)
+
+#ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
+#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) MONO_EMIT_DEFAULT_BOUNDS_CHECK ((cfg), (array_reg), (offset), (index_reg), TRUE)
#endif
/* cfg is the MonoCompile being used
 * array_reg is the vreg holding the array object
 * index_reg is the vreg holding the index
 */
+/*
+ * Emit an array bounds check: INDEX_REG is checked against the
+ * ARRAY_LENGTH_FIELD of the ARRAY_TYPE object in ARRAY_REG. Skipped
+ * entirely when MONO_OPT_UNSAFE is enabled. When MONO_OPT_ABCREM is
+ * enabled, a pseudo OP_BOUNDS_CHECK instruction is created instead so
+ * the array bounds check removal pass can process it later.
+ */
#define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
+ if (!(cfg->opt & MONO_OPT_UNSAFE)) { \
if (!(cfg->opt & MONO_OPT_ABCREM)) { \
MONO_EMIT_NULL_CHECK (cfg, array_reg); \
- MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
+ if (COMPILE_LLVM (cfg)) \
+ MONO_EMIT_DEFAULT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg), TRUE); \
+ else \
+ MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
} else { \
MonoInst *ins; \
MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
ins->sreg1 = array_reg; \
ins->sreg2 = index_reg; \
ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
+ ins->flags |= MONO_INST_FAULT; \
MONO_ADD_INS ((cfg)->cbb, ins); \
(cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
(cfg)->cbb->has_array_access = TRUE; \
} \
+ } \
} while (0)
G_END_DECLS