(dest)->type = STACK_I4; \
} while (0)
-#define NEW_LOAD_MEMBASE_FLAGS(cfg,dest,op,dr,base,offset,ins_flags) do { \
- MONO_INST_NEW ((cfg), (dest), (op)); \
- (dest)->dreg = (dr); \
- (dest)->inst_basereg = (base); \
- (dest)->inst_offset = (offset); \
- (dest)->type = STACK_I4; \
- (dest)->flags = (ins_flags); \
- } while (0)
-
#define NEW_LOAD_MEM(cfg,dest,op,dr,mem) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->dreg = (dr); \
#define MONO_EMIT_NEW_LOAD_MEMBASE(cfg,dr,base,offset) MONO_EMIT_NEW_LOAD_MEMBASE_OP ((cfg), (OP_LOAD_MEMBASE), (dr), (base), (offset))
-#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS(cfg,op,dr,base,offset,ins_flags) do { \
- MonoInst *inst; \
- MONO_INST_NEW ((cfg), (inst), (op)); \
- inst->dreg = dr; \
- inst->inst_basereg = base; \
- inst->inst_offset = offset; \
- inst->flags = (ins_flags); \
- MONO_ADD_INS (cfg->cbb, inst); \
- } while (0)
-
#define MONO_EMIT_NEW_STORE_MEMBASE(cfg,op,base,offset,sr) do { \
MonoInst *inst; \
MONO_INST_NEW ((cfg), (inst), (op)); \
} while (0)
#define MONO_EMIT_NEW_CHECK_THIS(cfg, sreg) do { \
- cfg->flags |= MONO_CFG_HAS_CHECK_THIS; \
- if (cfg->explicit_null_checks) { \
- MONO_EMIT_NULL_CHECK (cfg, sreg); \
- } else { \
- MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, sreg); \
- MONO_EMIT_NEW_IMPLICIT_EXCEPTION_LOAD_STORE (cfg); \
- } \
- MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sreg); \
+ cfg->flags |= MONO_CFG_HAS_CHECK_THIS; \
+ if (cfg->explicit_null_checks) { \
+ MONO_EMIT_NULL_CHECK (cfg, sreg); \
+ } else { \
+ MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, sreg); \
+ MONO_EMIT_NEW_IMPLICIT_EXCEPTION_LOAD_STORE (cfg); \
+ } \
+ MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sreg); \
} while (0)
-/* A load which can cause a nullref */
-#define NEW_LOAD_MEMBASE_FAULT(cfg,dest,op,dr,base,offset) do { \
- MONO_EMIT_NULL_CHECK ((cfg), (base)); \
- if (cfg->explicit_null_checks) { \
- NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); \
- } else { \
- NEW_LOAD_MEMBASE_FLAGS ((cfg), (dest), (op), (dr), (base), (offset), MONO_INST_FAULT); \
- } \
+/*
+ * NEW_LOAD_MEMBASE_FLAGS:
+ * Create a load instruction in (dest): load from (base)+(offset) into vreg
+ * (dr) using opcode (op), with (ins_flags) stored into dest->flags.
+ * If MONO_INST_FAULT is requested, a null check on (base) is emitted first;
+ * when cfg->explicit_null_checks is set that check fully guards the load,
+ * so the FAULT flag is stripped from the created instruction.
+ * NOTE(review): MONO_EMIT_NULL_CHECK appends to cfg->cbb, so unlike other
+ * NEW_* macros this one can emit code as a side effect — confirm callers
+ * expect that.  Also, ins_flags is read unparenthesized into __ins_flags;
+ * callers should not pass comma expressions.
+ */
+#define NEW_LOAD_MEMBASE_FLAGS(cfg,dest,op,dr,base,offset,ins_flags) do { \
+ int __ins_flags = ins_flags; \
+ if (__ins_flags & MONO_INST_FAULT) { \
+ MONO_EMIT_NULL_CHECK ((cfg), (base)); \
+ if (cfg->explicit_null_checks) \
+ __ins_flags &= ~MONO_INST_FAULT; \
+ } \
+ NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); \
+ (dest)->flags = (__ins_flags); \
} while (0)
-#define NEW_LOAD_MEMBASE_MAY_FAULT(cfg,dest,op,dr,base,offset,fault) do { \
- if (fault) \
- NEW_LOAD_MEMBASE_FAULT ((cfg), (dest), (op), (dr), (base), (offset)); \
- else \
- NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); \
- } while (0)
+/*
+ * MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS:
+ * Create a load from (base)+(offset) into vreg (dr) with opcode (op) and
+ * flags (ins_flags), and append it to the current basic block (cfg->cbb).
+ * MONO_INST_FAULT handling mirrors NEW_LOAD_MEMBASE_FLAGS: a null check is
+ * emitted first, and under explicit null checks the FAULT flag is dropped
+ * since the load can then no longer trap.
+ * NOTE(review): ins_flags is read unparenthesized into __ins_flags.
+ */
+#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS(cfg,op,dr,base,offset,ins_flags) do { \
+ MonoInst *inst; \
+ int __ins_flags = ins_flags; \
+ if (__ins_flags & MONO_INST_FAULT) { \
+ MONO_EMIT_NULL_CHECK ((cfg), (base)); \
+ if (cfg->explicit_null_checks) \
+ __ins_flags &= ~MONO_INST_FAULT; \
+ } \
+ NEW_LOAD_MEMBASE ((cfg), (inst), (op), (dr), (base), (offset)); \
+ inst->flags = (__ins_flags); \
+ MONO_ADD_INS (cfg->cbb, inst); \
+ } while (0)
+
+#define MONO_EMIT_NEW_LOAD_MEMBASE_FLAGS(cfg,dr,base,offset,ins_flags) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS ((cfg), (OP_LOAD_MEMBASE), (dr), (base), (offset),(ins_flags))
+
+/*
+ * A load which can cause a nullref: shorthand for NEW_LOAD_MEMBASE_FLAGS
+ * with MONO_INST_FAULT, so a null check on (base) is created and, under
+ * explicit null checks, the FAULT flag is dropped from the instruction.
+ */
+#define NEW_LOAD_MEMBASE_FAULT(cfg,dest,op,dr,base,offset) NEW_LOAD_MEMBASE_FLAGS ((cfg), (dest), (op), (dr), (base), (offset), MONO_INST_FAULT)
/* Create a possibly-faulting load in (dest) and append it to the current
 * basic block. */
#define EMIT_NEW_LOAD_MEMBASE_FAULT(cfg,dest,op,dr,base,offset) do { \
	    NEW_LOAD_MEMBASE_FAULT ((cfg), (dest), (op), (dr), (base), (offset)); \
	    MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
-#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT(cfg,op,dr,base,offset) do { \
- MONO_EMIT_NULL_CHECK (cfg, base); \
- if (cfg->explicit_null_checks) { \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP ((cfg), (op), (dr), (base), (offset)); \
- } else { \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS ((cfg), (op), (dr), (base), (offset), MONO_INST_FAULT); \
- } \
- } while (0)
-
-#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_MAY_FAULT(cfg,op,dr,base,offset,fault) do { \
- if (fault) \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT ((cfg), (op), (dr), (base), (offset)); \
- else \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP ((cfg), (op), (dr), (base), (offset)); \
- } while (0)
+/* Emit a load of (base)+(offset) that may raise a NullReferenceException. */
+#define MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT(cfg,op,dr,base,offset) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS ((cfg), (op), (dr), (base), (offset), MONO_INST_FAULT)
/* Same as above, fixed to the generic OP_LOAD_MEMBASE opcode. */
#define MONO_EMIT_NEW_LOAD_MEMBASE_FAULT(cfg,dr,base,offset) MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT ((cfg), (OP_LOAD_MEMBASE), (dr), (base), (offset))
if (fault) \
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
else \
- MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset, MONO_INST_CONSTANT_LOAD); \
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
} while (0)
return -1;
}
+/*
+ * set_metadata_flag:
+ *
+ *   Attach a custom metadata node named FLAG_NAME to the LLVM value V,
+ * whose single operand is the MDString "mono".  This is how mono-specific
+ * information (e.g. "mono.this", "mono.noalias", "mono.nofail.load") is
+ * tagged onto LLVM instructions for later consumption.
+ * Compiled to a no-op on LLVM versions older than 2.8, which lack the
+ * metadata API.
+ */
+static void
+set_metadata_flag (LLVMValueRef v, const char *flag_name)
+{
+#if LLVM_CHECK_VERSION (2, 8)
+	LLVMValueRef md_arg;
+	int md_kind;
+
+	md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
+	/* 4 == strlen ("mono") */
+	md_arg = LLVMMDString ("mono", 4);
+	LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
+#endif
+}
+
/*
* emit_call:
*
return res;
} else {
- LLVMValueRef res, md_arg;
- int md_kind;
+ LLVMValueRef res;
/*
* We emit volatile loads for loads which can fault, because otherwise
res = mono_llvm_build_load (*builder_ref, addr, name, is_faulting);
/* Mark it with a custom metadata */
-#if LLVM_CHECK_VERSION (2, 8)
- if (is_faulting) {
- md_kind = LLVMGetMDKindID ("mono.faulting.load", strlen ("mono.faulting.load"));
- md_arg = LLVMMDString ("mono", 4);
- LLVMSetMetadata (res, md_kind, LLVMMDNode (&md_arg, 1));
- }
-#endif
+ /*
+ if (is_faulting)
+ set_metadata_flag (res, "mono.faulting.load");
+ */
+
return res;
}
}
emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg);
if (sig->hasthis && !cfg->rgctx_var && cfg->generic_sharing_context) {
-#if LLVM_CHECK_VERSION (2, 8)
- LLVMValueRef this_alloc, md_arg;
- int md_kind;
+ LLVMValueRef this_alloc;
/*
* The exception handling code needs the location where the this argument was
/* This volatile store will keep the alloca alive */
mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE);
- md_kind = LLVMGetMDKindID ("mono.this", strlen ("mono.this"));
- md_arg = LLVMMDString ("this", 4);
- LLVMSetMetadata (this_alloc, md_kind, LLVMMDNode (&md_arg, 1));
-#endif
+ set_metadata_flag (this_alloc, "mono.this");
}
if (cfg->rgctx_var) {
-#if LLVM_CHECK_VERSION (2, 8)
- LLVMValueRef rgctx_alloc, store, md_arg;
- int md_kind;
+ LLVMValueRef rgctx_alloc, store;
/*
* We handle the rgctx arg similarly to the this pointer.
/* This volatile store will keep the alloca alive */
store = mono_llvm_build_store (builder, ctx->rgctx_arg, rgctx_alloc, TRUE);
- md_kind = LLVMGetMDKindID ("mono.this", strlen ("mono.this"));
- md_arg = LLVMMDString ("this", 4);
- LLVMSetMetadata (rgctx_alloc, md_kind, LLVMMDNode (&md_arg, 1));
-#endif
+ set_metadata_flag (rgctx_alloc, "mono.this");
}
/*
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, dname, is_volatile);
+ if (!is_volatile && (ins->flags & MONO_INST_CONSTANT_LOAD) && IS_LLVM_MONO_BRANCH) {
+ /*
+ * These will signal LLVM that these loads do not alias any stores, and
+ * they can't fail, allowing them to be hoisted out of loops.
+ */
+ set_metadata_flag (values [ins->dreg], "mono.noalias");
+ set_metadata_flag (values [ins->dreg], "mono.nofail.load");
+ }
+
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)