+ /* NOTE(review): diff hunk inside the opcode->LLVM-type mapping helper.
+  * The new atomic load/store opcodes reuse the size and sign/zero
+  * extension rules of the plain memory ops of the same width. */
case OP_LOADI1_MEMBASE:
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI1_MEMBASE_IMM:
+ case OP_ATOMIC_LOAD_I1:
+ case OP_ATOMIC_STORE_I1:
*size = 1;
*sext = TRUE;
return LLVMInt8Type ();
case OP_LOADU1_MEMBASE:
case OP_LOADU1_MEM:
+ case OP_ATOMIC_LOAD_U1:
+ case OP_ATOMIC_STORE_U1:
*size = 1;
*zext = TRUE;
return LLVMInt8Type ();
case OP_LOADI2_MEMBASE:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI2_MEMBASE_IMM:
+ case OP_ATOMIC_LOAD_I2:
+ case OP_ATOMIC_STORE_I2:
*size = 2;
*sext = TRUE;
return LLVMInt16Type ();
case OP_LOADU2_MEMBASE:
case OP_LOADU2_MEM:
+ case OP_ATOMIC_LOAD_U2:
+ case OP_ATOMIC_STORE_U2:
*size = 2;
*zext = TRUE;
return LLVMInt16Type ();
case OP_LOADU4_MEM:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI4_MEMBASE_IMM:
+ /* 4- and 8-byte accesses need no extension to the stack type, so the
+  * signed and unsigned atomic variants share one arm. */
+ case OP_ATOMIC_LOAD_I4:
+ case OP_ATOMIC_STORE_I4:
+ case OP_ATOMIC_LOAD_U4:
+ case OP_ATOMIC_STORE_U4:
*size = 4;
return LLVMInt32Type ();
case OP_LOADI8_MEMBASE:
case OP_LOADI8_MEM:
case OP_STOREI8_MEMBASE_REG:
case OP_STOREI8_MEMBASE_IMM:
+ case OP_ATOMIC_LOAD_I8:
+ case OP_ATOMIC_STORE_I8:
+ case OP_ATOMIC_LOAD_U8:
+ case OP_ATOMIC_STORE_U8:
*size = 8;
return LLVMInt64Type ();
case OP_LOADR4_MEMBASE:
case OP_STORER4_MEMBASE_REG:
+ case OP_ATOMIC_LOAD_R4:
+ case OP_ATOMIC_STORE_R4:
*size = 4;
return LLVMFloatType ();
case OP_LOADR8_MEMBASE:
case OP_STORER8_MEMBASE_REG:
+ case OP_ATOMIC_LOAD_R8:
+ case OP_ATOMIC_STORE_R8:
*size = 8;
return LLVMDoubleType ();
case OP_LOAD_MEMBASE:
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
for (i = 0; i < sig->param_count; ++i) {
+ /* Hoist the per-argument calling convention info once per iteration;
+  * replaces the repeated cinfo->args [i + sig->hasthis] lookups below. */
+ LLVMArgInfo *ainfo = cinfo ? &cinfo->args [i + sig->hasthis] : NULL;
+
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
pindexes [i] = pindex;
- if (cinfo && cinfo->args [i + sig->hasthis].storage == LLVMArgVtypeInReg) {
+ if (ainfo && ainfo->storage == LLVMArgVtypeInReg) {
for (j = 0; j < 2; ++j) {
- switch (cinfo->args [i + sig->hasthis].pair_storage [j]) {
+ switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg:
param_types [pindex ++] = LLVMIntType (sizeof (gpointer) * 8);
break;
g_assert_not_reached ();
}
}
- } else if (cinfo && cinfo->args [i + sig->hasthis].storage == LLVMArgVtypeByVal) {
+ } else if (ainfo && ainfo->storage == LLVMArgVtypeByVal) {
param_types [pindex] = type_to_llvm_arg_type (ctx, sig->params [i]);
CHECK_FAILURE (ctx);
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
+ /* New storage kind: the vtype is passed as an [nslots x iN] array of
+  * pointer-sized integers rather than by pointer/byval. */
+ } else if (ainfo && ainfo->storage == LLVMArgAsIArgs) {
+ param_types [pindex] = LLVMArrayType (IntPtrType (), ainfo->nslots);
+ pindex ++;
} else {
param_types [pindex ++] = type_to_llvm_arg_type (ctx, sig->params [i]);
}
return lcall;
}
+/*
+ * With LLVM_API_VERSION >= 4 the llvm.mono.load/llvm.mono.store intrinsics
+ * take one extra i32 argument carrying the atomic ordering.  This constant
+ * keeps the call sites and the intrinsic declarations' argument counts in
+ * sync across both API versions.
+ */
+#if LLVM_API_VERSION >= 4
+#define EXTRA_MONO_LOAD_STORE_ARGS 1
+#else
+#define EXTRA_MONO_LOAD_STORE_ARGS 0
+#endif
+
static LLVMValueRef
-emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, const char *name, gboolean is_faulting)
+emit_load_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, const char *name, gboolean is_faulting, BarrierKind barrier)
{
const char *intrins_name;
LLVMValueRef args [16], res;
LLVMTypeRef addr_type;
if (is_faulting && bb->region != -1) {
+#if LLVM_API_VERSION >= 4
+ /* Translate mono's acquire-side barrier kinds into an LLVM atomic
+  * ordering to be passed to the mono load intrinsic below.  Release
+  * orderings are invalid on a load, hence the assert in default:. */
+ LLVMAtomicOrdering ordering;
+
+ switch (barrier) {
+ case LLVM_BARRIER_NONE:
+ ordering = LLVMAtomicOrderingNotAtomic;
+ break;
+ case LLVM_BARRIER_ACQ:
+ ordering = LLVMAtomicOrderingAcquire;
+ break;
+ case LLVM_BARRIER_SEQ:
+ ordering = LLVMAtomicOrderingSequentiallyConsistent;
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+#endif
+
/*
* We handle loads which can fault by calling a mono specific intrinsic
* using an invoke, so they are handled properly inside try blocks.
args [0] = addr;
args [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
args [2] = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE);
- res = emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 3);
+#if LLVM_API_VERSION >= 4
+ /* Extra ordering argument; count below grows by EXTRA_MONO_LOAD_STORE_ARGS */
+ args [3] = LLVMConstInt (LLVMInt32Type (), ordering, FALSE);
+#endif
+ res = emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 3 + EXTRA_MONO_LOAD_STORE_ARGS);
if (addr_type == LLVMPointerType (LLVMDoubleType (), 0))
res = LLVMBuildBitCast (*builder_ref, res, LLVMDoubleType (), "");
* LLVM will generate invalid code when encountering a load from a
* NULL address.
*/
+ /* Non-invoke path: the barrier kind is forwarded so the helper can emit
+  * an atomic load when requested. */
- res = mono_llvm_build_load (*builder_ref, addr, name, is_faulting);
+ res = mono_llvm_build_load (*builder_ref, addr, name, is_faulting, barrier);
/* Mark it with a custom metadata */
/*
}
}
+/*
+ * emit_load:
+ *
+ *   Backwards-compatible wrapper over emit_load_general for the common
+ * non-atomic case: existing callers keep their old semantics by passing
+ * LLVM_BARRIER_NONE.
+ */
+static LLVMValueRef
+emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, const char *name, gboolean is_faulting)
+{
+ return emit_load_general (ctx, bb, builder_ref, size, addr, name, is_faulting, LLVM_BARRIER_NONE);
+}
+
static void
-emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, gboolean is_faulting)
+emit_store_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, gboolean is_faulting, BarrierKind barrier)
{
const char *intrins_name;
LLVMValueRef args [16];
if (is_faulting && bb->region != -1) {
+#if LLVM_API_VERSION >= 4
+ /* Translate mono's release-side barrier kinds into an LLVM atomic
+  * ordering for the mono store intrinsic.  Acquire orderings are
+  * invalid on a store, hence the assert in default:. */
+ LLVMAtomicOrdering ordering;
+
+ switch (barrier) {
+ case LLVM_BARRIER_NONE:
+ ordering = LLVMAtomicOrderingNotAtomic;
+ break;
+ case LLVM_BARRIER_REL:
+ ordering = LLVMAtomicOrderingRelease;
+ break;
+ case LLVM_BARRIER_SEQ:
+ ordering = LLVMAtomicOrderingSequentiallyConsistent;
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+#endif
+
switch (size) {
case 1:
intrins_name = "llvm.mono.store.i8.p0i8";
args [1] = addr;
args [2] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
args [3] = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE);
+#if LLVM_API_VERSION >= 4
+ /* Extra ordering argument; count below grows by EXTRA_MONO_LOAD_STORE_ARGS */
+ args [4] = LLVMConstInt (LLVMInt32Type (), ordering, FALSE);
+#endif
- emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 4);
+ emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 4 + EXTRA_MONO_LOAD_STORE_ARGS);
} else {
+ /* Non-invoke path now goes through the mono helper so the barrier kind
+  * can be honored (plain LLVMBuildStore had no way to express it). */
- LLVMBuildStore (*builder_ref, value, addr);
+ mono_llvm_build_store (*builder_ref, value, addr, is_faulting, barrier);
}
}
+/*
+ * emit_store:
+ *
+ *   Backwards-compatible wrapper over emit_store_general for the common
+ * non-atomic case: existing callers keep their old semantics by passing
+ * LLVM_BARRIER_NONE.
+ */
+static void
+emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, gboolean is_faulting)
+{
+ emit_store_general (ctx, bb, builder_ref, size, value, addr, is_faulting, LLVM_BARRIER_NONE);
+}
+
/*
* emit_cond_system_exception:
*
/* Treat these as normal values */
ctx->values [reg] = LLVMBuildLoad (builder, ctx->addresses [reg], "");
}
+ } else if (ainfo->storage == LLVMArgAsIArgs) {
+ LLVMValueRef arg = LLVMGetParam (ctx->lmethod, ctx->pindexes [i]);
+
+ ctx->addresses [reg] = build_alloca (ctx, sig->params [i]);
+
+ /* The argument is received as an array of ints, store it into the real argument */
+ /* NOTE(review): the alloca is reinterpreted as a pointer to the array
+  * type so the whole array can be stored in one go. */
+ LLVMBuildStore (ctx->builder, arg, convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMTypeOf (arg), 0)));
} else {
ctx->values [reg] = convert_full (ctx, ctx->values [reg], llvm_type_to_stack_type (type_to_llvm_type (ctx, sig->params [i])), type_is_unsigned (ctx, sig->params [i]));
}
*/
this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
/* This volatile store will keep the alloca alive */
+ /* mono_llvm_build_store gained a barrier parameter; LLVM_BARRIER_NONE
+  * preserves the previous (volatile, non-atomic) behavior. */
- mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE);
+ mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE, LLVM_BARRIER_NONE);
set_metadata_flag (this_alloc, "mono.this");
}
g_assert (ctx->addresses [cfg->rgctx_var->dreg]);
rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg];
/* This volatile store will keep the alloca alive */
- store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE);
+ store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE, LLVM_BARRIER_NONE);
set_metadata_flag (rgctx_alloc, "mono.this");
}
* their own calling convention on some platforms.
*/
#ifndef TARGET_AMD64
+ /* MONITOR_ENTER_V4 is a new patch kind that, like the other trampolines
+  * listed here, uses its own calling convention and so cannot be called
+  * directly from LLVM-compiled code on these targets. */
- if (abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER || abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT || abs_ji->type == MONO_PATCH_INFO_GENERIC_CLASS_INIT)
+ if (abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER || abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER_V4 ||
+ abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT || abs_ji->type == MONO_PATCH_INFO_GENERIC_CLASS_INIT)
LLVM_FAILURE (ctx, "trampoline with own cconv");
#endif
target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, abs_ji, FALSE);
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->lmodule->ptr_type, sizeof (gpointer));
LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->lmodule->ptr_type), ctx->imt_rgctx_loc);
+ /* mono_llvm_build_load gained a barrier parameter; LLVM_BARRIER_NONE
+  * preserves the previous (volatile, non-atomic) behavior. */
- args [sinfo.rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
+ args [sinfo.rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE, LLVM_BARRIER_NONE);
#else
args [sinfo.rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->lmodule->ptr_type);
#endif
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->lmodule->ptr_type, sizeof (gpointer));
LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->lmodule->ptr_type), ctx->imt_rgctx_loc);
- args [sinfo.imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
+ args [sinfo.imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE, LLVM_BARRIER_NONE);
#else
args [sinfo.imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->lmodule->ptr_type);
#endif
} else if (ainfo->storage == LLVMArgVtypeByVal) {
g_assert (addresses [reg]);
args [pindex] = addresses [reg];
+ } else if (ainfo->storage == LLVMArgAsIArgs) {
+ g_assert (addresses [reg]);
+ /* Outgoing side of LLVMArgAsIArgs: load the vtype back out of its
+  * address as an [nslots x iN] array of pointer-sized ints. */
+ args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (IntPtrType (), ainfo->nslots), 0)), "");
} else {
g_assert (args [pindex]);
if (i == 0 && sig->hasthis)
LLVMValueRef method = ctx->lmethod;
LLVMValueRef *values = ctx->values;
LLVMValueRef *addresses = ctx->addresses;
+ /* NOTE(review): the function-wide `int i` is dropped here; presumably the
+  * blocks that use it now declare it locally — confirm against the full file. */
- int i;
LLVMCallInfo *linfo = ctx->linfo;
LLVMModuleRef module = ctx->module;
BBInfo *bblocks = ctx->bblocks;
break;
}
case OP_MEMORY_BARRIER: {
+ /* The fence builder now takes the IR-specified barrier kind instead of
+  * always emitting a full fence. */
- mono_llvm_build_fence (builder);
+ mono_llvm_build_fence (builder, (BarrierKind) ins->backend.memory_barrier_kind);
+ break;
+ }
+ /* Atomic loads: translated through emit_load_general with the barrier
+  * kind carried on the instruction.  Only supported with the newer
+  * LLVM API (needs the ordering argument on the mono.load intrinsic). */
+ case OP_ATOMIC_LOAD_I1:
+ case OP_ATOMIC_LOAD_I2:
+ case OP_ATOMIC_LOAD_I4:
+ case OP_ATOMIC_LOAD_I8:
+ case OP_ATOMIC_LOAD_U1:
+ case OP_ATOMIC_LOAD_U2:
+ case OP_ATOMIC_LOAD_U4:
+ case OP_ATOMIC_LOAD_U8:
+ case OP_ATOMIC_LOAD_R4:
+ case OP_ATOMIC_LOAD_R8: {
+#if LLVM_API_VERSION >= 4
+ int size;
+ gboolean sext, zext;
+ LLVMTypeRef t;
+ gboolean is_volatile = (ins->flags & MONO_INST_FAULT);
+ BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
+ LLVMValueRef index, addr;
+
+ t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
+
+ /* A widening conversion follows below, so the load itself gets no name */
+ if (sext || zext)
+ dname = (char *)"";
+
+ if (ins->inst_offset != 0) {
+ /* NOTE(review): indexes by element (inst_offset / size) — assumes the
+  * offset is a multiple of the access size; confirm callers guarantee this. */
+ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
+ addr = LLVMBuildGEP (builder, convert (ctx, lhs, LLVMPointerType (t, 0)), &index, 1, "");
+ } else {
+ addr = lhs;
+ }
+
+ addr = convert (ctx, addr, LLVMPointerType (t, 0));
+
+ values [ins->dreg] = emit_load_general (ctx, bb, &builder, size, addr, dname, is_volatile, barrier);
+
+ /* Sub-int results are widened to i32 on the evaluation stack */
+ if (sext)
+ values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
+ else if (zext)
+ values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
+#else
+ LLVM_FAILURE (ctx, "atomic mono.load intrinsic");
+#endif
+ break;
+ }
+ /* Atomic stores: translated through emit_store_general with the barrier
+  * kind carried on the instruction; same LLVM API gating as the loads. */
+ case OP_ATOMIC_STORE_I1:
+ case OP_ATOMIC_STORE_I2:
+ case OP_ATOMIC_STORE_I4:
+ case OP_ATOMIC_STORE_I8:
+ case OP_ATOMIC_STORE_U1:
+ case OP_ATOMIC_STORE_U2:
+ case OP_ATOMIC_STORE_U4:
+ case OP_ATOMIC_STORE_U8:
+ case OP_ATOMIC_STORE_R4:
+ case OP_ATOMIC_STORE_R8: {
+#if LLVM_API_VERSION >= 4
+ int size;
+ gboolean sext, zext;
+ LLVMTypeRef t;
+ gboolean is_volatile = (ins->flags & MONO_INST_FAULT);
+ BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
+ LLVMValueRef index, addr, value;
+
+ if (!values [ins->inst_destbasereg])
+ LLVM_FAILURE (ctx, "inst_destbasereg");
+
+ t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
+
+ /* NOTE(review): unlike the atomic load path this always emits a GEP,
+  * even for a zero offset — harmless (index 0), but inconsistent. Also
+  * assumes inst_offset is a multiple of the access size; confirm. */
+ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
+ addr = LLVMBuildGEP (builder, convert (ctx, values [ins->inst_destbasereg], LLVMPointerType (t, 0)), &index, 1, "");
+ value = convert (ctx, values [ins->sreg1], t);
+
+ emit_store_general (ctx, bb, &builder, size, value, addr, is_volatile, barrier);
+ break;
+#else
+ LLVM_FAILURE (ctx, "atomic mono.store intrinsic");
+#endif
break;
}
case OP_RELAXED_NOP: {
case OP_EXPAND_R8: {
LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
LLVMValueRef mask [16], v;
+ /* Local loop counter, replacing the former function-wide `i` */
+ int i;
for (i = 0; i < 16; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
}
case LLVMArgVtypeByVal:
case LLVMArgVtypeInReg:
+ /* LLVMArgAsIArgs vtypes are also materialized via OP_LLVM_OUTARG_VT */
+ case LLVMArgAsIArgs:
MONO_INST_NEW (cfg, ins, OP_LLVM_OUTARG_VT);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
arg_types [0] = LLVMPointerType (LLVMIntType (i * 8), 0);
arg_types [1] = LLVMInt32Type ();
arg_types [2] = LLVMInt1Type ();
+#if LLVM_API_VERSION >= 4
+ /* Trailing i32 atomic-ordering argument (see EXTRA_MONO_LOAD_STORE_ARGS) */
+ arg_types [3] = LLVMInt32Type ();
+#endif
sprintf (name, "llvm.mono.load.i%d.p0i%d", i * 8, i * 8);
- LLVMAddFunction (module, name, LLVMFunctionType (LLVMIntType (i * 8), arg_types, 3, FALSE));
+ LLVMAddFunction (module, name, LLVMFunctionType (LLVMIntType (i * 8), arg_types, 3 + EXTRA_MONO_LOAD_STORE_ARGS, FALSE));
arg_types [0] = LLVMIntType (i * 8);
arg_types [1] = LLVMPointerType (LLVMIntType (i * 8), 0);
arg_types [2] = LLVMInt32Type ();
arg_types [3] = LLVMInt1Type ();
+#if LLVM_API_VERSION >= 4
+ /* Trailing i32 atomic-ordering argument, mirroring the load intrinsic */
+ arg_types [4] = LLVMInt32Type ();
+#endif
sprintf (name, "llvm.mono.store.i%d.p0i%d", i * 8, i * 8);
- LLVMAddFunction (module, name, LLVMFunctionType (LLVMVoidType (), arg_types, 4, FALSE));
+ LLVMAddFunction (module, name, LLVMFunctionType (LLVMVoidType (), arg_types, 4 + EXTRA_MONO_LOAD_STORE_ARGS, FALSE));
}
}
}