LLVMValueRef got_var;
const char *got_symbol;
GHashTable *plt_entries;
+ char **bb_names;
+ GPtrArray *used;
+ LLVMTypeRef ptr_type;
} MonoLLVMModule;
/*
gboolean *is_dead;
gboolean *unreachable;
int *pindexes;
+ LLVMValueRef imt_rgctx_loc;
char temp_name [32];
} EmitContext;
#define IS_TARGET_X86 0
#endif
+#ifdef TARGET_AMD64
+#define IS_TARGET_AMD64 1
+#else
+#define IS_TARGET_AMD64 0
+#endif
+
#define LLVM_FAILURE(ctx, reason) do { \
TRACE_FAILURE (reason); \
(ctx)->cfg->exception_message = g_strdup (reason); \
return sizeof (gpointer) == 8 ? LLVMInt64Type () : LLVMInt32Type ();
}
+/*
+ * ObjRefType:
+ *
+ *   Return the LLVM type used for managed object references: a pointer to a
+ * pointer-sized integer (i64* on 64-bit targets, i32* on 32-bit targets).
+ * Representing references as pointers (instead of IntPtrType ()) lets LLVM
+ * treat them as pointer values.
+ */
+static LLVMTypeRef
+ObjRefType (void)
+{
+ return sizeof (gpointer) == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
+}
+
+/*
+ * ThisType:
+ *
+ *   Return the LLVM type used for the 'this' argument. Currently identical to
+ * ObjRefType (); kept separate so the two uses can diverge independently.
+ */
+static LLVMTypeRef
+ThisType (void)
+{
+ return sizeof (gpointer) == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
+}
+
/*
* get_vtype_size:
*
size = mono_class_value_size (mono_class_from_mono_type (t), NULL);
- while (size < sizeof (gpointer) && mono_is_power_of_two (size) == -1)
+ while (size < 2 * sizeof (gpointer) && mono_is_power_of_two (size) == -1)
size ++;
return size;
case MONO_TYPE_SZARRAY:
case MONO_TYPE_STRING:
case MONO_TYPE_PTR:
- return IntPtrType ();
+ return ObjRefType ();
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* Because of generic sharing */
- return IntPtrType ();
+ return ObjRefType ();
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t))
- return IntPtrType ();
+ return ObjRefType ();
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
static LLVMBasicBlockRef
get_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
- char bb_name [128];
+ char bb_name_buf [128];
+ char *bb_name;
if (ctx->bblocks [bb->block_num].bblock == NULL) {
if (bb->flags & BB_EXCEPTION_HANDLER) {
int clause_index = (mono_get_block_region_notry (ctx->cfg, bb->region) >> 8) - 1;
- sprintf (bb_name, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num);
+ sprintf (bb_name_buf, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num);
+ bb_name = bb_name_buf;
+ } else if (bb->block_num < 256) {
+ if (!ctx->lmodule->bb_names)
+ ctx->lmodule->bb_names = g_new0 (char*, 256);
+ if (!ctx->lmodule->bb_names [bb->block_num]) {
+ char *n;
+
+ n = g_strdup_printf ("BB%d", bb->block_num);
+ mono_memory_barrier ();
+ ctx->lmodule->bb_names [bb->block_num] = n;
+ }
+ bb_name = ctx->lmodule->bb_names [bb->block_num];
} else {
- sprintf (bb_name, "BB%d", bb->block_num);
+ sprintf (bb_name_buf, "BB%d", bb->block_num);
+ bb_name = bb_name_buf;
}
ctx->bblocks [bb->block_num].bblock = LLVMAppendBasicBlock (ctx->lmethod, bb_name);
if (cinfo && cinfo->rgctx_arg) {
if (sinfo)
sinfo->rgctx_arg_pindex = pindex;
- param_types [pindex] = IntPtrType ();
+ param_types [pindex] = ctx->lmodule->ptr_type;
pindex ++;
}
if (cinfo && cinfo->imt_arg) {
if (sinfo)
sinfo->imt_arg_pindex = pindex;
- param_types [pindex] = IntPtrType ();
+ param_types [pindex] = ctx->lmodule->ptr_type;
pindex ++;
}
if (vretaddr) {
if (sig->hasthis) {
if (sinfo)
sinfo->this_arg_pindex = pindex;
- param_types [pindex ++] = IntPtrType ();
+ param_types [pindex ++] = ThisType ();
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
{
LLVMValueRef md_arg;
int md_kind;
-
+
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("mono", 4);
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
+/*
+ * set_invariant_load_flag:
+ *
+ *   Attach !invariant.load metadata to the load instruction V, telling LLVM
+ * that the loaded memory location never changes after it becomes visible,
+ * so the load can be hoisted/CSE'd.
+ */
+static void
+set_invariant_load_flag (LLVMValueRef v)
+{
+ LLVMValueRef md_arg;
+ int md_kind;
+ const char *flag_name;
+
+ // FIXME: Cache this
+ flag_name = "invariant.load";
+ md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
+ md_arg = LLVMMDString ("<index>", strlen ("<index>"));
+ LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
+}
+
/*
* emit_call:
*
throw_sig->ret = &mono_get_void_class ()->byval_arg;
throw_sig->params [0] = &mono_get_int32_class ()->byval_arg;
icall_name = "llvm_throw_corlib_exception_abs_trampoline";
- throw_sig->params [1] = &mono_get_intptr_class ()->byval_arg;
+ /* This will become i8* */
+ throw_sig->params [1] = &mono_get_byte_class ()->this_arg;
sig = sig_to_llvm_sig (ctx, throw_sig);
if (ctx->cfg->compile_aot) {
/*
* Differences between the LLVM/non-LLVM throw corlib exception trampoline:
* - On x86, LLVM generated code doesn't push the arguments
- * - When using the LLVM mono branch, the trampoline takes the throw address as an
- * arguments, not a pc offset.
+ * - The trampoline takes the throw address as an argument, not a pc offset.
*/
LLVMAddGlobalMapping (ee, callee, resolve_patch (ctx->cfg, MONO_PATCH_INFO_INTERNAL_METHOD, icall_name));
}
ctx->lmodule->throw_corlib_exception = callee;
}
- if (IS_TARGET_X86)
+ if (IS_TARGET_X86 || IS_TARGET_AMD64)
args [0] = LLVMConstInt (LLVMInt32Type (), exc_class->type_token - MONO_TOKEN_TYPE_DEF, FALSE);
else
args [0] = LLVMConstInt (LLVMInt32Type (), exc_class->type_token, FALSE);
* The LLVM mono branch contains changes so a block address can be passed as an
* argument to a call.
*/
- args [1] = LLVMBuildPtrToInt (builder, LLVMBlockAddress (ctx->lmethod, ex_bb), IntPtrType (), "");
+ args [1] = LLVMBlockAddress (ctx->lmethod, ex_bb);
emit_call (ctx, bb, &builder, ctx->lmodule->throw_corlib_exception, args, 2);
LLVMBuildUnreachable (builder);
*nregs = pindex;
}
+/*
+ * build_alloca_llvm_type:
+ *
+ *   Emit an alloca of LLVM type T with alignment ALIGN into the entry basic
+ * block, after the previously emitted alloca (ctx->last_alloca), and return
+ * the resulting value.
+ */
+static LLVMValueRef
+build_alloca_llvm_type (EmitContext *ctx, LLVMTypeRef t, int align)
+{
+ /*
+ * Have to place all alloca's at the end of the entry bb, since otherwise they would
+ * get executed every time control reaches them.
+ */
+ LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca);
+
+ ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, t, NULL, align, "");
+ return ctx->last_alloca;
+}
+
static LLVMValueRef
build_alloca (EmitContext *ctx, MonoType *t)
{
while (mono_is_power_of_two (align) == -1)
align ++;
- /*
- * Have to place all alloca's at the end of the entry bb, since otherwise they would
- * get executed every time control reaches them.
- */
- LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca);
-
- ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, type_to_llvm_type (ctx, t), NULL, align, "");
- return ctx->last_alloca;
+ return build_alloca_llvm_type (ctx, type_to_llvm_type (ctx, t), align);
}
/*
* Put the global into the 'llvm.used' array to prevent it from being optimized away.
*/
static void
-mark_as_used (LLVMModuleRef module, LLVMValueRef global)
+mark_as_used (MonoLLVMModule *lmodule, LLVMValueRef global)
+{
+ /* Collect globals lazily; emit_llvm_used () flushes the list into the
+ * single 'llvm.used' array at the end of AOT compilation. */
+ if (!lmodule->used)
+ lmodule->used = g_ptr_array_sized_new (16);
+ g_ptr_array_add (lmodule->used, global);
+}
+
+/*
+ * emit_llvm_used:
+ *
+ *   Emit the 'llvm.used' global from the list of globals collected by
+ * mark_as_used (), preventing the optimizer from discarding them. Does
+ * nothing when no global was registered.
+ */
+static void
+emit_llvm_used (MonoLLVMModule *lmodule)
{
+ LLVMModuleRef module = lmodule->module;
LLVMTypeRef used_type;
- LLVMValueRef used, used_elem;
+ LLVMValueRef used, *used_elem;
+ int i;
- used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), 1);
+ if (!lmodule->used)
+ return;
+
+ /* llvm.used must be an array of i8*, so every entry is bitcast below. */
+ used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), lmodule->used->len);
used = LLVMAddGlobal (module, used_type, "llvm.used");
- used_elem = LLVMConstBitCast (global, LLVMPointerType (LLVMInt8Type (), 0));
- LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), &used_elem, 1));
+ /* NOTE(review): used_elem is never g_free'd — apparent one-shot leak at
+ * module-emission time; confirm whether intentional. */
+ used_elem = g_new0 (LLVMValueRef, lmodule->used->len);
+ for (i = 0; i < lmodule->used->len; ++i)
+ used_elem [i] = LLVMConstBitCast (g_ptr_array_index (lmodule->used, i), LLVMPointerType (LLVMInt8Type (), 0));
+ LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), used_elem, lmodule->used->len));
LLVMSetLinkage (used, LLVMAppendingLinkage);
LLVMSetSection (used, "llvm.metadata");
}
* with the "mono.this" custom metadata to tell llvm that it needs to save its
* location into the LSDA.
*/
- this_alloc = mono_llvm_build_alloca (builder, IntPtrType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
+ this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
/* This volatile store will keep the alloca alive */
mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE);
g_assert (ctx->addresses [cfg->rgctx_var->dreg]);
rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg];
/* This volatile store will keep the alloca alive */
- store = mono_llvm_build_store (builder, ctx->rgctx_arg, rgctx_alloc, TRUE);
+ store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE);
set_metadata_flag (rgctx_alloc, "mono.this");
}
LLVMAddGlobalMapping (ee, callee, target);
}
}
+
+ if (call->method && strstr (call->method->klass->name, "AsyncVoidMethodBuilder"))
+ /* LLVM miscompiles async methods */
+ LLVM_FAILURE (ctx, "#13734");
} else if (calli) {
} else {
MonoJitICallInfo *info = mono_find_jit_icall_by_addr (call->fptr);
if (call->rgctx_arg_reg) {
g_assert (values [call->rgctx_arg_reg]);
g_assert (sinfo.rgctx_arg_pindex < nargs);
- args [sinfo.rgctx_arg_pindex] = values [call->rgctx_arg_reg];
+ /*
+ * On ARM, the imt/rgctx argument is passed in a caller save register, but some of our trampolines etc. clobber it, leading to
+ * problems if LLVM moves the arg assignment earlier. To work around this, save the argument into a stack slot and load
+ * it using a volatile load.
+ */
+#ifdef TARGET_ARM
+ if (!ctx->imt_rgctx_loc)
+ ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->lmodule->ptr_type, sizeof (gpointer));
+ LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->lmodule->ptr_type), ctx->imt_rgctx_loc);
+ args [sinfo.rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
+#else
+ args [sinfo.rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->lmodule->ptr_type);
+#endif
}
if (call->imt_arg_reg) {
g_assert (values [call->imt_arg_reg]);
g_assert (sinfo.imt_arg_pindex < nargs);
- args [sinfo.imt_arg_pindex] = values [call->imt_arg_reg];
+#ifdef TARGET_ARM
+ if (!ctx->imt_rgctx_loc)
+ ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->lmodule->ptr_type, sizeof (gpointer));
+ LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->lmodule->ptr_type), ctx->imt_rgctx_loc);
+ args [sinfo.imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
+#else
+ args [sinfo.imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->lmodule->ptr_type);
+#endif
}
if (vretaddr) {
} else {
g_assert (args [pindex]);
if (i == 0 && sig->hasthis)
- args [pindex] = convert (ctx, args [pindex], IntPtrType ());
+ args [pindex] = convert (ctx, args [pindex], ThisType ());
else
args [pindex] = convert (ctx, args [pindex], type_to_llvm_arg_type (ctx, sig->params [i - sig->hasthis]));
}
lcall = emit_call (ctx, bb, &builder, callee, args, LLVMCountParamTypes (llvm_sig));
-#ifdef LLVM_MONO_BRANCH
/*
* Modify cconv and parameter attributes to pass rgctx/imt correctly.
*/
LLVMAddInstrAttribute (lcall, 1 + sinfo.rgctx_arg_pindex, LLVMInRegAttribute);
if (call->imt_arg_reg)
LLVMAddInstrAttribute (lcall, 1 + sinfo.imt_arg_pindex, LLVMInRegAttribute);
-#endif
/* Add byval attributes if needed */
for (i = 0; i < sig->param_count; ++i) {
/* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */
if (ins->opcode == OP_FCOMPARE)
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
- else if (ins->opcode == OP_COMPARE_IMM)
- cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
- else if (ins->opcode == OP_LCOMPARE_IMM) {
+ else if (ins->opcode == OP_COMPARE_IMM) {
+ if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && ins->inst_imm == 0)
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, LLVMConstNull (LLVMTypeOf (lhs)), "");
+ else
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
+ } else if (ins->opcode == OP_LCOMPARE_IMM) {
if (SIZEOF_REGISTER == 4 && COMPILE_LLVM (cfg)) {
/* The immediate is encoded in two fields */
guint64 l = ((guint64)(guint32)ins->inst_offset << 32) | ((guint32)ins->inst_imm);
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, LLVMInt64Type ()), LLVMConstInt (LLVMInt64Type (), ins->inst_imm, FALSE), "");
}
}
- else if (ins->opcode == OP_COMPARE)
- cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
- else
+ else if (ins->opcode == OP_COMPARE) {
+ if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && LLVMTypeOf (lhs) == LLVMTypeOf (rhs))
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
+ else
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
+ } else
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
if (MONO_IS_COND_BRANCH_OP (ins->next)) {
case OP_AND_IMM:
case OP_MUL_IMM:
case OP_SHL_IMM:
- case OP_SHR_IMM: {
+ case OP_SHR_IMM:
+ case OP_SHR_UN_IMM: {
LLVMValueRef imm;
if (spec [MONO_INST_SRC1] == 'l') {
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
case OP_LSHR_UN_IMM:
+ case OP_SHR_UN_IMM:
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
default:
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, dname, is_volatile);
- if (!is_volatile && (ins->flags & MONO_INST_CONSTANT_LOAD)) {
+ if (!is_volatile && (ins->flags & MONO_INST_INVARIANT_LOAD)) {
/*
* These will signal LLVM that these loads do not alias any stores, and
* they can't fail, allowing them to be hoisted out of loops.
*/
- set_metadata_flag (values [ins->dreg], "mono.noalias");
+ set_invariant_load_flag (values [ins->dreg]);
set_metadata_flag (values [ins->dreg], "mono.nofail.load");
}
indexes [1] = LLVMConstInt (LLVMInt32Type (), (gssize)got_offset, FALSE);
got_entry_addr = LLVMBuildGEP (builder, ctx->lmodule->got_var, indexes, 2, "");
- // FIXME: This doesn't work right now, because it must be
- // paired with an invariant.end, and even then, its only in effect
- // inside its basic block
-#if 0
- {
- LLVMValueRef args [3];
- LLVMValueRef ptr, val;
-
- ptr = LLVMBuildBitCast (builder, got_entry_addr, LLVMPointerType (LLVMInt8Type (), 0), "ptr");
-
- args [0] = LLVMConstInt (LLVMInt64Type (), sizeof (gpointer), FALSE);
- args [1] = ptr;
- val = LLVMBuildCall (builder, LLVMGetNamedFunction (module, "llvm.invariant.start"), args, 2, "");
- }
-#endif
-
values [ins->dreg] = LLVMBuildLoad (builder, got_entry_addr, dname);
+ set_invariant_load_flag (values [ins->dreg]);
break;
}
case OP_NOT_REACHED:
values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
break;
}
- case OP_ATOMIC_EXCHANGE_I4: {
- LLVMValueRef args [2];
-
- g_assert (ins->inst_offset == 0);
-
- args [0] = convert (ctx, lhs, LLVMPointerType (LLVMInt32Type (), 0));
- args [1] = rhs;
-
- values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
- break;
- }
+ case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8: {
LLVMValueRef args [2];
+ LLVMTypeRef t;
+
+ if (ins->opcode == OP_ATOMIC_EXCHANGE_I4)
+ t = LLVMInt32Type ();
+ else
+ t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
- args [0] = convert (ctx, lhs, LLVMPointerType (LLVMInt64Type (), 0));
- args [1] = convert (ctx, rhs, LLVMInt64Type ());
- values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
- break;
- }
- case OP_ATOMIC_ADD_NEW_I4: {
- LLVMValueRef args [2];
-
- g_assert (ins->inst_offset == 0);
+ args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
+ args [1] = convert (ctx, rhs, t);
- args [0] = convert (ctx, lhs, LLVMPointerType (LLVMInt32Type (), 0));
- args [1] = rhs;
- values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname);
+ values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
break;
}
+ case OP_ATOMIC_ADD_NEW_I4:
case OP_ATOMIC_ADD_NEW_I8: {
LLVMValueRef args [2];
+ LLVMTypeRef t;
+
+ if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
+ t = LLVMInt32Type ();
+ else
+ t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
- args [0] = convert (ctx, lhs, LLVMPointerType (LLVMInt64Type (), 0));
- args [1] = convert (ctx, rhs, LLVMInt64Type ());
+ args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
+ args [1] = convert (ctx, rhs, t);
values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname);
break;
}
LLVMValueRef args [3];
LLVMTypeRef t;
- if (ins->opcode == OP_ATOMIC_CAS_I4) {
+ if (ins->opcode == OP_ATOMIC_CAS_I4)
t = LLVMInt32Type ();
- } else {
+ else
t = LLVMInt64Type ();
- }
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
/* comparand */
// 256 == GS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
#endif
-
// FIXME: XEN
values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), ins->inst_offset, TRUE), ptrtype, ""), "");
+#elif defined(TARGET_AMD64) && defined(TARGET_OSX)
+ /* See mono_amd64_emit_tls_get () */
+ int offset = mono_amd64_get_tls_gs_offset () + (ins->inst_offset * 8);
+
+ // 256 == GS segment register
+ LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
+ values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), offset, TRUE), ptrtype, ""), "");
#else
LLVM_FAILURE (ctx, "opcode tls-get");
#endif
break;
}
+ case OP_TLS_GET_REG: {
+#if defined(TARGET_AMD64) && defined(TARGET_OSX)
+ /* See emit_tls_get_reg () */
+ // 256 == GS segment register
+ LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
+ values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, convert (ctx, lhs, LLVMInt32Type ()), ptrtype, ""), "");
+#else
+ LLVM_FAILURE (ctx, "opcode tls-get");
+#endif
+ break;
+ }
/*
* Overflow opcodes.
switch (ins->opcode) {
case OP_STOREV_MEMBASE:
- if (cfg->gen_write_barriers && klass->has_references && ins->inst_destbasereg != cfg->frame_reg) {
- /* FIXME: Emit write barriers like in mini_emit_stobj () */
- LLVM_FAILURE (ctx, "storev_membase + write barriers");
+ if (cfg->gen_write_barriers && klass->has_references && ins->inst_destbasereg != cfg->frame_reg &&
+ LLVMGetInstructionOpcode (values [ins->inst_destbasereg]) != LLVMAlloca) {
+ /* Decomposed earlier */
+ g_assert_not_reached ();
break;
}
if (!addresses [ins->sreg1]) {
#if 1
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
-
+
if (i > 0 && clause->try_offset <= header->clauses [i - 1].handler_offset + header->clauses [i - 1].handler_len) {
/*
* FIXME: Some tests still fail with nested clauses.
ctx->addresses = g_new0 (LLVMValueRef, cfg->next_vreg);
ctx->vreg_types = g_new0 (LLVMTypeRef, cfg->next_vreg);
ctx->vreg_cli_types = g_new0 (MonoType*, cfg->next_vreg);
- phi_values = g_ptr_array_new ();
+ phi_values = g_ptr_array_sized_new (256);
/*
* This signals whenever the vreg was defined by a phi node with no input vars
* (i.e. all its input bblocks end with NOT_REACHABLE).
/* Whenever the bblock is unreachable */
ctx->unreachable = g_new0 (gboolean, cfg->max_block_num);
- bblock_list = g_ptr_array_new ();
+ bblock_list = g_ptr_array_sized_new (256);
ctx->values = values;
ctx->region_to_handler = g_hash_table_new (NULL, NULL);
static int count = 0;
count ++;
- if (getenv ("LLVM_COUNT")) {
- if (count == atoi (getenv ("LLVM_COUNT"))) {
+ if (g_getenv ("LLVM_COUNT")) {
+ if (count == atoi (g_getenv ("LLVM_COUNT"))) {
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
fflush (stdout);
last = TRUE;
}
- if (count > atoi (getenv ("LLVM_COUNT")))
+ if (count > atoi (g_getenv ("LLVM_COUNT")))
LLVM_FAILURE (ctx, "");
}
}
method = LLVMAddFunction (module, method_name, method_type);
ctx->lmethod = method;
-#ifdef LLVM_MONO_BRANCH
LLVMSetFunctionCallConv (method, LLVMMono1CallConv);
-#endif
LLVMSetLinkage (method, LLVMPrivateLinkage);
LLVMAddFunctionAttr (method, LLVMUWTable);
}
g_free (names);
- if (cfg->compile_aot) {
- LLVMValueRef md_args [16];
- LLVMValueRef md_node;
- int method_index;
-
- method_index = mono_aot_get_method_index (cfg->orig_method);
- md_args [0] = LLVMMDString (method_name, strlen (method_name));
- md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE);
- md_node = LLVMMDNode (md_args, 2);
- LLVMAddNamedMetadataOperand (module, "mono.function_indexes", md_node);
- //LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1));
- }
-
max_block_num = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
max_block_num = MAX (max_block_num, bb->block_num);
g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1);
} else {
+ if (LLVMTypeOf (values [sreg1]) != LLVMTypeOf (values [phi->dreg]))
+ // FIXME:
+ LLVM_FAILURE (ctx, "incoming phi arg type mismatch");
g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1);
}
if (cfg->verbose_level > 1)
mono_llvm_dump_value (method);
- mark_as_used (module, method);
+ if (cfg->compile_aot)
+ mark_as_used (ctx->lmodule, method);
+
+ if (cfg->compile_aot) {
+ LLVMValueRef md_args [16];
+ LLVMValueRef md_node;
+ int method_index;
+
+ method_index = mono_aot_get_method_index (cfg->orig_method);
+ md_args [0] = LLVMMDString (method_name, strlen (method_name));
+ md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE);
+ md_node = LLVMMDNode (md_args, 2);
+ LLVMAddNamedMetadataOperand (module, "mono.function_indexes", md_node);
+ //LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1));
+ }
if (cfg->compile_aot) {
/* Don't generate native code, keep the LLVM IR */
* with it.
*/
cfg->encoded_unwind_ops = mono_unwind_decode_fde ((guint8*)data, &cfg->encoded_unwind_ops_len, NULL, &ei, &ei_len, &type_info, &this_reg, &this_offset);
+ if (cfg->verbose_level > 1)
+ mono_print_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
/* Count nested clauses */
nested_len = 0;
LLVMAddFunction (module, "llvm.umul.with.overflow.i64", LLVMFunctionType (LLVMStructType (ovf_res_i64, 2, FALSE), ovf_params_i64, 2, FALSE));
}
- {
- LLVMTypeRef struct_ptr = LLVMPointerType (LLVMStructType (NULL, 0, FALSE), 0);
- LLVMTypeRef invariant_start_params [] = { LLVMInt64Type (), LLVMPointerType (LLVMInt8Type (), 0) };
- LLVMTypeRef invariant_end_params [] = { struct_ptr, LLVMInt64Type (), LLVMPointerType (LLVMInt8Type (), 0) };
-
- LLVMAddFunction (module, "llvm.invariant.start", LLVMFunctionType (struct_ptr, invariant_start_params, 2, FALSE));
-
- LLVMAddFunction (module, "llvm.invariant.end", LLVMFunctionType (LLVMVoidType (), invariant_end_params, 3, FALSE));
- }
-
/* EH intrinsics */
{
LLVMTypeRef arg_types [2];
}
/* SSE intrinsics */
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
{
LLVMTypeRef ret_type, arg_types [16];
}
AddFunc (module, "llvm.x86.sse2.pause", LLVMVoidType (), NULL, 0);
+#endif
/* Load/Store intrinsics */
{
}
}
+/*
+ * add_types:
+ *
+ *   Initialize the cached LLVM types stored in LMODULE. Currently only
+ * ptr_type: a pointer to a pointer-sized integer (i64*/i32*), used for
+ * rgctx/imt arguments and the GOT element type.
+ */
+static void
+add_types (MonoLLVMModule *lmodule)
+{
+ lmodule->ptr_type = LLVMPointerType (sizeof (gpointer) == 8 ? LLVMInt64Type () : LLVMInt32Type (), 0);
+}
+
void
mono_llvm_init (void)
{
ee = mono_llvm_create_ee (LLVMCreateModuleProviderForExistingModule (jit_module.module), alloc_cb, emitted_cb, exception_cb, dlsym_cb);
add_intrinsics (jit_module.module);
+ add_types (&jit_module);
jit_module.llvm_types = g_hash_table_new (NULL, NULL);
aot_module.got_symbol = got_symbol;
add_intrinsics (aot_module.module);
+ add_types (&aot_module);
/* Add GOT */
/*
* its size is known in mono_llvm_emit_aot_module ().
*/
{
- LLVMTypeRef got_type = LLVMArrayType (IntPtrType (), 0);
+ LLVMTypeRef got_type = LLVMArrayType (aot_module.ptr_type, 0);
aot_module.got_var = LLVMAddGlobal (aot_module.module, got_type, "mono_dummy_got");
LLVMSetInitializer (aot_module.got_var, LLVMConstNull (got_type));
* Create the real got variable and replace all uses of the dummy variable with
* the real one.
*/
- got_type = LLVMArrayType (IntPtrType (), got_size);
+ got_type = LLVMArrayType (aot_module.ptr_type, got_size);
real_got = LLVMAddGlobal (aot_module.module, got_type, aot_module.got_symbol);
LLVMSetInitializer (real_got, LLVMConstNull (got_type));
LLVMSetLinkage (real_got, LLVMInternalLinkage);
mono_llvm_replace_uses_of (aot_module.got_var, real_got);
- mark_as_used (aot_module.module, real_got);
+ mark_as_used (&aot_module, real_got);
/* Delete the dummy got so it doesn't become a global */
LLVMDeleteGlobal (aot_module.got_var);
+ emit_llvm_used (&aot_module);
+
#if 0
{
char *verifier_err;