/*
* mini-llvm.c: llvm "Backend" for the mono JIT
*
- * (C) 2009 Novell, Inc.
+ * Copyright 2009-2011 Novell Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
*/
#include "mini.h"
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mempool-internals.h>
+#include <mono/utils/mono-tls.h>
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
* needs to branch to in ENDFINALLY.
*/
GSList *call_handler_return_bbs;
+ /*
+ * If this bblock is the start of a finally clause, this is the bblock that
+ * CALL_HANDLER needs to branch to.
+ */
+ LLVMBasicBlockRef call_handler_target_bb;
/* The list of switch statements generated by ENDFINALLY instructions */
GSList *endfinally_switch_ins_list;
GSList *phi_nodes;
};
static LLVMExecutionEngineRef ee;
-static guint32 current_cfg_tls_id;
+static MonoNativeTlsKey current_cfg_tls_id;
static MonoLLVMModule jit_module, aot_module;
static gboolean jit_module_inited;
}
}
+/* Return the 128 bit SIMD type corresponding to the mono type TYPE */
+static inline G_GNUC_UNUSED LLVMTypeRef
+type_to_simd_type (int type)
+{
+ switch (type) {
+ /* Integer element types: lane count chosen so the vector is always 128 bits wide */
+ case MONO_TYPE_I1:
+ return LLVMVectorType (LLVMInt8Type (), 16);
+ case MONO_TYPE_I2:
+ return LLVMVectorType (LLVMInt16Type (), 8);
+ case MONO_TYPE_I4:
+ return LLVMVectorType (LLVMInt32Type (), 4);
+ case MONO_TYPE_I8:
+ return LLVMVectorType (LLVMInt64Type (), 2);
+ /* Floating point element types */
+ case MONO_TYPE_R8:
+ return LLVMVectorType (LLVMDoubleType (), 2);
+ case MONO_TYPE_R4:
+ return LLVMVectorType (LLVMFloatType (), 4);
+ default:
+ /* Callers must only pass the primitive element types handled above */
+ g_assert_not_reached ();
+ return NULL;
+ }
+}
+
+
/*
* type_to_llvm_type:
*
if (!ltype) {
int i, size;
LLVMTypeRef *eltypes;
+ char *name;
size = get_vtype_size (t);
for (i = 0; i < size; ++i)
eltypes [i] = LLVMInt8Type ();
- ltype = LLVMStructType (eltypes, size, FALSE);
+ name = mono_type_full_name (&klass->byval_arg);
+ ltype = LLVMStructCreateNamed (LLVMGetGlobalContext (), name);
+ LLVMStructSetBody (ltype, eltypes, size, FALSE);
g_hash_table_insert (ctx->lmodule->llvm_types, klass, ltype);
g_free (eltypes);
}
case OP_MINPD:
return "llvm.x86.sse2.min.pd";
case OP_MINPS:
- return "llvm.x86.sse2.min.ps";
+ return "llvm.x86.sse.min.ps";
case OP_PMIND_UN:
return "llvm.x86.sse41.pminud";
case OP_PMINW_UN:
return "llvm.x86.sse41.pminuw";
case OP_PMINB_UN:
return "llvm.x86.sse2.pminu.b";
+ case OP_PMINW:
+ return "llvm.x86.sse2.pmins.w";
case OP_MAXPD:
return "llvm.x86.sse2.max.pd";
case OP_MAXPS:
- return "llvm.x86.sse2.max.ps";
+ return "llvm.x86.sse.max.ps";
+ case OP_HADDPD:
+ return "llvm.x86.sse3.hadd.pd";
+ case OP_HADDPS:
+ return "llvm.x86.sse3.hadd.ps";
+ case OP_HSUBPD:
+ return "llvm.x86.sse3.hsub.pd";
+ case OP_HSUBPS:
+ return "llvm.x86.sse3.hsub.ps";
case OP_PMAXD_UN:
return "llvm.x86.sse41.pmaxud";
case OP_PMAXW_UN:
return "llvm.x86.sse41.pmaxuw";
case OP_PMAXB_UN:
return "llvm.x86.sse2.pmaxu.b";
+ case OP_ADDSUBPS:
+ return "llvm.x86.sse3.addsub.ps";
+ case OP_ADDSUBPD:
+ return "llvm.x86.sse3.addsub.pd";
+ case OP_EXTRACT_MASK:
+ return "llvm.x86.sse2.pmovmskb.128";
+ case OP_PSHRW:
+ case OP_PSHRW_REG:
+ return "llvm.x86.sse2.psrli.w";
+ case OP_PSHRD:
+ case OP_PSHRD_REG:
+ return "llvm.x86.sse2.psrli.d";
+ case OP_PSHRQ:
+ case OP_PSHRQ_REG:
+ return "llvm.x86.sse2.psrli.q";
+ case OP_PSHLW:
+ case OP_PSHLW_REG:
+ return "llvm.x86.sse2.pslli.w";
+ case OP_PSHLD:
+ case OP_PSHLD_REG:
+ return "llvm.x86.sse2.pslli.d";
+ case OP_PSHLQ:
+ case OP_PSHLQ_REG:
+ return "llvm.x86.sse2.pslli.q";
+ case OP_PSARW:
+ case OP_PSARW_REG:
+ return "llvm.x86.sse2.psrai.w";
+ case OP_PSARD:
+ case OP_PSARD_REG:
+ return "llvm.x86.sse2.psrai.d";
+ case OP_PADDB_SAT:
+ return "llvm.x86.sse2.padds.b";
+ case OP_PADDW_SAT:
+ return "llvm.x86.sse2.padds.w";
+ case OP_PSUBB_SAT:
+ return "llvm.x86.sse2.psubs.b";
+ case OP_PSUBW_SAT:
+ return "llvm.x86.sse2.psubs.w";
+ case OP_PADDB_SAT_UN:
+ return "llvm.x86.sse2.paddus.b";
+ case OP_PADDW_SAT_UN:
+ return "llvm.x86.sse2.paddus.w";
+ case OP_PSUBB_SAT_UN:
+ return "llvm.x86.sse2.psubus.b";
+ case OP_PSUBW_SAT_UN:
+ return "llvm.x86.sse2.psubus.w";
+ case OP_PAVGB_UN:
+ return "llvm.x86.sse2.pavg.b";
+ case OP_PAVGW_UN:
+ return "llvm.x86.sse2.pavg.w";
+ case OP_SQRTPS:
+ return "llvm.x86.sse.sqrt.ps";
+ case OP_SQRTPD:
+ return "llvm.x86.sse2.sqrt.pd";
+ case OP_RSQRTPS:
+ return "llvm.x86.sse.rsqrt.ps";
+ case OP_RCPPS:
+ return "llvm.x86.sse.rcp.ps";
+ case OP_PCMPEQB:
+ return "llvm.x86.sse2.pcmpeq.b";
+ case OP_PCMPEQW:
+ return "llvm.x86.sse2.pcmpeq.w";
+ case OP_PCMPEQD:
+ return "llvm.x86.sse2.pcmpeq.d";
+ case OP_PCMPEQQ:
+ return "llvm.x86.sse41.pcmpeqq";
+ case OP_PCMPGTB:
+ return "llvm.x86.sse2.pcmpgt.b";
+ case OP_CVTDQ2PD:
+ return "llvm.x86.sse2.cvtdq2pd";
+ case OP_CVTDQ2PS:
+ return "llvm.x86.sse2.cvtdq2ps";
+ case OP_CVTPD2DQ:
+ return "llvm.x86.sse2.cvtpd2dq";
+ case OP_CVTPS2DQ:
+ return "llvm.x86.sse2.cvtps2dq";
+ case OP_CVTPD2PS:
+ return "llvm.x86.sse2.cvtpd2ps";
+ case OP_CVTPS2PD:
+ return "llvm.x86.sse2.cvtps2pd";
+ case OP_CVTTPD2DQ:
+ return "llvm.x86.sse2.cvttpd2dq";
+ case OP_CVTTPS2DQ:
+ return "llvm.x86.sse2.cvttps2dq";
+ case OP_COMPPS:
+ return "llvm.x86.sse.cmp.ps";
+ case OP_COMPPD:
+ return "llvm.x86.sse2.cmp.pd";
+ case OP_PACKW:
+ return "llvm.x86.sse2.packsswb.128";
+ case OP_PACKD:
+ return "llvm.x86.sse2.packssdw.128";
+ case OP_PACKW_UN:
+ return "llvm.x86.sse2.packuswb.128";
+ case OP_PACKD_UN:
+ return "llvm.x86.sse41.packusdw";
+ case OP_PMULW_HIGH:
+ return "llvm.x86.sse2.pmulh.w";
+ case OP_PMULW_HIGH_UN:
+ return "llvm.x86.sse2.pmulhu.w";
#endif
default:
g_assert_not_reached ();
}
}
+/*
+ * simd_op_to_llvm_type:
+ *
+ *   Return the 128 bit LLVM SIMD vector type corresponding to the source
+ * operand of the SIMD opcode OPCODE. Only meaningful on x86/amd64; on other
+ * targets this returns NULL.
+ */
+static LLVMTypeRef
+simd_op_to_llvm_type (int opcode)
+{
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ switch (opcode) {
+ /* EXTRACT/EXPAND ops: the vector type is determined by the element size suffix */
+ case OP_EXTRACT_R8:
+ case OP_EXPAND_R8:
+ return type_to_simd_type (MONO_TYPE_R8);
+ case OP_EXTRACT_I8:
+ case OP_EXPAND_I8:
+ return type_to_simd_type (MONO_TYPE_I8);
+ case OP_EXTRACT_I4:
+ case OP_EXPAND_I4:
+ return type_to_simd_type (MONO_TYPE_I4);
+ case OP_EXTRACT_I2:
+ case OP_EXTRACT_U2:
+ case OP_EXTRACTX_U2:
+ case OP_EXPAND_I2:
+ return type_to_simd_type (MONO_TYPE_I2);
+ case OP_EXTRACT_I1:
+ case OP_EXTRACT_U1:
+ case OP_EXPAND_I1:
+ return type_to_simd_type (MONO_TYPE_I1);
+ case OP_EXPAND_R4:
+ return type_to_simd_type (MONO_TYPE_R4);
+ /* Conversion ops: the type returned is that of the conversion SOURCE */
+ case OP_CVTDQ2PD:
+ case OP_CVTDQ2PS:
+ return type_to_simd_type (MONO_TYPE_I4);
+ case OP_CVTPD2DQ:
+ case OP_CVTPD2PS:
+ case OP_CVTTPD2DQ:
+ return type_to_simd_type (MONO_TYPE_R8);
+ case OP_CVTPS2DQ:
+ case OP_CVTPS2PD:
+ case OP_CVTTPS2DQ:
+ return type_to_simd_type (MONO_TYPE_R4);
+ case OP_EXTRACT_MASK:
+ return type_to_simd_type (MONO_TYPE_I1);
+ case OP_SQRTPS:
+ case OP_RSQRTPS:
+ case OP_RCPPS:
+ case OP_DUPPS_LOW:
+ case OP_DUPPS_HIGH:
+ return type_to_simd_type (MONO_TYPE_R4);
+ case OP_SQRTPD:
+ case OP_DUPPD:
+ return type_to_simd_type (MONO_TYPE_R8);
+ default:
+ g_assert_not_reached ();
+ return NULL;
+ }
+#else
+ return NULL;
+#endif
+}
+
+
/*
* get_bb:
*
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMInt32Type () && (dtype == LLVMInt16Type () || dtype == LLVMInt8Type ()))
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
+ if (stype == LLVMInt16Type () && dtype == LLVMInt8Type ())
+ return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMDoubleType () && dtype == LLVMFloatType ())
return LLVMBuildFPTrunc (ctx->builder, v, dtype, "");
return LLVMBuildBitCast (ctx->builder, LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""), dtype, "");
#endif
+ if (LLVMGetTypeKind (stype) == LLVMVectorTypeKind && LLVMGetTypeKind (dtype) == LLVMVectorTypeKind)
+ return LLVMBuildBitCast (ctx->builder, v, dtype, "");
+
LLVMDumpValue (v);
LLVMDumpValue (LLVMConstNull (dtype));
g_assert_not_reached ();
* Might have to zero extend since llvm doesn't have
* unsigned types.
*/
- if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2)
+ if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_CHAR || t->type == MONO_TYPE_BOOLEAN)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt32Type (), "");
else if (t->type == MONO_TYPE_U8)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), "");
}
pindexes = g_new0 (int, sig->param_count);
- param_types = g_new0 (LLVMTypeRef, (sig->param_count * 2) + 2);
+ param_types = g_new0 (LLVMTypeRef, (sig->param_count * 2) + 3);
pindex = 0;
if (cinfo && cinfo->rgctx_arg) {
if (sinfo)
param_types [pindex] = IntPtrType ();
pindex ++;
}
- if (cinfo && cinfo->imt_arg) {
+ if (cinfo && cinfo->imt_arg && IS_LLVM_MONO_BRANCH) {
if (sinfo)
sinfo->imt_arg_pindex = pindex;
param_types [pindex] = IntPtrType ();
if (!callee_name)
return NULL;
+ if (ctx->cfg->compile_aot)
+ /* Add a patch so referenced wrappers can be compiled in full aot mode */
+ mono_add_patch_info (ctx->cfg, 0, type, data);
+
// FIXME: Locking
callee = g_hash_table_lookup (ctx->lmodule->plt_entries, callee_name);
if (!callee) {
return -1;
}
+/*
+ * set_metadata_flag:
+ *
+ *   Attach a custom metadata node named FLAG_NAME to the instruction V. This is
+ * used to pass mono specific information to LLVM. A no-op when not compiling
+ * against the LLVM mono branch, since stock LLVM ignores/drops this metadata.
+ */
+static void
+set_metadata_flag (LLVMValueRef v, const char *flag_name)
+{
+ LLVMValueRef md_arg;
+ int md_kind;
+
+ if (!IS_LLVM_MONO_BRANCH)
+ return;
+
+ md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
+ /* NOTE(review): length 4 matches the literal "mono" exactly */
+ md_arg = LLVMMDString ("mono", 4);
+ LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
+}
+
+
/*
* emit_call:
*
emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, const char *name, gboolean is_faulting)
{
const char *intrins_name;
- LLVMValueRef args [16];
+ LLVMValueRef args [16], res;
+ LLVMTypeRef addr_type;
if (is_faulting && bb->region != -1 && IS_LLVM_MONO_BRANCH) {
/*
* We handle loads which can fault by calling a mono specific intrinsic
* using an invoke, so they are handled properly inside try blocks.
+ * We can't use this outside clauses, since LLVM optimizes intrinsics which
+ * are marked with IntrReadArgMem.
*/
switch (size) {
case 1:
g_assert_not_reached ();
}
+ addr_type = LLVMTypeOf (addr);
+ if (addr_type == LLVMPointerType (LLVMDoubleType (), 0) || addr_type == LLVMPointerType (LLVMFloatType (), 0))
+ addr = LLVMBuildBitCast (*builder_ref, addr, LLVMPointerType (LLVMIntType (size * 8), 0), "");
+
args [0] = addr;
args [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
args [2] = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE);
- return emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 3);
+ res = emit_call (ctx, bb, builder_ref, LLVMGetNamedFunction (ctx->module, intrins_name), args, 3);
+
+ if (addr_type == LLVMPointerType (LLVMDoubleType (), 0))
+ res = LLVMBuildBitCast (*builder_ref, res, LLVMDoubleType (), "");
+ else if (addr_type == LLVMPointerType (LLVMFloatType (), 0))
+ res = LLVMBuildBitCast (*builder_ref, res, LLVMFloatType (), "");
+
+ return res;
} else {
+ LLVMValueRef res;
+
/*
* We emit volatile loads for loads which can fault, because otherwise
* LLVM will generate invalid code when encountering a load from a
* NULL address.
*/
- return mono_llvm_build_load (*builder_ref, addr, name, is_faulting);
+ res = mono_llvm_build_load (*builder_ref, addr, name, is_faulting);
+
+ /* Mark it with a custom metadata */
+ /*
+ if (is_faulting)
+ set_metadata_flag (res, "mono.faulting.load");
+ */
+
+ return res;
}
}
const char *intrins_name;
LLVMValueRef args [16];
- if (is_faulting && bb->region != -1) {
+ if (is_faulting && bb->region != -1 && IS_LLVM_MONO_BRANCH) {
switch (size) {
case 1:
intrins_name = "llvm.mono.store.i8.p0i8";
g_assert_not_reached ();
}
+ if (LLVMTypeOf (value) == LLVMDoubleType () || LLVMTypeOf (value) == LLVMFloatType ()) {
+ value = LLVMBuildBitCast (*builder_ref, value, LLVMIntType (size * 8), "");
+ addr = LLVMBuildBitCast (*builder_ref, addr, LLVMPointerType (LLVMIntType (size * 8), 0), "");
+ }
+
args [0] = value;
args [1] = addr;
args [2] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
LLVMBuildCondBr (ctx->builder, cmp, ex_bb, noex_bb);
- exc_class = mono_class_from_name (mono_defaults.corlib, "System", exc_type);
+ exc_class = mono_class_from_name (mono_get_corlib (), "System", exc_type);
g_assert (exc_class);
/* Emit exception throwing code */
LLVMTypeRef sig;
const char *icall_name;
- MonoMethodSignature *throw_sig = mono_metadata_signature_alloc (mono_defaults.corlib, 2);
- throw_sig->ret = &mono_defaults.void_class->byval_arg;
- throw_sig->params [0] = &mono_defaults.int32_class->byval_arg;
+ MonoMethodSignature *throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 2);
+ throw_sig->ret = &mono_get_void_class ()->byval_arg;
+ throw_sig->params [0] = &mono_get_int32_class ()->byval_arg;
if (IS_LLVM_MONO_BRANCH) {
- icall_name = "mono_arch_llvm_throw_corlib_exception_abs";
- throw_sig->params [1] = &mono_defaults.int_class->byval_arg;
+ icall_name = "llvm_throw_corlib_exception_abs_trampoline";
+ throw_sig->params [1] = &mono_get_intptr_class ()->byval_arg;
} else {
- icall_name = "mono_arch_llvm_throw_corlib_exception";
- throw_sig->params [1] = &mono_defaults.int32_class->byval_arg;
+ icall_name = "llvm_throw_corlib_exception_trampoline";
+ throw_sig->params [1] = &mono_get_int32_class ()->byval_arg;
}
sig = sig_to_llvm_sig (ctx, throw_sig);
if (ctx->cfg->compile_aot) {
callee = get_plt_entry (ctx, sig, MONO_PATCH_INFO_INTERNAL_METHOD, icall_name);
} else {
- callee = LLVMAddFunction (ctx->module, "llvm_throw_corlib_exception", sig_to_llvm_sig (ctx, throw_sig));
+ callee = LLVMAddFunction (ctx->module, "llvm_throw_corlib_exception_trampoline", sig_to_llvm_sig (ctx, throw_sig));
/*
* Differences between the LLVM/non-LLVM throw corlib exception trampoline:
if (!MONO_TYPE_ISSTRUCT (sig->params [i]))
emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg);
- if (sig->hasthis && !cfg->rgctx_var) {
-#if LLVM_CHECK_VERSION (2, 8)
- LLVMValueRef this_alloc, md_arg;
- int md_kind;
+ if (sig->hasthis && !cfg->rgctx_var && cfg->generic_sharing_context) {
+ LLVMValueRef this_alloc;
/*
* The exception handling code needs the location where the this argument was
* with the "mono.this" custom metadata to tell llvm that it needs to save its
* location into the LSDA.
*/
- // FIXME: Do this for gshared only
this_alloc = mono_llvm_build_alloca (builder, IntPtrType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
/* This volatile store will keep the alloca alive */
mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE);
- md_kind = LLVMGetMDKindID ("mono.this", strlen ("mono.this"));
- md_arg = LLVMMDString ("this", 4);
- LLVMSetMetadata (this_alloc, md_kind, LLVMMDNode (&md_arg, 1));
-#endif
+ set_metadata_flag (this_alloc, "mono.this");
}
if (cfg->rgctx_var) {
-#if LLVM_CHECK_VERSION (2, 8)
- LLVMValueRef rgctx_alloc, store, md_arg;
- int md_kind;
+ LLVMValueRef rgctx_alloc, store;
/*
* We handle the rgctx arg similarly to the this pointer.
/* This volatile store will keep the alloca alive */
store = mono_llvm_build_store (builder, ctx->rgctx_arg, rgctx_alloc, TRUE);
- md_kind = LLVMGetMDKindID ("mono.this", strlen ("mono.this"));
- md_arg = LLVMMDString ("this", 4);
- LLVMSetMetadata (rgctx_alloc, md_kind, LLVMMDNode (&md_arg, 1));
-#endif
+ set_metadata_flag (rgctx_alloc, "mono.this");
}
/*
if (bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER))
g_hash_table_insert (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)), bb);
if (bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER) && bb->in_scount == 0) {
- LLVMValueRef val = LLVMBuildAlloca (builder, LLVMInt32Type (), "");
+ char name [128];
+ LLVMValueRef val;
+
+ sprintf (name, "finally_ind_bb%d", bb->block_num);
+ val = LLVMBuildAlloca (builder, LLVMInt32Type (), name);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), val);
ctx->bblocks [bb->block_num].finally_ind = val;
+
+ /*
+ * Create a new bblock which CALL_HANDLER can branch to, because branching to the
+ * LLVM bblock containing the call to llvm.eh.selector causes problems for the
+ * LLVM optimizer passes.
+ */
+ sprintf (name, "BB_%d_CALL_HANDLER_TARGET", bb->block_num);
+ ctx->bblocks [bb->block_num].call_handler_target_bb = LLVMAppendBasicBlock (ctx->lmethod, name);
}
}
LLVMValueRef *args;
LLVMCallInfo *cinfo;
GSList *l;
- int i;
+ int i, len;
gboolean vretaddr;
LLVMTypeRef llvm_sig;
gpointer target;
if (call->rgctx_arg_reg && !IS_LLVM_MONO_BRANCH)
LLVM_FAILURE (ctx, "rgctx reg in call");
+ if (call->rgctx_reg && !IS_LLVM_MONO_BRANCH) {
+ /*
+ * It might be possible to support this by creating a static rgctx trampoline, but
+ * common_call_trampoline () would patch callsites to call the trampoline, which
+ * would be incorrect if the rgctx arg is computed dynamically.
+ */
+ LLVM_FAILURE (ctx, "rgctx reg");
+ }
+
cinfo = call->cinfo;
if (call->rgctx_arg_reg)
cinfo->rgctx_arg = TRUE;
MonoJumpInfo *abs_ji = g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (abs_ji) {
/*
- * The monitor entry/exit trampolines might have
+ * FIXME: Some trampolines might have
* their own calling convention on some platforms.
*/
#ifndef TARGET_AMD64
- if (abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER || abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT)
- LLVM_FAILURE (ctx, "monitor enter/exit");
+ if (abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER || abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT || abs_ji->type == MONO_PATCH_INFO_GENERIC_CLASS_INIT)
+ LLVM_FAILURE (ctx, "trampoline with own cconv");
#endif
target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, abs_ji, FALSE);
LLVMAddGlobalMapping (ee, callee, target);
g_assert (ins->inst_offset % size == 0);
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
- // FIXME: mono_arch_get_vcall_slot () can't decode the code
- // generated by LLVM
- //LLVM_FAILURE (ctx, "virtual call");
-
/*
* When using the llvm mono branch, we can support IMT directly, otherwise
* we need to call a trampoline.
/*
* Collect and convert arguments
*/
- args = alloca (sizeof (LLVMValueRef) * ((sig->param_count * 2) + sig->hasthis + vretaddr + call->rgctx_reg));
+ len = sizeof (LLVMValueRef) * ((sig->param_count * 2) + sig->hasthis + vretaddr + call->rgctx_reg);
+ args = alloca (len);
+ memset (args, 0, len);
l = call->out_ireg_args;
if (IS_LLVM_MONO_BRANCH) {
- if (call->rgctx_arg_reg)
+ if (call->rgctx_arg_reg) {
+ g_assert (values [call->rgctx_arg_reg]);
args [sinfo.rgctx_arg_pindex] = values [call->rgctx_arg_reg];
- if (call->imt_arg_reg)
+ }
+ if (call->imt_arg_reg) {
+ g_assert (values [call->imt_arg_reg]);
args [sinfo.imt_arg_pindex] = values [call->imt_arg_reg];
+ }
}
if (vretaddr) {
/*
* Modify cconv and parameter attributes to pass rgctx/imt correctly.
*/
- if (call->rgctx_arg_reg)
+#if defined(MONO_ARCH_IMT_REG) && defined(MONO_ARCH_RGCTX_REG)
+ g_assert (MONO_ARCH_IMT_REG == MONO_ARCH_RGCTX_REG);
+#endif
+ /* The two can't be used together, so use only one LLVM calling conv to pass them */
+ g_assert (!(call->rgctx_arg_reg && call->imt_arg_reg));
+ if (!sig->pinvoke)
LLVMSetInstructionCallConv (lcall, LLVMMono1CallConv);
- else if (call->imt_arg_reg)
- LLVMSetInstructionCallConv (lcall, LLVMMono2CallConv);
if (call->rgctx_arg_reg)
LLVMAddInstrAttribute (lcall, 1 + sinfo.rgctx_arg_pindex, LLVMInRegAttribute);
if (bb->flags & BB_EXCEPTION_HANDLER) {
LLVMTypeRef i8ptr;
LLVMValueRef eh_selector, eh_exception, personality, args [4];
+ LLVMBasicBlockRef target_bb;
MonoInst *exvar;
static gint32 mapping_inited;
static int ti_generator;
/*
* LLVM asserts if llvm.eh.selector is called from a bblock which
* doesn't have an invoke pointing at it.
+ * Update: LLVM no longer asserts, but some tests in exceptions.exe now fail.
*/
LLVM_FAILURE (ctx, "handler without invokes");
}
* Enabling this causes llc to crash:
* http://llvm.org/bugs/show_bug.cgi?id=6102
*/
- LLVM_FAILURE (ctx, "aot+clauses");
+ //LLVM_FAILURE (ctx, "aot+clauses");
} else {
- /* exception_cb will decode this */
- ti = g_malloc (sizeof (gint32));
+ /*
+ * After the cfg mempool is freed, the type info will point to stale memory,
+ * but this is not a problem, since we decode it once in exception_cb during
+ * compilation.
+ */
+ ti = mono_mempool_alloc (cfg->mempool, sizeof (gint32));
*(gint32*)ti = clause_index;
type_info = LLVMAddGlobal (module, i8ptr, ti_name);
values [exvar->dreg] = LLVMBuildCall (builder, eh_exception, NULL, 0, "");
emit_volatile_store (ctx, exvar->dreg);
}
+
+ /* Start a new bblock which CALL_HANDLER can branch to */
+ target_bb = bblocks [bb->block_num].call_handler_target_bb;
+ if (target_bb) {
+ LLVMBuildBr (builder, target_bb);
+
+ ctx->builder = builder = create_builder (ctx);
+ LLVMPositionBuilderAtEnd (ctx->builder, target_bb);
+
+ ctx->bblocks [bb->block_num].end_bblock = target_bb;
+ }
}
has_terminator = FALSE;
case OP_COMPARE:
case OP_ICOMPARE_IMM:
case OP_LCOMPARE_IMM:
- case OP_COMPARE_IMM:
-#ifdef TARGET_AMD64
- case OP_AMD64_ICOMPARE_MEMBASE_REG:
- case OP_AMD64_ICOMPARE_MEMBASE_IMM:
-#endif
-#ifdef TARGET_X86
- case OP_X86_COMPARE_MEMBASE_REG:
- case OP_X86_COMPARE_MEMBASE_IMM:
-#endif
- {
- CompRelation rel;
- LLVMValueRef cmp;
-
- if (ins->next->opcode == OP_NOP)
- break;
-
- if (ins->next->opcode == OP_BR)
- /* The comparison result is not needed */
- continue;
-
- rel = mono_opcode_to_cond (ins->next->opcode);
-
- /* Used for implementing bound checks */
-#ifdef TARGET_AMD64
- if ((ins->opcode == OP_AMD64_ICOMPARE_MEMBASE_REG) || (ins->opcode == OP_AMD64_ICOMPARE_MEMBASE_IMM)) {
- int size = 4;
- LLVMValueRef index;
- LLVMTypeRef t;
-
- t = LLVMInt32Type ();
-
- g_assert (ins->inst_offset % size == 0);
- index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
-
- lhs = LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (t, 0)), &index, 1, ""), "");
- }
- if (ins->opcode == OP_AMD64_ICOMPARE_MEMBASE_IMM) {
- lhs = convert (ctx, lhs, LLVMInt32Type ());
- rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
- }
- if (ins->opcode == OP_AMD64_ICOMPARE_MEMBASE_REG)
- rhs = convert (ctx, rhs, LLVMInt32Type ());
-#endif
+ case OP_COMPARE_IMM: {
+ CompRelation rel;
+ LLVMValueRef cmp;
-#ifdef TARGET_X86
- if ((ins->opcode == OP_X86_COMPARE_MEMBASE_REG) || (ins->opcode == OP_X86_COMPARE_MEMBASE_IMM)) {
- int size = 4;
- LLVMValueRef index;
- LLVMTypeRef t;
+ if (ins->next->opcode == OP_NOP)
+ break;
- t = LLVMInt32Type ();
+ if (ins->next->opcode == OP_BR)
+ /* The comparison result is not needed */
+ continue;
- g_assert (ins->inst_offset % size == 0);
- index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
+ rel = mono_opcode_to_cond (ins->next->opcode);
- lhs = LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (t, 0)), &index, 1, ""), "");
- }
- if (ins->opcode == OP_X86_COMPARE_MEMBASE_IMM) {
- lhs = convert (ctx, lhs, LLVMInt32Type ());
- rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
- }
- if (ins->opcode == OP_X86_COMPARE_MEMBASE_REG)
- rhs = convert (ctx, rhs, LLVMInt32Type ());
-#endif
+ if (ins->opcode == OP_ICOMPARE_IMM) {
+ lhs = convert (ctx, lhs, LLVMInt32Type ());
+ rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
+ }
+ if (ins->opcode == OP_LCOMPARE_IMM) {
+ lhs = convert (ctx, lhs, LLVMInt64Type ());
+ rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
+ }
+ if (ins->opcode == OP_LCOMPARE) {
+ lhs = convert (ctx, lhs, LLVMInt64Type ());
+ rhs = convert (ctx, rhs, LLVMInt64Type ());
+ }
+ if (ins->opcode == OP_ICOMPARE) {
+ lhs = convert (ctx, lhs, LLVMInt32Type ());
+ rhs = convert (ctx, rhs, LLVMInt32Type ());
+ }
- if (ins->opcode == OP_ICOMPARE_IMM) {
- lhs = convert (ctx, lhs, LLVMInt32Type ());
- rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
- }
- if (ins->opcode == OP_LCOMPARE_IMM) {
- lhs = convert (ctx, lhs, LLVMInt64Type ());
- rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
- }
- if (ins->opcode == OP_LCOMPARE) {
- lhs = convert (ctx, lhs, LLVMInt64Type ());
- rhs = convert (ctx, rhs, LLVMInt64Type ());
- }
- if (ins->opcode == OP_ICOMPARE) {
- lhs = convert (ctx, lhs, LLVMInt32Type ());
- rhs = convert (ctx, rhs, LLVMInt32Type ());
- }
+ if (lhs && rhs) {
+ if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
+ rhs = convert (ctx, rhs, LLVMTypeOf (lhs));
+ else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind)
+ lhs = convert (ctx, lhs, LLVMTypeOf (rhs));
+ }
- if (lhs && rhs) {
- if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
- rhs = convert (ctx, rhs, LLVMTypeOf (lhs));
- else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind)
- lhs = convert (ctx, lhs, LLVMTypeOf (rhs));
+ /* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */
+ if (ins->opcode == OP_FCOMPARE)
+ cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
+ else if (ins->opcode == OP_COMPARE_IMM)
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
+ else if (ins->opcode == OP_LCOMPARE_IMM) {
+ if (SIZEOF_REGISTER == 4 && COMPILE_LLVM (cfg)) {
+ /* The immediate is encoded in two fields */
+ guint64 l = ((guint64)(guint32)ins->inst_offset << 32) | ((guint32)ins->inst_imm);
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, LLVMInt64Type ()), LLVMConstInt (LLVMInt64Type (), l, FALSE), "");
+ } else {
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, LLVMInt64Type ()), LLVMConstInt (LLVMInt64Type (), ins->inst_imm, FALSE), "");
}
-
- /* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */
- if (ins->opcode == OP_FCOMPARE)
- cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
- else if (ins->opcode == OP_COMPARE_IMM)
- cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
- else if (ins->opcode == OP_COMPARE)
- cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
- else
- cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
-
- if (MONO_IS_COND_BRANCH_OP (ins->next)) {
- LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb));
- has_terminator = TRUE;
- } else if (MONO_IS_SETCC (ins->next)) {
- sprintf (dname_buf, "t%d", ins->next->dreg);
- dname = dname_buf;
- values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
-
- /* Add stores for volatile variables */
- emit_volatile_store (ctx, ins->next->dreg);
- } else if (MONO_IS_COND_EXC (ins->next)) {
- emit_cond_system_exception (ctx, bb, ins->next->inst_p1, cmp);
- CHECK_FAILURE (ctx);
- builder = ctx->builder;
+ }
+ else if (ins->opcode == OP_COMPARE)
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
+ else
+ cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
+
+ if (MONO_IS_COND_BRANCH_OP (ins->next)) {
+ if (ins->next->inst_true_bb == ins->next->inst_false_bb) {
+ /*
+ * If the target bb contains PHI instructions, LLVM requires
+ * two PHI entries for this bblock, while we only generate one.
+ * So convert this to an unconditional bblock. (bxc #171).
+ */
+ LLVMBuildBr (builder, get_bb (ctx, ins->next->inst_true_bb));
} else {
- LLVM_FAILURE (ctx, "next");
+ LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb));
}
+ has_terminator = TRUE;
+ } else if (MONO_IS_SETCC (ins->next)) {
+ sprintf (dname_buf, "t%d", ins->next->dreg);
+ dname = dname_buf;
+ values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
- ins = ins->next;
- break;
+ /* Add stores for volatile variables */
+ emit_volatile_store (ctx, ins->next->dreg);
+ } else if (MONO_IS_COND_EXC (ins->next)) {
+ emit_cond_system_exception (ctx, bb, ins->next->inst_p1, cmp);
+ CHECK_FAILURE (ctx);
+ builder = ctx->builder;
+ } else {
+ LLVM_FAILURE (ctx, "next");
}
+
+ ins = ins->next;
+ break;
+ }
case OP_FCEQ:
case OP_FCLT:
case OP_FCLT_UN:
case OP_MOVE:
case OP_LMOVE:
case OP_XMOVE:
+ case OP_SETFRET:
g_assert (lhs);
values [ins->dreg] = lhs;
break;
case OP_LOCALLOC: {
LLVMValueRef v, size;
- size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, lhs, LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), "");
+ size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), "");
v = mono_llvm_build_alloca (builder, LLVMInt8Type (), size, MONO_ARCH_FRAME_ALIGNMENT, "");
case OP_LOADU4_MEM:
case OP_LOAD_MEM: {
int size = 8;
- LLVMValueRef index, addr;
+ LLVMValueRef base, index, addr;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_volatile = (ins->flags & MONO_INST_FAULT);
if ((ins->opcode == OP_LOADI8_MEM) || (ins->opcode == OP_LOAD_MEM) || (ins->opcode == OP_LOADI4_MEM) || (ins->opcode == OP_LOADU4_MEM) || (ins->opcode == OP_LOADU1_MEM) || (ins->opcode == OP_LOADU2_MEM)) {
addr = LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE);
- } else if (ins->inst_offset == 0) {
- addr = values [ins->inst_basereg];
- } else if (ins->inst_offset % size != 0) {
- /* Unaligned load */
- index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
- addr = LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
- index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
- addr = LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (t, 0)), &index, 1, "");
+ /* _MEMBASE */
+ base = lhs;
+
+ if (ins->inst_offset == 0) {
+ addr = base;
+ } else if (ins->inst_offset % size != 0) {
+ /* Unaligned load */
+ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
+ addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
+ } else {
+ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
+ addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
+ }
}
addr = convert (ctx, addr, LLVMPointerType (t, 0));
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, dname, is_volatile);
+ if (!is_volatile && (ins->flags & MONO_INST_CONSTANT_LOAD)) {
+ /*
+ * These will signal LLVM that these loads do not alias any stores, and
+ * they can't fail, allowing them to be hoisted out of loops.
+ */
+ set_metadata_flag (values [ins->dreg], "mono.noalias");
+ set_metadata_flag (values [ins->dreg], "mono.nofail.load");
+ }
+
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)
gboolean sext = FALSE, zext = FALSE;
gboolean is_volatile = (ins->flags & MONO_INST_FAULT);
+ if (!values [ins->inst_destbasereg])
+ LLVM_FAILURE (ctx, "inst_destbasereg");
+
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
if (ins->inst_offset % size != 0) {
/* Might have instructions after this */
while (ins->next) {
MonoInst *next = ins->next;
+ /*
+ * FIXME: If later code uses the regs defined by these instructions,
+ * compilation will fail.
+ */
MONO_DELETE_INS (bb, next);
}
break;
values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, "llvm.cos.f64"), args, 1, dname);
break;
}
- /* test_0_sqrt_nan fails with LLVM */
- /*
- case OP_SQRT: {
- LLVMValueRef args [1];
-
- args [0] = lhs;
- values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, "llvm.sqrt.f64"), args, 1, dname);
- break;
- }
- */
+ case OP_SQRT: {
+ LLVMValueRef args [1];
+#if 0
+ /* This no longer seems to happen */
+ /*
+ * LLVM optimizes sqrt(nan) into undefined in
+ * lib/Analysis/ConstantFolding.cpp
+ * Also, sqrt(NegativeInfinity) is optimized into 0.
+ */
+ LLVM_FAILURE (ctx, "sqrt");
+#endif
+ args [0] = lhs;
+ values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, "llvm.sqrt.f64"), args, 1, dname);
+ break;
+ }
case OP_ABS: {
LLVMValueRef args [1];
}
case OP_IMIN:
- case OP_LMIN: {
- LLVMValueRef v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, "");
- values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
- break;
- }
+ case OP_LMIN:
case OP_IMAX:
- case OP_LMAX: {
- LLVMValueRef v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, "");
- values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
- break;
- }
+ case OP_LMAX:
case OP_IMIN_UN:
- case OP_LMIN_UN: {
- LLVMValueRef v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, "");
- values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
- break;
- }
+ case OP_LMIN_UN:
case OP_IMAX_UN:
case OP_LMAX_UN: {
- LLVMValueRef v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, "");
+ LLVMValueRef v;
+
+ lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
+ rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
+
+ switch (ins->opcode) {
+ case OP_IMIN:
+ case OP_LMIN:
+ v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, "");
+ break;
+ case OP_IMAX:
+ case OP_LMAX:
+ v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, "");
+ break;
+ case OP_IMIN_UN:
+ case OP_LMIN_UN:
+ v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, "");
+ break;
+ case OP_IMAX_UN:
+ case OP_LMAX_UN:
+ v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, "");
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
break;
}
case OP_MEMORY_BARRIER: {
LLVMValueRef args [5];
+#ifdef TARGET_ARM
+ /* Not yet supported by llc on arm */
+ LLVM_FAILURE (ctx, "memory-barrier+arm");
+#endif
+
for (i = 0; i < 5; ++i)
args [i] = LLVMConstInt (LLVMInt1Type (), TRUE, TRUE);
}
case OP_RELAXED_NOP: {
#if defined(TARGET_AMD64) || defined(TARGET_X86)
- /* No way to get LLVM to emit this */
- LLVM_FAILURE (ctx, "relaxed_nop");
+ if (IS_LLVM_MONO_BRANCH)
+ emit_call (ctx, bb, &builder, LLVMGetNamedFunction (ctx->module, "llvm.x86.sse2.pause"), NULL, 0);
+ else
+ /* No way to get LLVM to emit this */
+ LLVM_FAILURE (ctx, "relaxed_nop");
+ break;
#else
break;
#endif
switch (ins->opcode) {
case OP_STOREV_MEMBASE:
+ if (cfg->gen_write_barriers && klass->has_references && ins->inst_destbasereg != cfg->frame_reg) {
+ /* FIXME: Emit write barriers like in mini_emit_stobj () */
+ LLVM_FAILURE (ctx, "storev_membase + write barriers");
+ break;
+ }
if (!addresses [ins->sreg1]) {
/* SIMD */
g_assert (values [ins->sreg1]);
default:
g_assert_not_reached ();
}
+ CHECK_FAILURE (ctx);
if (done)
break;
values [ins->dreg] = mono_llvm_build_aligned_load (builder, src, "", FALSE, 1);
break;
}
+ case OP_STOREX_MEMBASE: {
+ LLVMTypeRef t = LLVMTypeOf (values [ins->sreg1]);
+ LLVMValueRef dest;
+
+ dest = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0));
+ mono_llvm_build_aligned_store (builder, values [ins->sreg1], dest, FALSE, 1);
+ break;
+ }
case OP_PADDB:
case OP_PADDW:
case OP_PADDD:
case OP_PXOR:
values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, "");
break;
+ case OP_PMULW:
+ case OP_PMULD:
+ values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, "");
+ break;
case OP_ANDPS:
case OP_ANDNPS:
case OP_ORPS:
case OP_MINPS:
case OP_MAXPD:
case OP_MAXPS:
+ case OP_ADDSUBPD:
+ case OP_ADDSUBPS:
case OP_PMIND_UN:
case OP_PMINW_UN:
case OP_PMINB_UN:
+ case OP_PMINW:
case OP_PMAXD_UN:
case OP_PMAXW_UN:
- case OP_PMAXB_UN: {
+ case OP_PMAXB_UN:
+ case OP_HADDPD:
+ case OP_HADDPS:
+ case OP_HSUBPD:
+ case OP_HSUBPS:
+ case OP_PADDB_SAT:
+ case OP_PADDW_SAT:
+ case OP_PSUBB_SAT:
+ case OP_PSUBW_SAT:
+ case OP_PADDB_SAT_UN:
+ case OP_PADDW_SAT_UN:
+ case OP_PSUBB_SAT_UN:
+ case OP_PSUBW_SAT_UN:
+ case OP_PAVGB_UN:
+ case OP_PAVGW_UN:
+ case OP_PCMPEQB:
+ case OP_PCMPEQW:
+ case OP_PCMPEQD:
+ case OP_PCMPEQQ:
+ case OP_PCMPGTB:
+ case OP_PACKW:
+ case OP_PACKD:
+ case OP_PACKW_UN:
+ case OP_PACKD_UN:
+ case OP_PMULW_HIGH:
+ case OP_PMULW_HIGH_UN: {
LLVMValueRef args [2];
args [0] = lhs;
case OP_EXTRACT_I4:
case OP_EXTRACT_I2:
case OP_EXTRACT_U2:
+ case OP_EXTRACTX_U2:
case OP_EXTRACT_I1:
case OP_EXTRACT_U1: {
LLVMTypeRef t;
gboolean zext = FALSE;
+ t = simd_op_to_llvm_type (ins->opcode);
+
switch (ins->opcode) {
case OP_EXTRACT_R8:
- t = LLVMVectorType (LLVMDoubleType (), 2);
- break;
case OP_EXTRACT_I8:
- t = LLVMVectorType (LLVMInt64Type (), 2);
- break;
case OP_EXTRACT_I4:
- t = LLVMVectorType (LLVMInt32Type (), 4);
- break;
case OP_EXTRACT_I2:
- t = LLVMVectorType (LLVMInt16Type (), 8);
- break;
- case OP_EXTRACT_U2:
- t = LLVMVectorType (LLVMInt16Type (), 8);
- zext = TRUE;
- break;
case OP_EXTRACT_I1:
- t = LLVMVectorType (LLVMInt8Type (), 16);
break;
+ case OP_EXTRACT_U2:
+ case OP_EXTRACTX_U2:
case OP_EXTRACT_U1:
- t = LLVMVectorType (LLVMInt8Type (), 16);
zext = TRUE;
break;
default:
values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), "");
break;
}
-#endif
+
+ case OP_EXPAND_I1:
+ case OP_EXPAND_I2:
+ case OP_EXPAND_I4:
+ case OP_EXPAND_I8:
+ case OP_EXPAND_R4:
+ case OP_EXPAND_R8: {
+ LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
+ LLVMValueRef mask [16], v;
+
+ for (i = 0; i < 16; ++i)
+ mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
+
+ v = convert (ctx, values [ins->sreg1], LLVMGetElementType (t));
+
+ values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (t), v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
+ values [ins->dreg] = LLVMBuildShuffleVector (builder, values [ins->dreg], LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
+ break;
+ }
+
+ case OP_INSERT_I1:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt8Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+ case OP_INSERT_I2:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt16Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+ case OP_INSERT_I4:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+ case OP_INSERT_I8:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt64Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+ case OP_INSERT_R4:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMFloatType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+ case OP_INSERT_R8:
+ values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMDoubleType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
+ break;
+
+ case OP_CVTDQ2PD:
+ case OP_CVTDQ2PS:
+ case OP_CVTPD2DQ:
+ case OP_CVTPS2DQ:
+ case OP_CVTPD2PS:
+ case OP_CVTPS2PD:
+ case OP_CVTTPD2DQ:
+ case OP_CVTTPS2DQ:
+ case OP_EXTRACT_MASK:
+ case OP_SQRTPS:
+ case OP_SQRTPD:
+ case OP_RSQRTPS:
+ case OP_RCPPS: {
+ LLVMValueRef v;
+
+ v = convert (ctx, values [ins->sreg1], simd_op_to_llvm_type (ins->opcode));
+
+ values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, simd_op_to_intrins (ins->opcode)), &v, 1, dname);
+ break;
+ }
+
+ case OP_ICONV_TO_R8_RAW:
+ /* Same as OP_ICONV_TO_R8 */
+ values [ins->dreg] = convert (ctx, LLVMBuildBitCast (builder, lhs, LLVMFloatType (), ""), LLVMDoubleType ());
+ break;
+
+ case OP_COMPPS:
+ case OP_COMPPD: {
+ LLVMValueRef args [3];
+
+ args [0] = lhs;
+ args [1] = rhs;
+ args [2] = LLVMConstInt (LLVMInt8Type (), ins->inst_c0, FALSE);
+
+ values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, simd_op_to_intrins (ins->opcode)), args, 3, dname);
+ break;
+ }
+
+ case OP_ICONV_TO_X:
+ /* This is only used for implementing shifts by non-immediate */
+ values [ins->dreg] = lhs;
+ break;
+
+ case OP_PSHRW:
+ case OP_PSHRD:
+ case OP_PSHRQ:
+ case OP_PSARW:
+ case OP_PSARD:
+ case OP_PSHLW:
+ case OP_PSHLD:
+ case OP_PSHLQ: {
+ LLVMValueRef args [3];
+
+ args [0] = lhs;
+ args [1] = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
+
+ values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, simd_op_to_intrins (ins->opcode)), args, 2, dname);
+ break;
+ }
+
+ case OP_PSHRW_REG:
+ case OP_PSHRD_REG:
+ case OP_PSHRQ_REG:
+ case OP_PSARW_REG:
+ case OP_PSARD_REG:
+ case OP_PSHLW_REG:
+ case OP_PSHLD_REG:
+ case OP_PSHLQ_REG: {
+ LLVMValueRef args [3];
+
+ args [0] = lhs;
+ args [1] = values [ins->sreg2];
+
+ values [ins->dreg] = LLVMBuildCall (builder, LLVMGetNamedFunction (module, simd_op_to_intrins (ins->opcode)), args, 2, dname);
+ break;
+ }
+
+ case OP_SHUFPS:
+ case OP_SHUFPD:
+ case OP_PSHUFLED:
+ case OP_PSHUFLEW_LOW:
+ case OP_PSHUFLEW_HIGH: {
+ int mask [16];
+ LLVMValueRef v1 = NULL, v2 = NULL, mask_values [4];
+ int i, mask_size = 0;
+ int imask = ins->inst_c0;
+
+ /* Convert the x86 shuffle mask to LLVM's */
+ switch (ins->opcode) {
+ case OP_SHUFPS:
+ mask_size = 4;
+ mask [0] = ((imask >> 0) & 3);
+ mask [1] = ((imask >> 2) & 3);
+ mask [2] = ((imask >> 4) & 3) + 4;
+ mask [3] = ((imask >> 6) & 3) + 4;
+ v1 = values [ins->sreg1];
+ v2 = values [ins->sreg2];
+ break;
+ case OP_SHUFPD:
+ mask_size = 2;
+ mask [0] = ((imask >> 0) & 1);
+ mask [1] = ((imask >> 1) & 1) + 2;
+ v1 = values [ins->sreg1];
+ v2 = values [ins->sreg2];
+ break;
+ case OP_PSHUFLEW_LOW:
+ mask_size = 8;
+ mask [0] = ((imask >> 0) & 3);
+ mask [1] = ((imask >> 2) & 3);
+ mask [2] = ((imask >> 4) & 3);
+ mask [3] = ((imask >> 6) & 3);
+ mask [4] = 4 + 0;
+ mask [5] = 4 + 1;
+ mask [6] = 4 + 2;
+ mask [7] = 4 + 3;
+ v1 = values [ins->sreg1];
+ v2 = LLVMGetUndef (LLVMTypeOf (v1));
+ break;
+ case OP_PSHUFLEW_HIGH:
+ mask_size = 8;
+ mask [0] = 0;
+ mask [1] = 1;
+ mask [2] = 2;
+ mask [3] = 3;
+ mask [4] = 4 + ((imask >> 0) & 3);
+ mask [5] = 4 + ((imask >> 2) & 3);
+ mask [6] = 4 + ((imask >> 4) & 3);
+ mask [7] = 4 + ((imask >> 6) & 3);
+ v1 = values [ins->sreg1];
+ v2 = LLVMGetUndef (LLVMTypeOf (v1));
+ break;
+ case OP_PSHUFLED:
+ mask_size = 4;
+ mask [0] = ((imask >> 0) & 3);
+ mask [1] = ((imask >> 2) & 3);
+ mask [2] = ((imask >> 4) & 3);
+ mask [3] = ((imask >> 6) & 3);
+ v1 = values [ins->sreg1];
+ v2 = LLVMGetUndef (LLVMTypeOf (v1));
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ for (i = 0; i < mask_size; ++i)
+ mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
+
+ values [ins->dreg] =
+ LLVMBuildShuffleVector (builder, v1, v2,
+ LLVMConstVector (mask_values, mask_size), dname);
+ break;
+ }
+
+ case OP_UNPACK_LOWB:
+ case OP_UNPACK_LOWW:
+ case OP_UNPACK_LOWD:
+ case OP_UNPACK_LOWQ:
+ case OP_UNPACK_LOWPS:
+ case OP_UNPACK_LOWPD:
+ case OP_UNPACK_HIGHB:
+ case OP_UNPACK_HIGHW:
+ case OP_UNPACK_HIGHD:
+ case OP_UNPACK_HIGHQ:
+ case OP_UNPACK_HIGHPS:
+ case OP_UNPACK_HIGHPD: {
+ int mask [16];
+ LLVMValueRef mask_values [16];
+ int i, mask_size = 0;
+ gboolean low = FALSE;
+
+ switch (ins->opcode) {
+ case OP_UNPACK_LOWB:
+ mask_size = 16;
+ low = TRUE;
+ break;
+ case OP_UNPACK_LOWW:
+ mask_size = 8;
+ low = TRUE;
+ break;
+ case OP_UNPACK_LOWD:
+ case OP_UNPACK_LOWPS:
+ mask_size = 4;
+ low = TRUE;
+ break;
+ case OP_UNPACK_LOWQ:
+ case OP_UNPACK_LOWPD:
+ mask_size = 2;
+ low = TRUE;
+ break;
+ case OP_UNPACK_HIGHB:
+ mask_size = 16;
+ break;
+ case OP_UNPACK_HIGHW:
+ mask_size = 8;
+ break;
+ case OP_UNPACK_HIGHD:
+ case OP_UNPACK_HIGHPS:
+ mask_size = 4;
+ break;
+ case OP_UNPACK_HIGHQ:
+ case OP_UNPACK_HIGHPD:
+ mask_size = 2;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ if (low) {
+ for (i = 0; i < (mask_size / 2); ++i) {
+ mask [(i * 2)] = i;
+ mask [(i * 2) + 1] = mask_size + i;
+ }
+ } else {
+ for (i = 0; i < (mask_size / 2); ++i) {
+ mask [(i * 2)] = (mask_size / 2) + i;
+ mask [(i * 2) + 1] = mask_size + (mask_size / 2) + i;
+ }
+ }
+
+ for (i = 0; i < mask_size; ++i)
+ mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
+
+ values [ins->dreg] =
+ LLVMBuildShuffleVector (builder, values [ins->sreg1], values [ins->sreg2],
+ LLVMConstVector (mask_values, mask_size), dname);
+ break;
+ }
+
+ case OP_DUPPD: {
+ LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
+ LLVMValueRef v, val;
+
+ v = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
+ val = LLVMConstNull (t);
+ val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
+ val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 1, FALSE), dname);
+
+ values [ins->dreg] = val;
+ break;
+ }
+ case OP_DUPPS_LOW:
+ case OP_DUPPS_HIGH: {
+ LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
+ LLVMValueRef v1, v2, val;
+
+
+ if (ins->opcode == OP_DUPPS_LOW) {
+ v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
+ v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
+ } else {
+ v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
+ v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
+ }
+ val = LLVMConstNull (t);
+ val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
+ val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
+ val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
+ val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
+
+ values [ins->dreg] = val;
+ break;
+ }
+
+#endif /* SIMD */
case OP_DUMMY_USE:
break;
icall_name = rethrow ? "mono_arch_rethrow_exception" : "mono_arch_throw_exception";
if (!callee) {
- throw_sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
- throw_sig->ret = &mono_defaults.void_class->byval_arg;
- throw_sig->params [0] = &mono_defaults.object_class->byval_arg;
+ throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 1);
+ throw_sig->ret = &mono_get_void_class ()->byval_arg;
+ throw_sig->params [0] = &mono_get_object_class ()->byval_arg;
if (cfg->compile_aot) {
callee = get_plt_entry (ctx, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_INTERNAL_METHOD, icall_name);
} else {
* LLVM doesn't push the exception argument, so we need a different
* trampoline.
*/
- LLVMAddGlobalMapping (ee, callee, resolve_patch (cfg, MONO_PATCH_INFO_INTERNAL_METHOD, rethrow ? "mono_arch_llvm_rethrow_exception" : "mono_arch_llvm_throw_exception"));
+ LLVMAddGlobalMapping (ee, callee, resolve_patch (cfg, MONO_PATCH_INFO_INTERNAL_METHOD, rethrow ? "llvm_rethrow_exception_trampoline" : "llvm_throw_exception_trampoline"));
#else
LLVMAddGlobalMapping (ee, callee, resolve_patch (cfg, MONO_PATCH_INFO_INTERNAL_METHOD, icall_name));
#endif
else
ctx->lmodule->throw = callee;
}
- arg = convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, &mono_defaults.object_class->byval_arg));
+ arg = convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, &mono_get_object_class ()->byval_arg));
emit_call (ctx, bb, &builder, callee, &arg, 1);
break;
}
* We don't 'call' handlers, but instead simply branch to them.
* The code generated by ENDFINALLY will branch back to us.
*/
- LLVMBasicBlockRef finally_bb, noex_bb;
+ LLVMBasicBlockRef noex_bb;
GSList *bb_list;
BBInfo *info = &bblocks [ins->inst_target_bb->block_num];
- finally_bb = get_bb (ctx, ins->inst_target_bb);
-
bb_list = info->call_handler_return_bbs;
/*
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), g_slist_length (bb_list) + 1, FALSE), lhs);
/* Branch to the finally clause */
- LLVMBuildBr (builder, finally_bb);
+ LLVMBuildBr (builder, info->call_handler_target_bb);
noex_bb = gen_bb (ctx, "CALL_HANDLER_CONT_BB");
info->call_handler_return_bbs = g_slist_append_mempool (cfg->mempool, info->call_handler_return_bbs, noex_bb);
LLVMPositionBuilderAtEnd (ctx->builder, resume_bb);
if (ctx->cfg->compile_aot) {
- callee = get_plt_entry (ctx, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE), MONO_PATCH_INFO_INTERNAL_METHOD, "mono_resume_unwind");
+ callee = get_plt_entry (ctx, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE), MONO_PATCH_INFO_INTERNAL_METHOD, "llvm_resume_unwind_trampoline");
} else {
- callee = LLVMGetNamedFunction (module, "mono_resume_unwind");
+ callee = LLVMGetNamedFunction (module, "llvm_resume_unwind_trampoline");
}
LLVMBuildCall (builder, callee, NULL, 0, "");
void
mono_llvm_check_method_supported (MonoCompile *cfg)
{
+ /*
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
+ */
if (cfg->generic_sharing_context && !IS_LLVM_MONO_BRANCH) {
/* No way to obtain location info for this/rgctx */
cfg->disable_llvm = TRUE;
}
- if (!LLVM_CHECK_VERSION (2, 8)) {
- /*
- * FIXME: LLLVM 2.6 no longer seems to generate correct exception info
- * for JITted code.
- */
- cfg->exception_message = g_strdup ("clauses");
- cfg->disable_llvm = TRUE;
- }
-
+#if 0
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
cfg->disable_llvm = TRUE;
}
}
+#endif
/* FIXME: */
if (cfg->method->dynamic) {
MonoMethodSignature *sig;
MonoBasicBlock *bb;
LLVMTypeRef method_type;
- LLVMValueRef method = NULL, debug_alias = NULL;
- char *method_name, *debug_name = NULL;
+ LLVMValueRef method = NULL;
+ char *method_name;
LLVMValueRef *values;
int i, max_block_num, bb_index;
gboolean last = FALSE;
MonoMethodHeader *header;
MonoExceptionClause *clause;
LLVMSigInfo sinfo;
+ char **names;
/* The code below might acquire the loader lock, so use it for global locking */
mono_loader_lock ();
/* Used to communicate with the callbacks */
- TlsSetValue (current_cfg_tls_id, cfg);
+ mono_native_tls_set_value (current_cfg_tls_id, cfg);
ctx = g_new0 (EmitContext, 1);
ctx->cfg = cfg;
if (cfg->compile_aot) {
ctx->lmodule = &aot_module;
method_name = mono_aot_get_method_name (cfg);
- debug_name = mono_aot_get_method_debug_name (cfg);
+ cfg->llvm_method_name = g_strdup (method_name);
} else {
init_jit_module ();
ctx->lmodule = &jit_module;
method_name = mono_method_full_name (cfg->method, TRUE);
- debug_name = NULL;
}
module = ctx->module = ctx->lmodule->module;
if (getenv ("LLVM_COUNT")) {
if (count == atoi (getenv ("LLVM_COUNT"))) {
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ fflush (stdout);
last = TRUE;
}
if (count > atoi (getenv ("LLVM_COUNT")))
method_type = sig_to_llvm_sig_full (ctx, sig, linfo, &sinfo);
CHECK_FAILURE (ctx);
+ /*
+ * This maps parameter indexes in the original signature to the indexes in
+ * the LLVM signature.
+ */
+ ctx->pindexes = sinfo.pindexes;
+
method = LLVMAddFunction (module, method_name, method_type);
ctx->lmethod = method;
#ifdef LLVM_MONO_BRANCH
- if (linfo->rgctx_arg)
- LLVMSetFunctionCallConv (method, LLVMMono1CallConv);
+ LLVMSetFunctionCallConv (method, LLVMMono1CallConv);
#endif
LLVMSetLinkage (method, LLVMPrivateLinkage);
+ LLVMAddFunctionAttr (method, LLVMUWTableAttribute);
+
+ if (cfg->compile_aot) {
+ LLVMSetLinkage (method, LLVMInternalLinkage);
+ LLVMSetVisibility (method, LLVMHiddenVisibility);
+ } else {
+ LLVMSetLinkage (method, LLVMPrivateLinkage);
+ }
+
if (cfg->method->save_lmf)
LLVM_FAILURE (ctx, "lmf");
- if (sig->pinvoke)
+ if (sig->pinvoke && cfg->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
LLVM_FAILURE (ctx, "pinvoke signature");
header = cfg->header;
LLVM_FAILURE (ctx, "non-finally/catch clause.");
}
- /*
- * This maps parameter indexes in the original signature to the indexes in
- * the LLVM signature.
- */
- ctx->pindexes = sinfo.pindexes;
if (linfo->rgctx_arg) {
ctx->rgctx_arg = LLVMGetParam (method, sinfo.rgctx_arg_pindex);
/*
values [cfg->args [0]->dreg] = LLVMGetParam (method, sinfo.this_arg_pindex);
LLVMSetValueName (values [cfg->args [0]->dreg], "this");
}
+
+ names = g_new (char *, sig->param_count);
+ mono_method_get_param_names (cfg->method, (const char **) names);
+
for (i = 0; i < sig->param_count; ++i) {
char *name;
values [cfg->args [i + sig->hasthis]->dreg] = LLVMGetParam (method, sinfo.pindexes [i]);
- name = g_strdup_printf ("arg_%d", i);
+ if (names [i] && names [i][0] != '\0')
+ name = g_strdup_printf ("arg_%s", names [i]);
+ else
+ name = g_strdup_printf ("arg_%d", i);
LLVMSetValueName (values [cfg->args [i + sig->hasthis]->dreg], name);
g_free (name);
if (linfo->args [i + sig->hasthis].storage == LLVMArgVtypeByVal)
LLVMAddAttribute (LLVMGetParam (method, sinfo.pindexes [i]), LLVMByValAttribute);
}
+ g_free (names);
max_block_num = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
}
/*
- * Make a first pass over the code to precreate PHI nodes.
+ * The INDIRECT flag added by OP_LDADDR inhibits optimizations, even if the LDADDR
+ * was later optimized away, so clear these flags, and add them back for the still
+ * present OP_LDADDR instructions.
+ */
+ for (i = 0; i < cfg->next_vreg; ++i) {
+ MonoInst *ins;
+
+ ins = get_vreg_to_inst (cfg, i);
+ if (ins && ins != cfg->rgctx_var)
+ ins->flags &= ~MONO_INST_INDIRECT;
+ }
+
+ /*
+ * Make a first pass over the code to precreate PHI nodes/set INDIRECT flags.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
}
break;
}
+ case OP_LDADDR:
+ ((MonoInst*)ins->inst_p0)->flags |= MONO_INST_INDIRECT;
+ break;
default:
break;
}
g_assert (values [sreg1]);
- g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg]));
- LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1);
+ if (phi->opcode == OP_VPHI) {
+ g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg]));
+ LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1);
+ } else {
+ g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg]));
+ LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1);
+ }
}
}
if (cfg->compile_aot) {
/* Don't generate native code, keep the LLVM IR */
-
- /* Can't delete the method if it has an alias, so only add it if successful */
- if (debug_name) {
- debug_alias = LLVMAddAlias (module, LLVMTypeOf (method), method, debug_name);
- LLVMSetLinkage (debug_alias, LLVMInternalLinkage);
- LLVMSetVisibility (debug_alias, LLVMHiddenVisibility);
- }
-
if (cfg->compile_aot && cfg->verbose_level)
printf ("%s emitted as %s\n", mono_method_full_name (cfg->method, TRUE), method_name);
g_free (ctx->vreg_types);
g_free (ctx->vreg_cli_types);
g_free (ctx->pindexes);
- g_free (debug_name);
+ g_free (ctx->is_dead);
+ g_free (ctx->unreachable);
g_ptr_array_free (phi_values, TRUE);
g_free (ctx->bblocks);
g_hash_table_destroy (ctx->region_to_handler);
g_free (ctx);
- TlsSetValue (current_cfg_tls_id, NULL);
+ mono_native_tls_set_value (current_cfg_tls_id, NULL);
mono_loader_unlock ();
}
{
MonoCompile *cfg;
- cfg = TlsGetValue (current_cfg_tls_id);
+ cfg = mono_native_tls_get_value (current_cfg_tls_id);
if (cfg) {
// FIXME: dynamic
{
MonoCompile *cfg;
- cfg = TlsGetValue (current_cfg_tls_id);
+ cfg = mono_native_tls_get_value (current_cfg_tls_id);
g_assert (cfg);
cfg->code_len = (guint8*)end - (guint8*)start;
}
{
MonoCompile *cfg;
MonoJitExceptionInfo *ei;
- guint32 ei_len, i;
+ guint32 ei_len, i, j, nested_len, nindex;
gpointer *type_info;
int this_reg, this_offset;
- cfg = TlsGetValue (current_cfg_tls_id);
+ cfg = mono_native_tls_get_value (current_cfg_tls_id);
g_assert (cfg);
/*
*/
cfg->encoded_unwind_ops = mono_unwind_decode_fde ((guint8*)data, &cfg->encoded_unwind_ops_len, NULL, &ei, &ei_len, &type_info, &this_reg, &this_offset);
- cfg->llvm_ex_info = mono_mempool_alloc0 (cfg->mempool, ei_len * sizeof (MonoJitExceptionInfo));
- cfg->llvm_ex_info_len = ei_len;
+ /* Count nested clauses */
+ nested_len = 0;
+ for (i = 0; i < ei_len; ++i) {
+ for (j = 0; j < ei_len; ++j) {
+ gint32 cindex1 = *(gint32*)type_info [i];
+ MonoExceptionClause *clause1 = &cfg->header->clauses [cindex1];
+ gint32 cindex2 = *(gint32*)type_info [j];
+ MonoExceptionClause *clause2 = &cfg->header->clauses [cindex2];
+
+ if (cindex1 != cindex2 && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) {
+ nested_len ++;
+ }
+ }
+ }
+
+ cfg->llvm_ex_info = mono_mempool_alloc0 (cfg->mempool, (ei_len + nested_len) * sizeof (MonoJitExceptionInfo));
+ cfg->llvm_ex_info_len = ei_len + nested_len;
memcpy (cfg->llvm_ex_info, ei, ei_len * sizeof (MonoJitExceptionInfo));
/* Fill the rest of the information from the type info */
for (i = 0; i < ei_len; ++i) {
cfg->llvm_ex_info [i].flags = clause->flags;
cfg->llvm_ex_info [i].data.catch_class = clause->data.catch_class;
}
+
+ /*
+ * For nested clauses, the LLVM produced exception info associates the try interval with
+ * the innermost handler, while mono expects it to be associated with all nesting clauses.
+ */
+ /* FIXME: These should be ordered with the normal clauses */
+ nindex = ei_len;
+ for (i = 0; i < ei_len; ++i) {
+ for (j = 0; j < ei_len; ++j) {
+ gint32 cindex1 = *(gint32*)type_info [i];
+ MonoExceptionClause *clause1 = &cfg->header->clauses [cindex1];
+ gint32 cindex2 = *(gint32*)type_info [j];
+ MonoExceptionClause *clause2 = &cfg->header->clauses [cindex2];
+
+ if (cindex1 != cindex2 && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) {
+ /*
+ * The try interval comes from the nested clause, everything else from the
+ * nesting clause.
+ */
+ memcpy (&cfg->llvm_ex_info [nindex], &cfg->llvm_ex_info [j], sizeof (MonoJitExceptionInfo));
+ cfg->llvm_ex_info [nindex].try_start = cfg->llvm_ex_info [i].try_start;
+ cfg->llvm_ex_info [nindex].try_end = cfg->llvm_ex_info [i].try_end;
+ nindex ++;
+ }
+ }
+ }
+ g_assert (nindex == ei_len + nested_len);
cfg->llvm_this_reg = this_reg;
cfg->llvm_this_offset = this_offset;
+ /* type_info [i] is cfg mempool allocated, no need to free it */
+
g_free (ei);
+ g_free (type_info);
+}
+
+/*
+ * AddFunc:
+ *
+ *   Add a declaration (no body) to MODULE for a function NAME taking NPARAMS
+ * fixed arguments of types PARAM_TYPES and returning RET_TYPE (non-varargs).
+ * Used below to declare the intrinsics referenced by the emitted IR.
+ */
+static inline void
+AddFunc (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef *param_types, int nparams)
+{
+ LLVMAddFunction (module, name, LLVMFunctionType (ret_type, param_types, nparams, FALSE));
+}
+
+/*
+ * AddFunc2:
+ *
+ *   Convenience wrapper around AddFunc () for the common case of a declaration
+ * with exactly two fixed parameters.
+ */
+static inline void
+AddFunc2 (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef param_type1, LLVMTypeRef param_type2)
+{
+ LLVMTypeRef param_types [4];
+
+ param_types [0] = param_type1;
+ param_types [1] = param_type2;
+
+ AddFunc (module, name, ret_type, param_types, 2);
+}
static void
add_intrinsics (LLVMModuleRef module)
{
/* Emit declarations of instrinsics */
+ /*
+ * It would be nicer to emit only the intrinsics actually used, but LLVM's Module
+ * type doesn't seem to do any locking.
+ */
{
LLVMTypeRef memset_params [] = { LLVMPointerType (LLVMInt8Type (), 0), LLVMInt8Type (), LLVMInt32Type (), LLVMInt32Type (), LLVMInt1Type () };
-#if LLVM_CHECK_VERSION(2, 8)
memset_param_count = 5;
memset_func_name = "llvm.memset.p0i8.i32";
-#else
- memset_param_count = 4;
- memset_func_name = "llvm.memset.i32";
-#endif
+
LLVMAddFunction (module, memset_func_name, LLVMFunctionType (LLVMVoidType (), memset_params, memset_param_count, FALSE));
}
{
LLVMTypeRef memcpy_params [] = { LLVMPointerType (LLVMInt8Type (), 0), LLVMPointerType (LLVMInt8Type (), 0), LLVMInt32Type (), LLVMInt32Type (), LLVMInt1Type () };
-#if LLVM_CHECK_VERSION(2, 8)
memcpy_param_count = 5;
memcpy_func_name = "llvm.memcpy.p0i8.p0i8.i32";
-#else
- memcpy_param_count = 4;
- memcpy_func_name = "llvm.memcpy.i32";
-#endif
LLVMAddFunction (module, memcpy_func_name, LLVMFunctionType (LLVMVoidType (), memcpy_params, memcpy_param_count, FALSE));
}
arg_types [0] = LLVMPointerType (LLVMInt8Type (), 0);
arg_types [1] = LLVMPointerType (LLVMInt8Type (), 0);
-#if LLVM_CHECK_VERSION(2, 8)
eh_selector_name = "llvm.eh.selector";
ret_type = LLVMInt32Type ();
-#else
-#if SIZEOF_VOID_P == 8
- eh_selector_name = "llvm.eh.selector.i64";
- ret_type = LLVMInt64Type ();
-#else
- eh_selector_name = "llvm.eh.selector.i32";
- ret_type = LLVMInt32Type ();
-#endif
-#endif
+
LLVMAddFunction (module, eh_selector_name, LLVMFunctionType (ret_type, arg_types, 2, TRUE));
LLVMAddFunction (module, "llvm.eh.exception", LLVMFunctionType (LLVMPointerType (LLVMInt8Type (), 0), NULL, 0, FALSE));
LLVMAddFunction (module, "mono_personality", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
- LLVMAddFunction (module, "mono_resume_unwind", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
+ LLVMAddFunction (module, "llvm_resume_unwind_trampoline", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
}
/* SSE intrinsics */
{
- LLVMTypeRef vector_type, arg_types [2];
-
- vector_type = LLVMVectorType (LLVMInt32Type (), 4);
- arg_types [0] = vector_type;
- arg_types [1] = vector_type;
- LLVMAddFunction (module, "llvm.x86.sse41.pminud", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
- LLVMAddFunction (module, "llvm.x86.sse41.pmaxud", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
-
- vector_type = LLVMVectorType (LLVMInt16Type (), 8);
- arg_types [0] = vector_type;
- arg_types [1] = vector_type;
- LLVMAddFunction (module, "llvm.x86.sse41.pminuw", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
- LLVMAddFunction (module, "llvm.x86.sse41.pmaxuw", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
-
- vector_type = LLVMVectorType (LLVMInt8Type (), 16);
- arg_types [0] = vector_type;
- arg_types [1] = vector_type;
- LLVMAddFunction (module, "llvm.x86.sse2.pminu.b", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
- LLVMAddFunction (module, "llvm.x86.sse2.pmaxu.b", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
-
- vector_type = LLVMVectorType (LLVMDoubleType (), 2);
- arg_types [0] = vector_type;
- arg_types [1] = vector_type;
- LLVMAddFunction (module, "llvm.x86.sse2.min.pd", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
- LLVMAddFunction (module, "llvm.x86.sse2.max.pd", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
-
- vector_type = LLVMVectorType (LLVMFloatType (), 4);
- arg_types [0] = vector_type;
- arg_types [1] = vector_type;
- LLVMAddFunction (module, "llvm.x86.sse2.min.ps", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
- LLVMAddFunction (module, "llvm.x86.sse2.max.ps", LLVMFunctionType (vector_type, arg_types, 2, FALSE));
+ LLVMTypeRef ret_type, arg_types [2];
+
+ /* Binary ops */
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse41.pminud", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse41.pmaxud", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pcmpeq.d", ret_type, arg_types, 2);
+
+ ret_type = type_to_simd_type (MONO_TYPE_I2);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse41.pminuw", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pmins.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse41.pmaxuw", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pcmpeq.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.padds.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psubs.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.paddus.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psubus.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pavg.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pmulh.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pmulhu.w", ret_type, arg_types, 2);
+
+ ret_type = type_to_simd_type (MONO_TYPE_I1);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse2.pminu.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pmaxu.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pcmpeq.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pcmpgt.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.padds.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psubs.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.paddus.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psubus.b", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pavg.b", ret_type, arg_types, 2);
+
+ ret_type = type_to_simd_type (MONO_TYPE_I8);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse41.pcmpeqq", ret_type, arg_types, 2);
+
+ ret_type = type_to_simd_type (MONO_TYPE_R8);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse2.min.pd", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.max.pd", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.hadd.pd", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.hsub.pd", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.addsub.pd", ret_type, arg_types, 2);
+
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ AddFunc (module, "llvm.x86.sse.min.ps", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse.max.ps", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.hadd.ps", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.hsub.ps", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse3.addsub.ps", ret_type, arg_types, 2);
+
+ /* pack */
+ ret_type = type_to_simd_type (MONO_TYPE_I1);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_I2);
+ arg_types [1] = type_to_simd_type (MONO_TYPE_I2);
+ AddFunc (module, "llvm.x86.sse2.packsswb.128", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.packuswb.128", ret_type, arg_types, 2);
+ ret_type = type_to_simd_type (MONO_TYPE_I2);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [1] = type_to_simd_type (MONO_TYPE_I4);
+ AddFunc (module, "llvm.x86.sse2.packssdw.128", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse41.packusdw", ret_type, arg_types, 2);
+
+ /* cmp pd/ps */
+ ret_type = type_to_simd_type (MONO_TYPE_R8);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ arg_types [2] = LLVMInt8Type ();
+ AddFunc (module, "llvm.x86.sse2.cmp.pd", ret_type, arg_types, 3);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = ret_type;
+ arg_types [1] = ret_type;
+ arg_types [2] = LLVMInt8Type ();
+ AddFunc (module, "llvm.x86.sse.cmp.ps", ret_type, arg_types, 3);
+
+ /* Conversion ops */
+ ret_type = type_to_simd_type (MONO_TYPE_R8);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_I4);
+ AddFunc (module, "llvm.x86.sse2.cvtdq2pd", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_I4);
+ AddFunc (module, "llvm.x86.sse2.cvtdq2ps", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R8);
+ AddFunc (module, "llvm.x86.sse2.cvtpd2dq", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R4);
+ AddFunc (module, "llvm.x86.sse2.cvtps2dq", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R8);
+ AddFunc (module, "llvm.x86.sse2.cvtpd2ps", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R8);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R4);
+ AddFunc (module, "llvm.x86.sse2.cvtps2pd", ret_type, arg_types, 1);
+
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R8);
+ AddFunc (module, "llvm.x86.sse2.cvttpd2dq", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = type_to_simd_type (MONO_TYPE_R4);
+ AddFunc (module, "llvm.x86.sse2.cvttps2dq", ret_type, arg_types, 1);
+
+ /* Unary ops */
+ ret_type = type_to_simd_type (MONO_TYPE_R8);
+ arg_types [0] = ret_type;
+ AddFunc (module, "llvm.x86.sse2.sqrt.pd", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = ret_type;
+ AddFunc (module, "llvm.x86.sse.sqrt.ps", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = ret_type;
+ AddFunc (module, "llvm.x86.sse.rsqrt.ps", ret_type, arg_types, 1);
+ ret_type = type_to_simd_type (MONO_TYPE_R4);
+ arg_types [0] = ret_type;
+ AddFunc (module, "llvm.x86.sse.rcp.ps", ret_type, arg_types, 1);
+
+ /* shifts */
+ ret_type = type_to_simd_type (MONO_TYPE_I2);
+ arg_types [0] = ret_type;
+ arg_types [1] = LLVMInt32Type ();
+ AddFunc (module, "llvm.x86.sse2.psrli.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psrai.w", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pslli.w", ret_type, arg_types, 2);
+ ret_type = type_to_simd_type (MONO_TYPE_I4);
+ arg_types [0] = ret_type;
+ arg_types [1] = LLVMInt32Type ();
+ AddFunc (module, "llvm.x86.sse2.psrli.d", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.psrai.d", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pslli.d", ret_type, arg_types, 2);
+ ret_type = type_to_simd_type (MONO_TYPE_I8);
+ arg_types [0] = ret_type;
+ arg_types [1] = LLVMInt32Type ();
+ AddFunc (module, "llvm.x86.sse2.psrli.q", ret_type, arg_types, 2);
+ AddFunc (module, "llvm.x86.sse2.pslli.q", ret_type, arg_types, 2);
+
+ /* pmovmskb */
+ ret_type = LLVMInt32Type ();
+ arg_types [0] = type_to_simd_type (MONO_TYPE_I1);
+ AddFunc (module, "llvm.x86.sse2.pmovmskb.128", ret_type, arg_types, 1);
+ }
+
+ if (IS_LLVM_MONO_BRANCH) {
+ AddFunc (module, "llvm.x86.sse2.pause", LLVMVoidType (), NULL, 0);
}
/* Load/Store intrinsics */
void
mono_llvm_init (void)
{
- current_cfg_tls_id = TlsAlloc ();
+	mono_native_tls_alloc (&current_cfg_tls_id, NULL);
}
static void
init_jit_module (void)
{
+ MonoJitICallInfo *info;
+
if (jit_module_inited)
return;
jit_module.llvm_types = g_hash_table_new (NULL, NULL);
- LLVMAddGlobalMapping (ee, LLVMGetNamedFunction (jit_module.module, "mono_resume_unwind"), mono_resume_unwind);
+ info = mono_find_jit_icall_by_name ("llvm_resume_unwind_trampoline");
+ g_assert (info);
+ LLVMAddGlobalMapping (ee, LLVMGetNamedFunction (jit_module.module, "llvm_resume_unwind_trampoline"), (void*)info->func);
jit_module_inited = TRUE;
if (jit_module.llvm_types)
g_hash_table_destroy (jit_module.llvm_types);
+
+ if (aot_module.module)
+ LLVMDisposeModule (aot_module.module);
+
+ LLVMContextDispose (LLVMGetGlobalContext ());
}
void
/* Delete previous module */
if (aot_module.plt_entries)
g_hash_table_destroy (aot_module.plt_entries);
+ if (aot_module.module)
+ LLVMDisposeModule (aot_module.module);
memset (&aot_module, 0, sizeof (aot_module));
- Emit LLVM IR from the mono IR using the LLVM C API.
- The original arch specific code remains, so we can fall back to it if we run
into something we can't handle.
- FIXME:
- - llvm's PrettyStackTrace class seems to register a signal handler which screws
- up our GC. Also, it calls sigaction () a _lot_ of times instead of just once.
*/
/*
/* FIXME: Normalize some aspects of the mono IR to allow easier translation, like:
* - each bblock should end with a branch
* - setting the return value, making cfg->ret non-volatile
- * - merge some changes back to HEAD, to reduce the differences.
* - avoid some transformations in the JIT which make it harder for us to generate
* code.
- * - fix memory leaks.
* - use pointer types to help optimizations.
*/