} \
} while (0)
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_preg ((cfg)); \
} while (0)
#endif
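+ /* When this method is being compiled by the LLVM backend (COMPILE_LLVM),
+ * let LLVM lower the call; otherwise use the arch-specific lowering. */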
+#ifdef ENABLE_LLVM
+ if (COMPILE_LLVM (cfg))
+ mono_llvm_emit_call (cfg, call);
+ else
+ mono_arch_emit_call (cfg, call);
+#else
mono_arch_emit_call (cfg, call);
+#endif
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
cfg->flags |= MONO_CFG_HAS_VARARGS;
+ /* mono_array_new_va () needs a vararg calling convention, which the LLVM
+ * backend cannot emit, so force this method back to the native JIT. */
+ cfg->disable_llvm = TRUE;
+
/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
}
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
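+ /* fast_log2 [size] maps the power-of-two sizes 1, 2, 4 and 8 to their
+ * log2 (0-3), used as the scale shift of the lea; other slots are unused. */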
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
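+ /* lea computes args [0] + (index_reg << 1) + offsetof (MonoString, chars),
+ * i.e. the address of the index'th 16-bit character of the string. */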
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
/* Avoid a warning */
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
+ gboolean init_locals;
/* serialization and xdomain stuff may need access to private fields and methods */
dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
cfg->cil_start = ip;
end = ip + header->code_size;
mono_jit_stats.cil_code_size += header->code_size;
+ init_locals = header->init_locals;
+
+ /*
+ * Methods without init_locals set could cause asserts in various passes
+ * (#497220), so for now force it on, overriding the header flag read above.
+ */
+ init_locals = TRUE;
method_definition = method;
while (method_definition->is_inflated) {
}
}
- if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
+ if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
cfg->bb_init = init_localsbb;
cfg->coverage_info->data [cil_offset].cil_code = ip;
/* TODO: Use an increment here */
-#if defined(__i386__)
+#if defined(TARGET_X86)
MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
ins->inst_imm = 1;
ip++;
--sp;
-#ifdef __i386__
+#ifdef TARGET_X86
if (sp [0]->type == STACK_R8)
/* we need to pop the value from the x86 FP stack */
MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
CHECK_CFG_EXCEPTION;
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
{
MonoMethodSignature *fsig = mono_method_signature (cmethod);
int i, n;
/* Prevent inlining of methods that contain indirect calls */
INLINE_FAILURE;
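+ /* The generalized IMT thunk call sequence is not supported by the LLVM
+ * backend, so it is compiled out when ENABLE_LLVM is defined. */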
-#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
+#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
g_assert (!imt_arg);
if (context_used) {
call->method = cmethod;
call->signature = mono_method_signature (cmethod);
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
/* Handle tail calls similarly to calls */
call->inst.opcode = OP_TAILCALL;
call->args = sp;
table->table_size = n;
use_op_switch = FALSE;
-#ifdef __arm__
+#ifdef TARGET_ARM
/* ARM implements SWITCH statements differently */
/* FIXME: Make it use the generic implementation */
if (!cfg->compile_aot)
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
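+ /* MONO_INST_INIT makes the generated code zero the localloc'ed memory */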
- if (header->init_locals)
+ if (init_locals)
ins->flags |= MONO_INST_INIT;
*sp++ = ins;
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
- if (header->init_locals) {
+ if (init_locals) {
MonoInst *store;
cfg->cbb = init_localsbb;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
-#if defined(__i386__) || defined (__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_X86_PUSH:
return OP_X86_PUSH_IMM;
case OP_X86_COMPARE_MEMBASE_REG:
return OP_X86_COMPARE_MEMBASE_IMM;
#endif
-#if defined(__x86_64__)
+#if defined(TARGET_AMD64)
case OP_AMD64_ICOMPARE_MEMBASE_REG:
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
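+ /* Convert an OP_*_MEMBASE load (base register + displacement) into the
+ * corresponding OP_*_MEM load from a constant address, or -1 if none. */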
mono_load_membase_to_load_mem (int opcode)
{
// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_LOAD_MEMBASE:
return OP_LOAD_MEM;
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
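+ /* Return a variant of 'opcode' which operates directly on its memory
+ * destination, so that the following 'store_opcode' can be folded into
+ * it; -1 if no such variant exists. */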
-#if defined(__i386__)
+#if defined(TARGET_X86)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
return -1;
}
#endif
-#if defined(__x86_64__)
+#if defined(TARGET_AMD64)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
return -1;
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
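+ /* Return an opcode which computes 'opcode' and stores the result straight
+ * to memory, folding away 'store_opcode'; -1 if not possible. */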
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_ICEQ:
if (store_opcode == OP_STOREI1_MEMBASE_REG)
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
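+ /* Return a variant of 'opcode' which takes its first source operand
+ * directly from memory, folding the preceding 'load_opcode' into it;
+ * -1 if not possible. */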
-#ifdef __i386__
+#ifdef TARGET_X86
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
}
#endif
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
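+ /* Same as op_to_op_src1_membase, but for the second source operand. */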
-#ifdef __i386__
+#ifdef TARGET_X86
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
}
#endif
-#ifdef __x86_64__
+#ifdef TARGET_AMD64
switch (opcode) {
case OP_ICOMPARE:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
}
#if SIZEOF_REGISTER == 4
- if (regtype == 'l') {
+ /* In the LLVM case, the long opcodes are not decomposed */
+ if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
/*
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
-#if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
+#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
/* Enabling this screws up the fp stack on x86 */
case STACK_R8:
#endif