#include "trace.h"
#include "mini-ia64.h"
-#include "inssel.h"
#include "cpu-ia64.h"
#include "jit-icalls.h"
-
-static gint appdomain_tls_offset = -1;
-static gint thread_tls_offset = -1;
+#include "ir-emit.h"
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
typedef enum {
ArgInIReg,
ArgInFloatReg,
+ ArgInFloatRegR4,
ArgOnStack,
ArgValuetypeAddrInIReg,
ArgAggregate,
#define DEBUG(a) if (cfg->verbose_level > 1) a
-#define NEW_ICONST(cfg,dest,val) do { \
- (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
- (dest)->opcode = OP_ICONST; \
- (dest)->inst_c0 = (val); \
- (dest)->type = STACK_I4; \
- } while (0)
-
#define PARAM_REGS 8
static void inline
(*stack_size) += sizeof (gpointer);
}
else {
- ainfo->storage = ArgInFloatReg;
+ ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
ainfo->reg = 8 + *fr;
(*fr) += 1;
(*gr) += 1;
ainfo->nregs = info->num_fields;
ainfo->nslots = ainfo->nregs;
(*fr) += info->num_fields;
+ if (ainfo->atype == AggregateSingleHFA) {
+ /*
+ * FIXME: Have to keep track of the parameter slot number, which is
+ * not the same as *gr.
+ */
+ (*gr) += ALIGN_TO (info->num_fields, 2) / 2;
+ } else {
+ (*gr) += info->num_fields;
+ }
return;
}
}
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
- /* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
- if (cinfo->ret.storage == ArgInIReg)
- cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ if (sig->ret->byref) {
+ /* This seems to happen with ldfld wrappers */
+ cinfo->ret.storage = ArgInIReg;
+ } else {
+ add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ if (cinfo->ret.storage == ArgOnStack)
+ /* The caller passes the address where the value is stored */
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
break;
}
case MONO_TYPE_VOID:
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
+ if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
+ cfg->arch.omit_fp = FALSE;
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+ cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
*/
if (cfg->arch.omit_fp) {
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = IA64_SP;
offset = ARGS_OFFSET;
}
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
- cfg->ret->opcode = OP_REGVAR;
- cfg->ret->inst_c0 = cfg->arch.reg_in0 + cinfo->ret.reg;
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
break;
case ArgAggregate:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
break;
case ArgInFloatReg:
+ case ArgInFloatRegR4:
/*
* Since float regs are volatile, we save the arguments to
* the stack in the prolog.
}
if (!inreg && (ainfo->storage != ArgOnStack)) {
+ guint32 size = 0;
+
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
switch (ainfo->storage) {
case ArgAggregate:
if (ainfo->atype == AggregateSingleHFA)
- offset += ainfo->nslots * 4;
+ size = ainfo->nslots * 4;
else
- offset += ainfo->nslots * 8;
+ size = ainfo->nslots * 8;
break;
default:
- offset += sizeof (gpointer);
+ size = sizeof (gpointer);
break;
}
+
offset = ALIGN_TO (offset, sizeof (gpointer));
- if (cfg->arch.omit_fp)
+
+ if (cfg->arch.omit_fp) {
inst->inst_offset = offset;
- else
+ offset += size;
+ } else {
+ offset += size;
inst->inst_offset = - offset;
+ }
}
}
}
+ /*
+ * FIXME: This doesn't work because some variables are allocated during local
+ * regalloc.
+ */
+ /*
if (cfg->arch.omit_fp && offset == 16)
offset = 0;
+ */
cfg->stack_offset = offset;
}
MonoMethodSignature *sig;
CallInfo *cinfo;
+ if (!strcmp (cfg->method->name, "MoveNextCore") && !strcmp (cfg->method->klass->name, "PredicateIterator"))
+ cfg->verbose_level = 4;
+
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (cinfo->ret.storage == ArgAggregate)
cfg->ret_var_is_local = TRUE;
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr = ");
+ mono_print_ins (cfg->vret_addr);
+ }
+ }
}
static void
-add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
+/*
+ * add_outarg_reg:
+ *
+ *   Emit an instruction which moves TREE->dreg into a freshly allocated vreg and
+ * record on CALL (via mono_call_inst_add_outarg_reg ()) that this vreg must end
+ * up in hard register REG at the call site.  STORAGE selects the kind of move:
+ * integer (OP_MOVE), double (OP_FMOVE), or a double-to-single conversion
+ * (OP_FCONV_TO_R4) for ArgInFloatRegR4 arguments.  The instruction is appended
+ * to cfg->cbb.
+ */
+add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
+	MonoInst *arg;
+
+	/* Opcode and dreg are filled in below depending on STORAGE. */
+	MONO_INST_NEW (cfg, arg, OP_NOP);
+	arg->sreg1 = tree->dreg;
+
	switch (storage) {
	case ArgInIReg:
-		arg->opcode = OP_OUTARG_REG;
-		arg->inst_left = tree;
-		arg->inst_right = (MonoInst*)call;
-		arg->backend.reg3 = reg;
-		call->used_iregs |= 1 << reg;
+		arg->opcode = OP_MOVE;
+		arg->dreg = mono_alloc_ireg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
-		arg->opcode = OP_OUTARG_FREG;
-		arg->inst_left = tree;
-		arg->inst_right = (MonoInst*)call;
-		arg->backend.reg3 = reg;
-		call->used_fregs |= 1 << reg;
+		arg->opcode = OP_FMOVE;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	case ArgInFloatRegR4:
+		/* R4 args are narrowed to single precision before being placed in the freg. */
+		arg->opcode = OP_FCONV_TO_R4;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}
+
+	MONO_ADD_INS (cfg->cbb, arg);
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
- MonoInst *arg;
MonoMethodSignature *tmp_sig;
- MonoInst *sig_arg;
+ /* Emit the signature cookie just before the implicit arguments */
+ MonoInst *sig_arg;
/* FIXME: Add support for signature tokens to AOT */
cfg->disable_aot = TRUE;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->dreg = mono_alloc_ireg (cfg);
sig_arg->inst_p0 = tmp_sig;
+ MONO_ADD_INS (cfg->cbb, sig_arg);
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- arg->inst_left = sig_arg;
- arg->inst_imm = 16 + cinfo->sig_cookie.offset;
- arg->type = STACK_PTR;
-
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}
-/*
- * take the arguments and generate the arch-specific
- * instructions to properly call the function in call.
- * This includes pushing, moving arguments to the right register
- * etc.
- */
-MonoCallInst*
-mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
- MonoInst *arg, *in;
+ MonoInst *in;
MonoMethodSignature *sig;
int i, n, stack_size;
CallInfo *cinfo;
cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
if (cinfo->ret.storage == ArgAggregate) {
- /* The code in emit_this_vret_arg needs a local */
- cfg->arch.ret_var_addr_local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- ((MonoInst*)cfg->arch.ret_var_addr_local)->flags |= MONO_INST_VOLATILE;
+ MonoInst *vtarg;
+ MonoInst *local;
+
+ /*
+ * The valuetype is in registers after the call, need to be copied
+ * to the stack. Save the address to a local here, so the call
+ * instruction can access it.
+ */
+ local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ local->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ret_var_addr_local = local;
+
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = local->dreg;
+ MONO_ADD_INS (cfg->cbb, vtarg);
+ }
+
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
}
for (i = 0; i < n; ++i) {
+ MonoType *arg_type;
+
ainfo = cinfo->args + i;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
- if (is_virtual && i == 0) {
- /* the argument will be attached to the call instruction */
- in = call->args [i];
- } else {
- MonoType *arg_type;
-
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- in = call->args [i];
- arg->cil_code = in->cil_code;
- arg->inst_left = in;
- arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ in = call->args [i];
- if (sig->hasthis && (i == 0))
- arg_type = &mono_defaults.object_class->byval_arg;
- else
- arg_type = sig->params [i - sig->hasthis];
-
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
- MonoInst *stack_addr;
- guint32 align;
- guint32 size;
-
- if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
- size = sizeof (MonoTypedRef);
- align = sizeof (gpointer);
- }
- else
- if (sig->pinvoke)
- size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else {
- /*
- * Other backends use mini_type_stack_size (), but that
- * aligns the size to 8, which is larger than the size of
- * the source, leading to reads of invalid memory if the
- * source is at the end of address space.
- */
- size = mono_class_value_size (in->klass, &align);
- }
-
- if (ainfo->storage == ArgAggregate) {
- MonoInst *vtaddr, *load, *load2, *offset_ins, *set_reg;
- int slot, j;
-
- vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
-
- /*
- * Part of the structure is passed in registers.
- */
- for (j = 0; j < ainfo->nregs; ++j) {
- int offset, load_op, dest_reg, arg_storage;
-
- slot = ainfo->reg + j;
-
- if (ainfo->atype == AggregateSingleHFA) {
- load_op = CEE_LDIND_R4;
- offset = j * 4;
- dest_reg = ainfo->reg + j;
- arg_storage = ArgInFloatReg;
- } else if (ainfo->atype == AggregateDoubleHFA) {
- load_op = CEE_LDIND_R8;
- offset = j * 8;
- dest_reg = ainfo->reg + j;
- arg_storage = ArgInFloatReg;
- } else {
- load_op = CEE_LDIND_I;
- offset = j * 8;
- dest_reg = cfg->arch.reg_out0 + ainfo->reg + j;
- arg_storage = ArgInIReg;
- }
-
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->ssa_op = MONO_SSA_LOAD;
- load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
-
- NEW_ICONST (cfg, offset_ins, offset);
- MONO_INST_NEW (cfg, load2, CEE_ADD);
- load2->inst_left = load;
- load2->inst_right = offset_ins;
-
- MONO_INST_NEW (cfg, load, load_op);
- load->inst_left = load2;
-
- if (j == 0)
- set_reg = arg;
- else
- MONO_INST_NEW (cfg, set_reg, OP_OUTARG_REG);
- add_outarg_reg (cfg, call, set_reg, arg_storage, dest_reg, load);
- if (&set_reg->node != &call->out_args)
- MONO_INST_LIST_ADD (&set_reg->node, &call->out_args);
- }
-
- /*
- * Part of the structure is passed on the stack.
- */
- for (j = ainfo->nregs; j < ainfo->nslots; ++j) {
- MonoInst *outarg;
-
- slot = ainfo->reg + j;
-
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->ssa_op = MONO_SSA_LOAD;
- load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
-
- NEW_ICONST (cfg, offset_ins, (j * sizeof (gpointer)));
- MONO_INST_NEW (cfg, load2, CEE_ADD);
- load2->inst_left = load;
- load2->inst_right = offset_ins;
+ if (sig->hasthis && (i == 0))
+ arg_type = &mono_defaults.object_class->byval_arg;
+ else
+ arg_type = sig->params [i - sig->hasthis];
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->inst_left = load2;
+ if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
+ guint32 align;
+ guint32 size;
- if (j == 0)
- outarg = arg;
- else
- MONO_INST_NEW (cfg, outarg, OP_OUTARG);
- outarg->inst_left = load;
- outarg->inst_imm = 16 + ainfo->offset + (slot - 8) * 8;
+ if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
+ }
+ else if (sig->pinvoke)
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
- if (&outarg->node != &call->out_args)
- MONO_INST_LIST_ADD (&outarg->node, &call->out_args);
- }
+ if (size > 0) {
+ MonoInst *arg;
- /* Trees can't be shared so make a copy */
- MONO_INST_NEW (cfg, arg, CEE_STIND_I);
- arg->cil_code = in->cil_code;
- arg->ssa_op = MONO_SSA_STORE;
- arg->inst_left = vtaddr;
- arg->inst_right = in;
- arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
- }
- else {
- MONO_INST_NEW (cfg, stack_addr, OP_REGOFFSET);
- stack_addr->inst_basereg = IA64_SP;
- stack_addr->inst_offset = 16 + ainfo->offset;
- stack_addr->inst_imm = size;
+ MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+ arg->sreg1 = in->dreg;
+ arg->klass = in->klass;
+ arg->backend.size = size;
+ arg->inst_p0 = call;
+ arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
- arg->opcode = OP_OUTARG_VT;
- arg->inst_right = stack_addr;
- }
+ MONO_ADD_INS (cfg->cbb, arg);
}
- else {
- switch (ainfo->storage) {
- case ArgInIReg:
- add_outarg_reg (cfg, call, arg, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
- break;
- case ArgInFloatReg:
- add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
- break;
- case ArgOnStack:
- if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref) {
- arg->opcode = OP_OUTARG_R4;
- }
- else
- arg->opcode = OP_OUTARG;
- arg->inst_imm = 16 + ainfo->offset;
- break;
- default:
- g_assert_not_reached ();
- }
+ }
+ else {
+ switch (ainfo->storage) {
+ case ArgInIReg:
+ add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
+ break;
+ case ArgInFloatReg:
+ case ArgInFloatRegR4:
+ add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
+ break;
+ case ArgOnStack:
+ if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ break;
+ default:
+ g_assert_not_reached ();
}
}
}
}
call->stack_usage = cinfo->stack_usage;
- cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
- cfg->flags |= MONO_CFG_HAS_CALLS;
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Emit IR which passes the valuetype whose address is in SRC->dreg as the
+ * outgoing argument described by the ArgInfo stashed in INS->inst_p1 (INS is
+ * the OP_OUTARG_VT created in mono_arch_emit_call (); INS->inst_p0 is the call).
+ * ArgAggregate arguments are split: the first nregs slots are loaded into
+ * registers (R4/R8 loads for HFAs, I8 loads otherwise), the remaining
+ * nregs..nslots slots are stored to the outgoing stack area at IA64_SP+16.
+ * All other storages are copied wholesale onto the stack with mini_emit_memcpy ().
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+	int size = ins->backend.size;
+
+	if (ainfo->storage == ArgAggregate) {
+		MonoInst *load, *store;
+		int i, slot;
+
+		/*
+		 * Part of the structure is passed in registers.
+		 */
+		for (i = 0; i < ainfo->nregs; ++i) {
+			slot = ainfo->reg + i;
+
+			if (ainfo->atype == AggregateSingleHFA) {
+				/* Homogeneous float aggregate: 4-byte fields into fregs. */
+				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 4;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else if (ainfo->atype == AggregateDoubleHFA) {
+				/* Homogeneous double aggregate: 8-byte fields into fregs. */
+				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else {
+				/* Plain aggregate: 8-byte chunks into the out iregs. */
+				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_ireg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
+			}
+			MONO_ADD_INS (cfg->cbb, load);
+		}
+
+		/*
+		 * Part of the structure is passed on the stack.
+		 */
+		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
+			/* slot - 8 converts the arg slot number into a stack slot index. */
+			slot = ainfo->reg + i;
+
+			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+			load->inst_basereg = src->dreg;
+			load->inst_offset = i * sizeof (gpointer);
+			load->dreg = mono_alloc_preg (cfg);
+			MONO_ADD_INS (cfg->cbb, load);
+
+			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
+			store->sreg1 = load->dreg;
+			store->inst_destbasereg = IA64_SP;
+			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
+			MONO_ADD_INS (cfg->cbb, store);
+		}
+	} else {
+		/* 16 == the IA64 scratch area below the outgoing args. */
+		mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
+	}
+}
- return call;
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit IR moving VAL->dreg into the method's return location (cfg->ret->dreg),
+ * using an integer or float move depending on the return storage computed by
+ * get_call_info ().  CINFO is allocated from cfg->mempool, so it needs no
+ * explicit free.
+ * NOTE(review): only ArgInIReg/ArgInFloatReg are handled here; aggregate and
+ * indirect returns presumably never reach this path — confirm against callers.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
+
+	switch (cinfo->ret.storage) {
+	case ArgInIReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+		break;
+	case ArgInFloatReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
}
-static void
-peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+/* Intentionally empty: this port does all its peephole work in pass 2. */
+void
+mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
-	MonoInst *ins, *n;
+}
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (ins, n, &bb->ins_list, node) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
+void
+mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_MOVE:
case OP_FMOVE:
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- MONO_DEL_INS (ins);
+ MONO_DELETE_INS (bb, ins);
continue;
}
}
break;
}
+
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
}
int cond_to_ia64_cmp [][3] = {
return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
-#define NEW_INS(cfg,ins,dest,op) do { \
+#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
+ last_ins = (dest); \
} while (0)
/*
* Converts complex opcodes into simpler ones so that each IR instruction
* corresponds to one machine instruction.
*/
-static void
+void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n, *next, *temp, *temp2, *temp3;
+ MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
+ ins = bb->code;
- if (bb->max_vreg > cfg->rs->next_vreg)
- cfg->rs->next_vreg = bb->max_vreg;
-
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (ins, n, &bb->ins_list, node) {
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
if (ins->inst_offset == 0) {
temp2 = NULL;
} else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
switch (ins->opcode) {
if (ins->inst_imm == 0)
ins->sreg1 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp3, OP_I8CONST);
+ NEW_INS (cfg, temp3, OP_I8CONST);
temp3->inst_c0 = ins->inst_imm;
- temp3->dreg = mono_regstate_next_int (cfg->rs);
+ temp3->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp3->dreg;
}
break;
}
else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
ins->inst_offset = 0;
break;
}
else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_basereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_basereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
ins->inst_offset = 0;
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
+ case OP_AND_IMM:
case OP_IAND_IMM:
+ case OP_LAND_IMM:
case OP_IOR_IMM:
+ case OP_LOR_IMM:
case OP_IXOR_IMM:
- case OP_AND_IMM:
+ case OP_LXOR_IMM:
case OP_SHL_IMM:
case OP_SHR_IMM:
case OP_ISHL_IMM:
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
is_imm = ia64_is_imm14 (ins->inst_imm);
switched = TRUE;
break;
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
is_imm = ia64_is_imm14 (- (ins->inst_imm));
if (is_imm) {
/* A = B - IMM -> A = B + (-IMM) */
case OP_IOR_IMM:
case OP_IXOR_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
+ case OP_LOR_IMM:
+ case OP_LXOR_IMM:
is_imm = ia64_is_imm8 (ins->inst_imm);
switched = TRUE;
break;
break;
}
- switch (ins->opcode) {
- case OP_ADD_IMM:
- ins->opcode = CEE_ADD;
- break;
- case OP_IADD_IMM:
- ins->opcode = OP_IADD;
- break;
- case OP_ISUB_IMM:
- ins->opcode = OP_ISUB;
- break;
- case OP_IAND_IMM:
- ins->opcode = OP_IAND;
- break;
- case OP_IOR_IMM:
- ins->opcode = OP_IOR;
- break;
- case OP_IXOR_IMM:
- ins->opcode = OP_IXOR;
- break;
- case OP_ISHL_IMM:
- ins->opcode = OP_ISHL;
- break;
- case OP_ISHR_IMM:
- ins->opcode = OP_ISHR;
- break;
- case OP_ISHR_UN_IMM:
- ins->opcode = OP_ISHR_UN;
- break;
- case OP_AND_IMM:
- ins->opcode = CEE_AND;
- break;
- case OP_SHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_SHR_IMM:
- ins->opcode = OP_LSHR;
- break;
- case OP_LSHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_LSHR_IMM:
- ins->opcode = OP_LSHR;
- break;
- case OP_LSHR_UN_IMM:
- ins->opcode = OP_LSHR_UN;
- break;
- default:
- g_assert_not_reached ();
- }
+ ins->opcode = mono_op_imm_to_op (ins->opcode);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
}
break;
}
case OP_COMPARE_IMM:
- case OP_ICOMPARE_IMM: {
+ case OP_ICOMPARE_IMM:
+ case OP_LCOMPARE_IMM: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
gboolean imm;
CompRelation cond;
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ NULLIFY_INS (ins);
+ break;
+ }
+
/*
* The compare_imm instructions have switched up arguments, and
* some of them take an imm between -127 and 128.
*/
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
cond = mono_opcode_to_cond (next->opcode);
if ((cond == CMP_LT) || (cond == CMP_GE))
imm = ia64_is_imm8 (ins->inst_imm - 1);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
}
}
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_EQ:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_COND_EXC_LT_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
case OP_FCOMPARE: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ NULLIFY_INS (ins);
+ break;
+ }
ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case CEE_BGT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
- case OP_FBEQ:
- case OP_FBNE_UN:
- case OP_FBLT:
- case OP_FBLT_UN:
- case OP_FBGT:
- case OP_FBGT_UN:
- case OP_FBGE:
- case OP_FBGE_UN:
- case OP_FBLE:
- case OP_FBLE_UN:
+
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
- case OP_FCEQ:
- case OP_FCLT:
- case OP_FCGT:
- case OP_FCLT_UN:
- case OP_FCGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
+
break;
}
+ case OP_FCEQ:
+ case OP_FCGT:
+ case OP_FCGT_UN:
+ case OP_FCLT:
+ case OP_FCLT_UN:
+ /* The front end removes the fcompare, so introduce it again */
+ NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = ins->sreg2;
+
+ ins->opcode = OP_IA64_CSET;
+ break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
sum_reg = 0;
for (i = 0; i < 64; ++i) {
if (ins->inst_imm & (((gint64)1) << i)) {
- NEW_INS (cfg, ins, temp, shl_op);
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ NEW_INS (cfg, temp, shl_op);
+ temp->dreg = mono_alloc_ireg (cfg);
temp->sreg1 = ins->sreg1;
temp->inst_imm = i;
if (sum_reg == 0)
sum_reg = temp->dreg;
else {
- NEW_INS (cfg, ins, temp2, CEE_ADD);
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ NEW_INS (cfg, temp2, OP_LADD);
+ temp2->dreg = mono_alloc_ireg (cfg);
temp2->sreg1 = sum_reg;
temp2->sreg2 = temp->dreg;
sum_reg = temp2->dreg;
}
break;
}
- case CEE_CONV_OVF_U4:
- NEW_INS (cfg, ins, temp, OP_IA64_CMP4_LT);
+ case OP_LCONV_TO_OVF_U4:
+ NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
temp->sreg1 = ins->sreg1;
temp->sreg2 = IA64_R0;
- NEW_INS (cfg, ins, temp, OP_IA64_COND_EXC);
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
temp->inst_p1 = (char*)"OverflowException";
ins->opcode = OP_MOVE;
break;
- case CEE_CONV_OVF_I4_UN:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ case OP_LCONV_TO_OVF_I4_UN:
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = 0x7fffffff;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
- NEW_INS (cfg, ins, temp2, OP_IA64_CMP4_GT_UN);
+ NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
temp2->sreg1 = ins->sreg1;
temp2->sreg2 = temp->dreg;
- NEW_INS (cfg, ins, temp, OP_IA64_COND_EXC);
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
temp->inst_p1 = (char*)"OverflowException";
ins->opcode = OP_MOVE;
case OP_FCONV_TO_U2:
case OP_FCONV_TO_I1:
case OP_FCONV_TO_U1:
- NEW_INS (cfg, ins, temp, OP_FCONV_TO_I8);
+ NEW_INS (cfg, temp, OP_FCONV_TO_I8);
temp->sreg1 = ins->sreg1;
temp->dreg = ins->dreg;
default:
break;
}
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
- bb->max_vreg = cfg->rs->next_vreg;
-}
-
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
- return;
-
- mono_arch_lowering_pass (cfg, bb);
-
- mono_local_regalloc (cfg, bb);
+ bb->max_vreg = cfg->next_vreg;
}
/*
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
ins = cfg->args [i];
if (sig->hasthis && (i == 0))
case OP_VOIDCALL_REG:
case OP_VOIDCALL_MEMBASE:
break;
- case CEE_CALL:
+ case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
case OP_LCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
g_assert (ins->dreg == 8);
+ if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
+ ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE: {
+ case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE: {
ArgStorage storage;
cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
guint offset;
Ia64CodegenState code;
guint8 *code_start = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
- if (cfg->opt & MONO_OPT_PEEPHOLE)
- peephole_pass (cfg, bb);
-
if (cfg->opt & MONO_OPT_LOOP) {
/* FIXME: */
}
else
ia64_movl (code, ins->dreg, ins->inst_c0);
break;
+ case OP_JUMP_TABLE:
+ add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ ia64_movl (code, ins->dreg, 0);
+ break;
case OP_MOVE:
ia64_mov (code, ins->dreg, ins->sreg1);
break;
ins->inst_c0 = code.buf - cfg->native_code;
break;
case OP_NOP:
+ case OP_RELAXED_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
break;
case OP_BR_REG:
ia64_mov_to_br (code, IA64_B6, ins->sreg1);
ia64_br_cond_reg (code, IA64_B6);
break;
- case CEE_ADD:
case OP_IADD:
+ case OP_LADD:
ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_AND:
+ case OP_ISUB:
+ case OP_LSUB:
+ ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
case OP_IAND:
+ case OP_LAND:
ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IOR:
- case CEE_OR:
+ case OP_LOR:
ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IXOR:
- case CEE_XOR:
+ case OP_LXOR:
ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_INEG:
- case CEE_NEG:
+ case OP_LNEG:
ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
break;
case OP_INOT:
- case CEE_NOT:
+ case OP_LNOT:
ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
break;
case OP_ISHL:
+ case OP_LSHL:
ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ISHR:
ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
- case CEE_SHL:
- case OP_LSHL:
- ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_LSHR_UN:
ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_SUB:
- case OP_ISUB:
- ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_IADDCC:
/* p6 and p7 is set if there is signed/unsigned overflow */
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IAND_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IOR_IMM:
+ case OP_LOR_IMM:
ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IXOR_IMM:
+ case OP_LXOR_IMM:
ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_SHL_IMM:
case OP_LSHR_UN_IMM:
ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
- case CEE_MUL:
+ case OP_LMUL:
/* Based on gcc code */
ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
break;
case OP_STOREI8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ld8 (code, ins->dreg, ins->inst_basereg);
break;
ia64_no_stop (code);
ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
break;
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
+ case OP_LCONV_TO_I1:
/* FIXME: Is this needed ? */
ia64_sxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
+ case OP_LCONV_TO_I2:
/* FIXME: Is this needed ? */
ia64_sxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I4:
+ case OP_LCONV_TO_I4:
/* FIXME: Is this needed ? */
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
+ case OP_LCONV_TO_U1:
/* FIXME: Is this needed */
ia64_zxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
+ case OP_LCONV_TO_U2:
/* FIXME: Is this needed */
ia64_zxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U4:
+ case OP_LCONV_TO_U4:
/* FIXME: Is this needed */
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I8:
- case CEE_CONV_I:
+ case OP_ICONV_TO_I8:
+ case OP_ICONV_TO_I:
+ case OP_LCONV_TO_I8:
+ case OP_LCONV_TO_I:
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U8:
- case CEE_CONV_U:
+ case OP_LCONV_TO_U8:
+ case OP_LCONV_TO_U:
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
ia64_fmov (code, ins->dreg, ins->sreg1);
break;
case OP_STORER8_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R4:
+ case OP_ICONV_TO_R4:
+ case OP_LCONV_TO_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R8:
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
- break;
+ case OP_ICONV_TO_R8:
case OP_LCONV_TO_R8:
- /* FIXME: Difference with CEE_CONV_R8 ? */
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case OP_LCONV_TO_R4:
- /* FIXME: Difference with CEE_CONV_R4 ? */
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
- break;
case OP_FCONV_TO_R4:
ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
break;
case OP_FCONV_TO_I8:
+ case OP_FCONV_TO_I:
ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
break;
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
- case CEE_CALL:
+ case OP_CALL:
call = (MonoCallInst*)ins;
if (ins->flags & MONO_INST_HAS_METHOD)
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
- case OP_VOIDCALL_REG:
- call = (MonoCallInst*)ins;
+ case OP_VCALL2_REG:
+ case OP_VOIDCALL_REG: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ CallInfo *cinfo;
+ int out_reg;
- /* Indirect call */
/*
- * mono_arch_patch_delegate_trampoline will patch this, this is why R8 is
- * used.
+	 * mono_arch_find_this_argument () needs to find the this argument in a global
+ * register.
*/
+ cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
+ out_reg = cfg->arch.reg_out0;
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
+ out_reg ++;
+ ia64_mov (code, IA64_R10, out_reg);
+
+ /* Indirect call */
ia64_mov (code, IA64_R8, ins->sreg1);
ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
code = emit_move_return_value (cfg, ins, code);
break;
-
+ }
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
MonoCallInst *call = (MonoCallInst*)ins;
if (call->method && ins->inst_offset < 0) {
/*
* This is a possible IMT call so save the IMT method in a global
- * register where mono_arch_find_imt_method () and its friends can access
- * it.
+ * register where mono_arch_find_imt_method () and its friends can
+ * access it.
*/
ia64_movl (code, IA64_R9, call->method);
}
break;
}
+ case OP_LOCALLOC_IMM: {
+ gint32 abi_offset;
+
+ /* FIXME: Sigaltstack support */
+
+ gssize size = ins->inst_imm;
+ size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
+
+ if (ia64_is_adds_imm (size))
+ ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
+ else
+ ia64_movl (code, GP_SCRATCH_REG, size);
+
+ ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
+ ia64_mov (code, ins->dreg, IA64_SP);
+
+ /* An area at sp is reserved by the ABI for parameter passing */
+ abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
+ if (ia64_is_adds_imm (abi_offset))
+ ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
+ ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
+ }
+
+ if (ins->flags & MONO_INST_INIT) {
+ /* Upper limit */
+ ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
+
+ ia64_codegen_set_one_ins_per_bundle (code, TRUE);
+
+ /* Init loop */
+ ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
+ ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
+ ia64_br_cond_pred (code, 8, -2);
+
+ ia64_codegen_set_one_ins_per_bundle (code, FALSE);
+
+ ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ }
+
+ break;
+ }
case OP_TLS_GET:
ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
ia64_ld8 (code, ins->dreg, ins->dreg);
/* Signal to endfilter that we are called by call_filter */
ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
- /* Save the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ /* Branch target: */
+ if (ia64_is_imm14 (spvar->inst_offset))
+ ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
+ }
+
+ /* Save the return address */
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Load the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ if (ia64_is_imm14 (spvar->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
+ }
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
/* Test caller */
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
inst = cfg->args [i];
if (sig->hasthis && (i == 0))
stack_offset = ainfo->offset + ARGS_OFFSET;
+ /*
+ * FIXME: Native code might pass non register sized integers
+ * without initializing the upper bits.
+ */
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
+ int reg = cfg->arch.reg_in0 + ainfo->reg;
+
+ switch (mono_type_to_load_membase (cfg, arg_type)) {
+ case OP_LOADI1_MEMBASE:
+ ia64_sxt1 (code, reg, reg);
+ break;
+ case OP_LOADU1_MEMBASE:
+ ia64_zxt1 (code, reg, reg);
+ break;
+ case OP_LOADI2_MEMBASE:
+ ia64_sxt2 (code, reg, reg);
+ break;
+ case OP_LOADU2_MEMBASE:
+ ia64_zxt2 (code, reg, reg);
+ break;
+ default:
+ break;
+ }
+ }
+
/* Save volatile arguments to the stack */
if (inst->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatReg:
+ case ArgInFloatRegR4:
g_assert (inst->opcode == OP_REGOFFSET);
if (ia64_is_adds_imm (inst->inst_offset))
ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
return NULL;
}
-static gboolean tls_offset_inited = FALSE;
-
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
- if (!tls_offset_inited) {
- tls_offset_inited = TRUE;
-
- appdomain_tls_offset = mono_domain_get_tls_offset ();
- thread_tls_offset = mono_thread_get_tls_offset ();
- }
}
void
{
}
-void
-mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
-{
- MonoCallInst *call = (MonoCallInst*)inst;
- int out_reg = cfg->arch.reg_out0;
-
- if (vt_reg != -1) {
- CallInfo * cinfo = get_call_info (cfg, cfg->mempool, inst->signature, FALSE);
- MonoInst *vtarg;
-
- if (cinfo->ret.storage == ArgAggregate) {
- MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;
-
- /*
- * The valuetype is in registers after the call, need to be copied
- * to the stack. Save the address to a local here, so the call
- * instruction can access it.
- */
- g_assert (local->opcode == OP_REGOFFSET);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, local->inst_basereg, local->inst_offset, vt_reg);
- }
- else {
- MONO_INST_NEW (cfg, vtarg, OP_MOVE);
- vtarg->sreg1 = vt_reg;
- vtarg->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, vtarg);
-
- mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, out_reg, FALSE);
-
- out_reg ++;
- }
- }
-
- /* add the this argument */
- if (this_reg != -1) {
- MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_MOVE);
- this->type = this_type;
- this->sreg1 = this_reg;
- this->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, this);
-
- mono_call_inst_add_outarg_reg (cfg, call, this->dreg, out_reg, FALSE);
- }
-}
-
-
#ifdef MONO_ARCH_HAVE_IMT
/*
* LOCKING: called with the domain lock held
*/
gpointer
-mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
+ gpointer fail_tramp)
{
int i;
int size = 0;
guint8 *start, *buf;
Ia64CodegenState code;
+ g_assert (!fail_tramp);
+
size = count * 256;
buf = g_malloc0 (size);
ia64_codegen_init (code, buf);
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
- ia64_movl (code, GP_SCRATCH_REG, item->method);
+ ia64_movl (code, GP_SCRATCH_REG, item->key);
ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
}
item->jmp_code = (guint8*)code.buf + code.nins;
ia64_br_cond_pred (code, 7, 0);
- ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
+ ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
#endif
- ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
+ ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
#endif
}
} else {
- ia64_movl (code, GP_SCRATCH_REG, item->method);
+ ia64_movl (code, GP_SCRATCH_REG, item->key);
ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
item->jmp_code = (guint8*)code.buf + code.nins;
ia64_br_cond_pred (code, 6, 0);
g_assert (code.buf - buf <= size);
size = code.buf - buf;
- start = mono_code_manager_reserve (domain->code_mp, size);
+ start = mono_domain_code_reserve (domain, size);
memcpy (start, buf, size);
mono_arch_flush_icache (start, size);
return regs [IA64_R9];
}
+void
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
+{
+ /* Done by the implementation of the CALL_MEMBASE opcodes */
+}
+#endif
+
+gpointer
+mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+ return (gpointer)regs [IA64_R10];
+}
+
MonoObject*
-mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
- return regs [IA64_R10];
+ return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
}
-void
-mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
- /* Done by the implementation of the CALL_MEMBASE opcodes */
+ return NULL;
}
-#endif
MonoInst*
-mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
- if(cmethod->klass->image == mono_defaults.corlib &&
- (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
- (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+ if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+ (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+
/*
- * We don't use the generic version in mini_get_inst_for_method () since the
+		 * We don't use the generic version in mini_emit_inst_for_method () since the
* ia64 has atomic_add_imm opcodes.
*/
if (strcmp (cmethod->name, "Increment") == 0) {
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
ins->inst_imm = 1;
- ins->inst_i0 = args [0];
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "Decrement") == 0) {
guint32 opcode;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
ins->inst_imm = -1;
- ins->inst_i0 = args [0];
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "Add") == 0) {
guint32 opcode;
+ gboolean is_imm = FALSE;
+ gint64 imm = 0;
- if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
- else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
- else
- g_assert_not_reached ();
-
- MONO_INST_NEW (cfg, ins, opcode);
+ if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
+ imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
+ is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
+ }
+
+ if (is_imm) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->inst_imm = imm;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ } else {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->sreg2 = args [1]->dreg;
+ ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ }
+ MONO_ADD_INS (cfg->cbb, ins);
}
}
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
+MonoInst*
+mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
- MonoInst* ins;
-
- if (appdomain_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = appdomain_tls_offset;
- return ins;
+ return mono_get_domain_intrinsic (cfg);
}
-MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
+MonoInst*
+mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
- MonoInst* ins;
-
- if (thread_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = thread_tls_offset;
- return ins;
+ return mono_get_thread_intrinsic (cfg);
+}
+
+gpointer
+mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
+{
+ /* FIXME: implement */
+ g_assert_not_reached ();
}