#include "trace.h"
#include "mini-ia64.h"
-#include "inssel.h"
#include "cpu-ia64.h"
-
-static gint appdomain_tls_offset = -1;
-static gint thread_tls_offset = -1;
+#include "jit-icalls.h"
+#include "ir-emit.h"
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
-#define NOT_IMPLEMENTED g_assert_not_reached ()
-
static const char* gregs [] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
typedef enum {
ArgInIReg,
ArgInFloatReg,
+ ArgInFloatRegR4,
ArgOnStack,
ArgValuetypeAddrInIReg,
ArgAggregate,
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
+ gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
#define DEBUG(a) if (cfg->verbose_level > 1) a
-#define NEW_ICONST(cfg,dest,val) do { \
- (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
- (dest)->opcode = OP_ICONST; \
- (dest)->inst_c0 = (val); \
- (dest)->type = STACK_I4; \
- } while (0)
-
#define PARAM_REGS 8
static void inline
(*stack_size) += sizeof (gpointer);
}
else {
- ainfo->storage = ArgInFloatReg;
+ ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
ainfo->reg = 8 + *fr;
(*fr) += 1;
(*gr) += 1;
ainfo->nregs = info->num_fields;
ainfo->nslots = ainfo->nregs;
(*fr) += info->num_fields;
+ if (ainfo->atype == AggregateSingleHFA) {
+ /*
+ * FIXME: Have to keep track of the parameter slot number, which is
+ * not the same as *gr.
+ */
+ (*gr) += ALIGN_TO (info->num_fields, 2) / 2;
+ } else {
+ (*gr) += info->num_fields;
+ }
return;
}
}
* Gude" document for more information.
*/
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
+ MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ if (mp)
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ else
+ cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
gr = 0;
fr = 0;
/* return value */
{
ret_type = mono_type_get_underlying_type (sig->ret);
+ ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
switch (ret_type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
cinfo->ret.reg = 8;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = IA64_R8;
break;
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
- /* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
- if (cinfo->ret.storage == ArgInIReg)
- cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ if (sig->ret->byref) {
+ /* This seems to happen with ldfld wrappers */
+ cinfo->ret.storage = ArgInIReg;
+ } else {
+ add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ if (cinfo->ret.storage == ArgOnStack) {
+ /* The caller passes the address where the value is stored */
+ cinfo->vtype_retaddr = TRUE;
+ }
+ }
break;
}
case MONO_TYPE_VOID:
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to be always passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr) {
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
+ }
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
continue;
}
ptype = mono_type_get_underlying_type (sig->params [i]);
+ ptype = mini_get_basic_type_from_generic (gsctx, ptype);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
- CallInfo *cinfo = get_call_info (NULL, csig, FALSE);
+ CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
guint32 args_size = cinfo->stack_usage;
/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
return 0;
}
-static void
-mono_arch_break (void)
-{
-}
-
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
MonoMethodHeader *header;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
MonoInst *ins = cfg->args [i];
/* Already done */
return;
- cinfo = get_call_info (cfg->generic_sharing_context, mono_method_signature (cfg->method), FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
/* Some registers are reserved for use by the prolog/epilog */
reserved_regs = header->num_clauses ? 4 : 3;
* exception throwing code.
*/
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
-
- g_free (cinfo);
}
GList *
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/*
* Determine whenever the frame pointer can be eliminated.
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
+ if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
+ cfg->arch.omit_fp = FALSE;
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+ cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
*/
if (cfg->arch.omit_fp) {
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = IA64_SP;
offset = ARGS_OFFSET;
}
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
- cfg->ret->opcode = OP_REGVAR;
- cfg->ret->inst_c0 = cfg->arch.reg_in0 + cinfo->ret.reg;
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
break;
case ArgAggregate:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
break;
case ArgInFloatReg:
+ case ArgInFloatRegR4:
/*
* Since float regs are volatile, we save the arguments to
* the stack in the prolog.
}
if (!inreg && (ainfo->storage != ArgOnStack)) {
+ guint32 size = 0;
+
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
switch (ainfo->storage) {
case ArgAggregate:
if (ainfo->atype == AggregateSingleHFA)
- offset += ainfo->nslots * 4;
+ size = ainfo->nslots * 4;
else
- offset += ainfo->nslots * 8;
+ size = ainfo->nslots * 8;
break;
default:
- offset += sizeof (gpointer);
+ size = sizeof (gpointer);
break;
}
+
offset = ALIGN_TO (offset, sizeof (gpointer));
- if (cfg->arch.omit_fp)
+
+ if (cfg->arch.omit_fp) {
inst->inst_offset = offset;
- else
+ offset += size;
+ } else {
+ offset += size;
inst->inst_offset = - offset;
+ }
}
}
}
+ /*
+ * FIXME: This doesn't work because some variables are allocated during local
+ * regalloc.
+ */
+ /*
if (cfg->arch.omit_fp && offset == 16)
offset = 0;
+ */
cfg->stack_offset = offset;
-
- g_free (cinfo);
}
void
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (cinfo->ret.storage == ArgAggregate)
cfg->ret_var_is_local = TRUE;
-
- g_free (cinfo);
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr = ");
+ mono_print_ins (cfg->vret_addr);
+ }
+ }
}
static void
-add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
+add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
+ MonoInst *arg;
+
+ MONO_INST_NEW (cfg, arg, OP_NOP);
+ arg->sreg1 = tree->dreg;
+
switch (storage) {
case ArgInIReg:
- arg->opcode = OP_OUTARG_REG;
- arg->inst_left = tree;
- arg->inst_right = (MonoInst*)call;
- arg->backend.reg3 = reg;
- call->used_iregs |= 1 << reg;
+ arg->opcode = OP_MOVE;
+ arg->dreg = mono_alloc_ireg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
break;
case ArgInFloatReg:
- arg->opcode = OP_OUTARG_FREG;
- arg->inst_left = tree;
- arg->inst_right = (MonoInst*)call;
- arg->backend.reg3 = reg;
- call->used_fregs |= 1 << reg;
+ arg->opcode = OP_FMOVE;
+ arg->dreg = mono_alloc_freg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+ break;
+ case ArgInFloatRegR4:
+ arg->opcode = OP_FCONV_TO_R4;
+ arg->dreg = mono_alloc_freg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
}
+
+ MONO_ADD_INS (cfg->cbb, arg);
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
- MonoInst *arg;
MonoMethodSignature *tmp_sig;
- MonoInst *sig_arg;
+ /* Emit the signature cookie just before the implicit arguments */
+ MonoInst *sig_arg;
/* FIXME: Add support for signature tokens to AOT */
cfg->disable_aot = TRUE;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->dreg = mono_alloc_ireg (cfg);
sig_arg->inst_p0 = tmp_sig;
+ MONO_ADD_INS (cfg->cbb, sig_arg);
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- arg->inst_left = sig_arg;
- arg->inst_imm = 16 + cinfo->sig_cookie.offset;
- arg->type = STACK_PTR;
-
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}
-/*
- * take the arguments and generate the arch-specific
- * instructions to properly call the function in call.
- * This includes pushing, moving arguments to the right register
- * etc.
- */
-MonoCallInst*
-mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
- MonoInst *arg, *in;
+ MonoInst *in;
MonoMethodSignature *sig;
int i, n, stack_size;
CallInfo *cinfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
if (cinfo->ret.storage == ArgAggregate) {
- /* The code in emit_this_vret_arg needs a local */
- cfg->arch.ret_var_addr_local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- ((MonoInst*)cfg->arch.ret_var_addr_local)->flags |= MONO_INST_VOLATILE;
+ MonoInst *vtarg;
+ MonoInst *local;
+
+ /*
+ * The valuetype is in registers after the call, need to be copied
+ * to the stack. Save the address to a local here, so the call
+ * instruction can access it.
+ */
+ local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ local->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ret_var_addr_local = local;
+
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = local->dreg;
+ MONO_ADD_INS (cfg->cbb, vtarg);
+ }
+
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
}
for (i = 0; i < n; ++i) {
+ MonoType *arg_type;
+
ainfo = cinfo->args + i;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
- if (is_virtual && i == 0) {
- /* the argument will be attached to the call instruction */
- in = call->args [i];
- } else {
- MonoType *arg_type;
-
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- in = call->args [i];
- arg->cil_code = in->cil_code;
- arg->inst_left = in;
- arg->type = in->type;
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
-
- if (sig->hasthis && (i == 0))
- arg_type = &mono_defaults.object_class->byval_arg;
- else
- arg_type = sig->params [i - sig->hasthis];
-
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
- MonoInst *stack_addr;
- guint32 align;
- guint32 size;
-
- if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
- size = sizeof (MonoTypedRef);
- align = sizeof (gpointer);
- }
- else
- if (sig->pinvoke)
- size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else {
- /*
- * Other backends use mini_type_stack_size (), but that
- * aligns the size to 8, which is larger than the size of
- * the source, leading to reads of invalid memory if the
- * source is at the end of address space.
- */
- size = mono_class_value_size (in->klass, &align);
- }
-
- if (ainfo->storage == ArgAggregate) {
- MonoInst *vtaddr, *load, *load2, *offset_ins, *set_reg;
- int slot, j;
-
- vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
-
- /*
- * Part of the structure is passed in registers.
- */
- for (j = 0; j < ainfo->nregs; ++j) {
- int offset, load_op, dest_reg, arg_storage;
-
- slot = ainfo->reg + j;
-
- if (ainfo->atype == AggregateSingleHFA) {
- load_op = CEE_LDIND_R4;
- offset = j * 4;
- dest_reg = ainfo->reg + j;
- arg_storage = ArgInFloatReg;
- } else if (ainfo->atype == AggregateDoubleHFA) {
- load_op = CEE_LDIND_R8;
- offset = j * 8;
- dest_reg = ainfo->reg + j;
- arg_storage = ArgInFloatReg;
- } else {
- load_op = CEE_LDIND_I;
- offset = j * 8;
- dest_reg = cfg->arch.reg_out0 + ainfo->reg + j;
- arg_storage = ArgInIReg;
- }
-
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->ssa_op = MONO_SSA_LOAD;
- load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
-
- NEW_ICONST (cfg, offset_ins, offset);
- MONO_INST_NEW (cfg, load2, CEE_ADD);
- load2->inst_left = load;
- load2->inst_right = offset_ins;
-
- MONO_INST_NEW (cfg, load, load_op);
- load->inst_left = load2;
-
- if (j == 0)
- set_reg = arg;
- else
- MONO_INST_NEW (cfg, set_reg, OP_OUTARG_REG);
- add_outarg_reg (cfg, call, set_reg, arg_storage, dest_reg, load);
- if (set_reg != call->out_args) {
- set_reg->next = call->out_args;
- call->out_args = set_reg;
- }
- }
-
- /*
- * Part of the structure is passed on the stack.
- */
- for (j = ainfo->nregs; j < ainfo->nslots; ++j) {
- MonoInst *outarg;
+ in = call->args [i];
- slot = ainfo->reg + j;
-
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->ssa_op = MONO_SSA_LOAD;
- load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
+ if (sig->hasthis && (i == 0))
+ arg_type = &mono_defaults.object_class->byval_arg;
+ else
+ arg_type = sig->params [i - sig->hasthis];
- NEW_ICONST (cfg, offset_ins, (j * sizeof (gpointer)));
- MONO_INST_NEW (cfg, load2, CEE_ADD);
- load2->inst_left = load;
- load2->inst_right = offset_ins;
+ if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
+ guint32 align;
+ guint32 size;
- MONO_INST_NEW (cfg, load, CEE_LDIND_I);
- load->inst_left = load2;
+ if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
+ }
+ else if (sig->pinvoke)
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
- if (j == 0)
- outarg = arg;
- else
- MONO_INST_NEW (cfg, outarg, OP_OUTARG);
- outarg->inst_left = load;
- outarg->inst_imm = 16 + ainfo->offset + (slot - 8) * 8;
+ if (size > 0) {
+ MonoInst *arg;
- if (outarg != call->out_args) {
- outarg->next = call->out_args;
- call->out_args = outarg;
- }
- }
+ MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+ arg->sreg1 = in->dreg;
+ arg->klass = in->klass;
+ arg->backend.size = size;
+ arg->inst_p0 = call;
+ arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
- /* Trees can't be shared so make a copy */
- MONO_INST_NEW (cfg, arg, CEE_STIND_I);
- arg->cil_code = in->cil_code;
- arg->ssa_op = MONO_SSA_STORE;
- arg->inst_left = vtaddr;
- arg->inst_right = in;
- arg->type = in->type;
-
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
- }
- else {
- MONO_INST_NEW (cfg, stack_addr, OP_REGOFFSET);
- stack_addr->inst_basereg = IA64_SP;
- stack_addr->inst_offset = 16 + ainfo->offset;
- stack_addr->inst_imm = size;
-
- arg->opcode = OP_OUTARG_VT;
- arg->inst_right = stack_addr;
- }
+ MONO_ADD_INS (cfg->cbb, arg);
}
- else {
- switch (ainfo->storage) {
- case ArgInIReg:
- add_outarg_reg (cfg, call, arg, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
- break;
- case ArgInFloatReg:
- add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
- break;
- case ArgOnStack:
- if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref) {
- arg->opcode = OP_OUTARG_R4;
- }
- else
- arg->opcode = OP_OUTARG;
- arg->inst_imm = 16 + ainfo->offset;
- break;
- default:
- g_assert_not_reached ();
- }
+ }
+ else {
+ switch (ainfo->storage) {
+ case ArgInIReg:
+ add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
+ break;
+ case ArgInFloatReg:
+ case ArgInFloatRegR4:
+ add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
+ break;
+ case ArgOnStack:
+ if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+ break;
+ default:
+ g_assert_not_reached ();
}
}
}
}
call->stack_usage = cinfo->stack_usage;
- cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
- cfg->flags |= MONO_CFG_HAS_CALLS;
+}
- g_free (cinfo);
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+ int size = ins->backend.size;
- return call;
+ if (ainfo->storage == ArgAggregate) {
+ MonoInst *load, *store;
+ int i, slot;
+
+ /*
+ * Part of the structure is passed in registers.
+ */
+ for (i = 0; i < ainfo->nregs; ++i) {
+ slot = ainfo->reg + i;
+
+ if (ainfo->atype == AggregateSingleHFA) {
+ MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
+ load->inst_basereg = src->dreg;
+ load->inst_offset = i * 4;
+ load->dreg = mono_alloc_freg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+ } else if (ainfo->atype == AggregateDoubleHFA) {
+ MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
+ load->inst_basereg = src->dreg;
+ load->inst_offset = i * 8;
+ load->dreg = mono_alloc_freg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+ } else {
+ MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+ load->inst_basereg = src->dreg;
+ load->inst_offset = i * 8;
+ load->dreg = mono_alloc_ireg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
+ }
+ MONO_ADD_INS (cfg->cbb, load);
+ }
+
+ /*
+ * Part of the structure is passed on the stack.
+ */
+ for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
+ slot = ainfo->reg + i;
+
+ MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+ load->inst_basereg = src->dreg;
+ load->inst_offset = i * sizeof (gpointer);
+ load->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, load);
+
+ MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
+ store->sreg1 = load->dreg;
+ store->inst_destbasereg = IA64_SP;
+ store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
+ MONO_ADD_INS (cfg->cbb, store);
+ }
+ } else {
+ mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
+ }
}
-static void
-peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
- MonoInst *ins, *last_ins = NULL;
+ CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
+
+ switch (cinfo->ret.storage) {
+ case ArgInIReg:
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ break;
+ case ArgInFloatReg:
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+void
+mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+}
+
+void
+mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *n, *last_ins = NULL;
ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_MOVE:
case OP_FMOVE:
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- if (last_ins)
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
}
return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
-static void
-insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
-{
- if (ins == NULL) {
- ins = bb->code;
- bb->code = to_insert;
- to_insert->next = ins;
- }
- else {
- to_insert->next = ins->next;
- ins->next = to_insert;
- }
-}
-
#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- insert_after_ins (bb, last_ins, (dest)); \
+ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
last_ins = (dest); \
} while (0)
* Converts complex opcodes into simpler ones so that each IR instruction
* corresponds to one machine instruction.
*/
-static void
+void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp, *temp2, *temp3, *last_ins = NULL;
+ MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
ins = bb->code;
- if (bb->max_vreg > cfg->rs->next_vreg)
- cfg->rs->next_vreg = bb->max_vreg;
-
- while (ins) {
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
switch (ins->opcode) {
else {
NEW_INS (cfg, temp3, OP_I8CONST);
temp3->inst_c0 = ins->inst_imm;
- temp3->dreg = mono_regstate_next_int (cfg->rs);
+ temp3->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp3->dreg;
}
NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
ins->inst_offset = 0;
NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_basereg;
temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
else {
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+ temp->dreg = mono_alloc_ireg (cfg);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_basereg;
temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->dreg = mono_alloc_ireg (cfg);
}
ins->inst_offset = 0;
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
+ case OP_AND_IMM:
case OP_IAND_IMM:
+ case OP_LAND_IMM:
case OP_IOR_IMM:
+ case OP_LOR_IMM:
case OP_IXOR_IMM:
- case OP_AND_IMM:
+ case OP_LXOR_IMM:
case OP_SHL_IMM:
case OP_SHR_IMM:
case OP_ISHL_IMM:
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
is_imm = ia64_is_imm14 (ins->inst_imm);
switched = TRUE;
break;
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
is_imm = ia64_is_imm14 (- (ins->inst_imm));
if (is_imm) {
/* A = B - IMM -> A = B + (-IMM) */
case OP_IOR_IMM:
case OP_IXOR_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
+ case OP_LOR_IMM:
+ case OP_LXOR_IMM:
is_imm = ia64_is_imm8 (ins->inst_imm);
switched = TRUE;
break;
break;
}
- switch (ins->opcode) {
- case OP_ADD_IMM:
- ins->opcode = CEE_ADD;
- break;
- case OP_IADD_IMM:
- ins->opcode = OP_IADD;
- break;
- case OP_ISUB_IMM:
- ins->opcode = OP_ISUB;
- break;
- case OP_IAND_IMM:
- ins->opcode = OP_IAND;
- break;
- case OP_IOR_IMM:
- ins->opcode = OP_IOR;
- break;
- case OP_IXOR_IMM:
- ins->opcode = OP_IXOR;
- break;
- case OP_ISHL_IMM:
- ins->opcode = OP_ISHL;
- break;
- case OP_ISHR_IMM:
- ins->opcode = OP_ISHR;
- break;
- case OP_ISHR_UN_IMM:
- ins->opcode = OP_ISHR_UN;
- break;
- case OP_AND_IMM:
- ins->opcode = CEE_AND;
- break;
- case OP_SHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_SHR_IMM:
- ins->opcode = OP_LSHR;
- break;
- case OP_LSHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_LSHR_IMM:
- ins->opcode = OP_LSHR;
- break;
- case OP_LSHR_UN_IMM:
- ins->opcode = OP_LSHR_UN;
- break;
- default:
- g_assert_not_reached ();
- }
+ ins->opcode = mono_op_imm_to_op (ins->opcode);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
else {
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
}
break;
}
case OP_COMPARE_IMM:
- case OP_ICOMPARE_IMM: {
+ case OP_ICOMPARE_IMM:
+ case OP_LCOMPARE_IMM: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
gboolean imm;
CompRelation cond;
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ NULLIFY_INS (ins);
+ break;
+ }
+
/*
* The compare_imm instructions have switched up arguments, and
* some of them take an imm between -127 and 128.
else {
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
}
}
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
- if (! (next->flags & MONO_INST_BRLABEL))
- next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_EQ:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_COND_EXC_LT_UN:
+ next->inst_target_bb = next->inst_true_bb;
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
next = ins->next;
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ NULLIFY_INS (ins);
+ break;
+ }
+
ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case CEE_BGT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
- case OP_FBEQ:
- case OP_FBNE_UN:
- case OP_FBLT:
- case OP_FBLT_UN:
- case OP_FBGT:
- case OP_FBGT_UN:
- case OP_FBGE:
- case OP_FBGE_UN:
- case OP_FBLE:
- case OP_FBLE_UN:
+
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
- if (! (next->flags & MONO_INST_BRLABEL))
- next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
+ next->inst_target_bb = next->inst_true_bb;
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
- case OP_FCEQ:
- case OP_FCLT:
- case OP_FCGT:
- case OP_FCLT_UN:
- case OP_FCGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
+
break;
}
+ case OP_FCEQ:
+ case OP_FCGT:
+ case OP_FCGT_UN:
+ case OP_FCLT:
+ case OP_FCLT_UN:
+ /* The front end removes the fcompare, so introduce it again */
+ NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = ins->sreg2;
+
+ ins->opcode = OP_IA64_CSET;
+ MONO_INST_NULLIFY_SREGS (ins);
+ break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
for (i = 0; i < 64; ++i) {
if (ins->inst_imm & (((gint64)1) << i)) {
NEW_INS (cfg, temp, shl_op);
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
temp->sreg1 = ins->sreg1;
temp->inst_imm = i;
if (sum_reg == 0)
sum_reg = temp->dreg;
else {
- NEW_INS (cfg, temp2, CEE_ADD);
- temp2->dreg = mono_regstate_next_int (cfg->rs);
+ NEW_INS (cfg, temp2, OP_LADD);
+ temp2->dreg = mono_alloc_ireg (cfg);
temp2->sreg1 = sum_reg;
temp2->sreg2 = temp->dreg;
sum_reg = temp2->dreg;
}
break;
}
- case CEE_CONV_OVF_U4:
+ case OP_LCONV_TO_OVF_U4:
NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
temp->sreg1 = ins->sreg1;
temp->sreg2 = IA64_R0;
ins->opcode = OP_MOVE;
break;
- case CEE_CONV_OVF_I4_UN:
+ case OP_LCONV_TO_OVF_I4_UN:
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = 0x7fffffff;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->dreg = mono_alloc_ireg (cfg);
NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
temp2->sreg1 = ins->sreg1;
}
bb->last_ins = last_ins;
- bb->max_vreg = cfg->rs->next_vreg;
-}
-
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- if (!bb->code)
- return;
-
- mono_arch_lowering_pass (cfg, bb);
-
- mono_local_regalloc (cfg, bb);
+ bb->max_vreg = cfg->next_vreg;
}
/*
sig = mono_method_signature (method);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/* This is the opposite of the code in emit_prolog */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
ins = cfg->args [i];
if (sig->hasthis && (i == 0))
}
}
- g_free (cinfo);
-
return code;
}
case OP_VOIDCALL_REG:
case OP_VOIDCALL_MEMBASE:
break;
- case CEE_CALL:
+ case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
case OP_LCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
g_assert (ins->dreg == 8);
+ if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
+ ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE: {
+ case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE: {
ArgStorage storage;
- cinfo = get_call_info (cfg->generic_sharing_context, ((MonoCallInst*)ins)->signature, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
storage = cinfo->ret.storage;
if (storage == ArgAggregate) {
}
}
}
- g_free (cinfo);
break;
}
default:
guint last_offset = 0;
int max_len, cpos;
- if (cfg->opt & MONO_OPT_PEEPHOLE)
- peephole_pass (cfg, bb);
-
if (cfg->opt & MONO_OPT_LOOP) {
/* FIXME: */
}
break_count ();
#endif
- ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS (bb, ins) {
offset = code.buf - cfg->native_code;
max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;
else
ia64_movl (code, ins->dreg, ins->inst_c0);
break;
+ case OP_JUMP_TABLE:
+ add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ ia64_movl (code, ins->dreg, 0);
+ break;
case OP_MOVE:
ia64_mov (code, ins->dreg, ins->sreg1);
break;
int pred = 0;
if (ins->opcode == OP_IA64_BR_COND)
pred = 6;
- if (ins->flags & MONO_INST_BRLABEL) {
- if (ins->inst_i0->inst_c0) {
- NOT_IMPLEMENTED;
- } else {
- add_patch_info (cfg, code, MONO_PATCH_INFO_LABEL, ins->inst_i0);
- ia64_br_cond_pred (code, pred, 0);
- }
- } else {
- if (ins->inst_target_bb->native_offset) {
- guint8 *pos = code.buf + code.nins;
+ if (ins->inst_target_bb->native_offset) {
+ guint8 *pos = code.buf + code.nins;
- ia64_br_cond_pred (code, pred, 0);
- ia64_begin_bundle (code);
- ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
- } else {
- add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
- ia64_br_cond_pred (code, pred, 0);
- }
- }
+ ia64_br_cond_pred (code, pred, 0);
+ ia64_begin_bundle (code);
+ ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
+ } else {
+ add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
+ ia64_br_cond_pred (code, pred, 0);
+ }
break;
}
case OP_LABEL:
ins->inst_c0 = code.buf - cfg->native_code;
break;
case OP_NOP:
+ case OP_RELAXED_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
break;
case OP_BR_REG:
ia64_mov_to_br (code, IA64_B6, ins->sreg1);
ia64_br_cond_reg (code, IA64_B6);
break;
- case CEE_ADD:
case OP_IADD:
+ case OP_LADD:
ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_AND:
+ case OP_ISUB:
+ case OP_LSUB:
+ ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
case OP_IAND:
+ case OP_LAND:
ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IOR:
- case CEE_OR:
+ case OP_LOR:
ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IXOR:
- case CEE_XOR:
+ case OP_LXOR:
ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_INEG:
- case CEE_NEG:
+ case OP_LNEG:
ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
break;
case OP_INOT:
- case CEE_NOT:
+ case OP_LNOT:
ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
break;
case OP_ISHL:
+ case OP_LSHL:
ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ISHR:
+ ia64_sxt4 (code, GP_SCRATCH_REG, ins->sreg1);
+ ia64_shr (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
+ break;
case OP_LSHR:
ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
- case CEE_SHL:
- case OP_LSHL:
- ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_LSHR_UN:
ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_SUB:
- case OP_ISUB:
- ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_IADDCC:
/* p6 and p7 is set if there is signed/unsigned overflow */
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IAND_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IOR_IMM:
+ case OP_LOR_IMM:
ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IXOR_IMM:
+ case OP_LXOR_IMM:
ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_SHL_IMM:
ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_SHR_IMM:
- case OP_ISHR_IMM:
case OP_LSHR_IMM:
ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
+ case OP_ISHR_IMM:
+ g_assert (ins->inst_imm <= 64);
+ ia64_extr (code, ins->dreg, ins->sreg1, ins->inst_imm, 32 - ins->inst_imm);
+ break;
case OP_ISHR_UN_IMM:
ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
case OP_LSHR_UN_IMM:
ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
- case CEE_MUL:
+ case OP_LMUL:
/* Based on gcc code */
ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
break;
case OP_STOREI8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ld8 (code, ins->dreg, ins->inst_basereg);
break;
ia64_no_stop (code);
ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
break;
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
+ case OP_LCONV_TO_I1:
/* FIXME: Is this needed ? */
ia64_sxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
+ case OP_LCONV_TO_I2:
/* FIXME: Is this needed ? */
ia64_sxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I4:
+ case OP_LCONV_TO_I4:
/* FIXME: Is this needed ? */
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
+ case OP_LCONV_TO_U1:
/* FIXME: Is this needed */
ia64_zxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
+ case OP_LCONV_TO_U2:
/* FIXME: Is this needed */
ia64_zxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U4:
+ case OP_LCONV_TO_U4:
/* FIXME: Is this needed */
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I8:
- case CEE_CONV_I:
+ case OP_ICONV_TO_I8:
+ case OP_ICONV_TO_I:
+ case OP_LCONV_TO_I8:
+ case OP_LCONV_TO_I:
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U8:
- case CEE_CONV_U:
+ case OP_LCONV_TO_U8:
+ case OP_LCONV_TO_U:
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
ia64_fmov (code, ins->dreg, ins->sreg1);
break;
case OP_STORER8_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R4:
+ case OP_ICONV_TO_R4:
+ case OP_LCONV_TO_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R8:
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
- break;
+ case OP_ICONV_TO_R8:
case OP_LCONV_TO_R8:
- /* FIXME: Difference with CEE_CONV_R8 ? */
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case OP_LCONV_TO_R4:
- /* FIXME: Difference with CEE_CONV_R4 ? */
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
- break;
case OP_FCONV_TO_R4:
ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
break;
case OP_FCONV_TO_I8:
+ case OP_FCONV_TO_I:
ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
break;
/* Calls */
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
- ia64_ld8 (code, GP_SCRATCH_REG, ins->sreg1);
+ /* Can't use ld8 as this could be a vtype address */
+ ia64_ld1 (code, GP_SCRATCH_REG, ins->sreg1);
break;
case OP_ARGLIST:
ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
- case CEE_CALL:
+ case OP_CALL:
call = (MonoCallInst*)ins;
if (ins->flags & MONO_INST_HAS_METHOD)
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
- case OP_VOIDCALL_REG:
- call = (MonoCallInst*)ins;
+ case OP_VCALL2_REG:
+ case OP_VOIDCALL_REG: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ CallInfo *cinfo;
+ int out_reg;
- /* Indirect call */
/*
- * mono_arch_patch_delegate_trampoline will patch this, this is why R8 is
- * used.
+ * mono_arch_get_this_arg_from_call () needs to find the this argument in a global
+ * register.
*/
+ cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
+ out_reg = cfg->arch.reg_out0;
+ ia64_mov (code, IA64_R10, out_reg);
+
+ /* Indirect call */
ia64_mov (code, IA64_R8, ins->sreg1);
ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
code = emit_move_return_value (cfg, ins, code);
break;
-
+ }
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
MonoCallInst *call = (MonoCallInst*)ins;
CallInfo *cinfo;
int out_reg;
- /*
- * There are no membase instructions on ia64, but we can't
- * lower this since get_vcall_slot_addr () needs to decode it.
- */
-
- /* Keep this in synch with get_vcall_slot_addr */
ia64_mov (code, IA64_R11, ins->sreg1);
if (ia64_is_imm14 (ins->inst_offset))
ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
if (call->method && ins->inst_offset < 0) {
/*
* This is a possible IMT call so save the IMT method in a global
- * register where mono_arch_find_imt_method () and its friends can access
- * it.
+ * register where mono_arch_find_imt_method () and its friends can
+ * access it.
*/
ia64_movl (code, IA64_R9, call->method);
}
* mono_arch_find_this_arg () needs to find the this argument in a global
* register.
*/
- cinfo = get_call_info (NULL, call->signature, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
out_reg = cfg->arch.reg_out0;
- if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
- out_reg ++;
- g_free (cinfo);
ia64_mov (code, IA64_R10, out_reg);
- ia64_begin_bundle (code);
- ia64_codegen_set_one_ins_per_bundle (code, TRUE);
-
ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
- /*
- * This nop will tell get_vcall_slot_addr that this is a virtual
- * call.
- */
- ia64_nop_i (code, 0x12345);
-
ia64_br_call_reg (code, IA64_B0, IA64_B6);
- ia64_codegen_set_one_ins_per_bundle (code, FALSE);
-
code = emit_move_return_value (cfg, ins, code);
break;
}
break;
}
case OP_BREAK:
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_arch_break);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
break;
case OP_LOCALLOC: {
break;
}
+ case OP_LOCALLOC_IMM: {
+ gint32 abi_offset;
+
+ /* FIXME: Sigaltstack support */
+
+ gssize size = ins->inst_imm;
+ size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
+
+ if (ia64_is_adds_imm (size))
+ ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
+ else
+ ia64_movl (code, GP_SCRATCH_REG, size);
+
+ ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
+ ia64_mov (code, ins->dreg, IA64_SP);
+
+ /* An area at sp is reserved by the ABI for parameter passing */
+ abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
+ if (ia64_is_adds_imm (abi_offset))
+ ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
+ ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
+ }
+
+ if (ins->flags & MONO_INST_INIT) {
+ /* Upper limit */
+ ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
+
+ ia64_codegen_set_one_ins_per_bundle (code, TRUE);
+
+ /* Init loop */
+ ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
+ ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
+ ia64_br_cond_pred (code, 8, -2);
+
+ ia64_codegen_set_one_ins_per_bundle (code, FALSE);
+
+ ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ }
+
+ break;
+ }
case OP_TLS_GET:
ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
ia64_ld8 (code, ins->dreg, ins->dreg);
ia64_movl (code, GP_SCRATCH_REG2, 0);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
ia64_br_cond_reg (code, IA64_B6);
+ // FIXME:
+ //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
break;
case OP_START_HANDLER: {
/* Signal to endfilter that we are called by call_filter */
ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
- /* Save the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ /* Branch target: */
+ if (ia64_is_imm14 (spvar->inst_offset))
+ ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
+ }
+
+ /* Save the return address */
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Load the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ if (ia64_is_imm14 (spvar->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
+ }
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
/* Test caller */
last_ins = ins;
last_offset = offset;
-
- ins = ins->next;
}
ia64_codegen_close (code);
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (mono_arch_break, "mono_arch_break", NULL, TRUE);
}
static Ia64InsType ins_types_in_template [32][3] = {
sig = mono_method_signature (method);
pos = 0;
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
- cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 512);
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
cfg->code_size += 1024;
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
inst = cfg->args [i];
if (sig->hasthis && (i == 0))
stack_offset = ainfo->offset + ARGS_OFFSET;
+ /*
+ * FIXME: Native code might pass non register sized integers
+ * without initializing the upper bits.
+ */
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
+ int reg = cfg->arch.reg_in0 + ainfo->reg;
+
+ switch (mono_type_to_load_membase (cfg, arg_type)) {
+ case OP_LOADI1_MEMBASE:
+ ia64_sxt1 (code, reg, reg);
+ break;
+ case OP_LOADU1_MEMBASE:
+ ia64_zxt1 (code, reg, reg);
+ break;
+ case OP_LOADI2_MEMBASE:
+ ia64_sxt2 (code, reg, reg);
+ break;
+ case OP_LOADU2_MEMBASE:
+ ia64_zxt2 (code, reg, reg);
+ break;
+ default:
+ break;
+ }
+ }
+
/* Save volatile arguments to the stack */
if (inst->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatReg:
+ case ArgInFloatRegR4:
g_assert (inst->opcode == OP_REGOFFSET);
if (ia64_is_adds_imm (inst->inst_offset))
ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
ia64_codegen_close (code);
- g_free (cinfo);
-
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (cfg->generic_sharing_context, mono_method_signature (method), FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
ainfo = &cinfo->ret;
switch (ainfo->storage) {
case ArgAggregate:
default:
break;
}
- g_free (cinfo);
ia64_begin_bundle (code);
/* Allocate a new area on the stack and save arguments there */
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
n = sig->param_count + sig->hasthis;
ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
-
- g_free (cinfo);
}
ia64_codegen_close (code);
}
void*
-mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
+mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
{
Ia64CodegenState code;
CallInfo *cinfo = NULL;
ia64_codegen_init (code, p);
- cinfo = get_call_info (cfg->generic_sharing_context, sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/* Save return value + pass it to func */
switch (cinfo->ret.storage) {
break;
}
- g_free (cinfo);
-
add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
return 0;
}
-gpointer
-mono_arch_get_vcall_slot (guint8* code, gpointer *regs, int *displacement)
-{
- guint8 *bundle2 = code - 48;
- guint8 *bundle3 = code - 32;
- guint8 *bundle4 = code - 16;
- guint64 ins21 = ia64_bundle_ins1 (bundle2);
- guint64 ins22 = ia64_bundle_ins2 (bundle2);
- guint64 ins23 = ia64_bundle_ins3 (bundle2);
- guint64 ins31 = ia64_bundle_ins1 (bundle3);
- guint64 ins32 = ia64_bundle_ins2 (bundle3);
- guint64 ins33 = ia64_bundle_ins3 (bundle3);
- guint64 ins41 = ia64_bundle_ins1 (bundle4);
- guint64 ins42 = ia64_bundle_ins2 (bundle4);
- guint64 ins43 = ia64_bundle_ins3 (bundle4);
-
- /*
- * Virtual calls are made with:
- *
- * [MII] ld8 r31=[r8]
- * nop.i 0x0
- * nop.i 0x0;;
- * [MII] nop.m 0x0
- * mov.sptk b6=r31,0x2000000000f32a80
- * nop.i 0x0
- * [MII] nop.m 0x0
- * nop.i 0x123456
- * nop.i 0x0
- * [MIB] nop.m 0x0
- * nop.i 0x0
- * br.call.sptk.few b0=b6;;
- */
-
- if (((ia64_bundle_template (bundle3) == IA64_TEMPLATE_MII) ||
- (ia64_bundle_template (bundle3) == IA64_TEMPLATE_MIIS)) &&
- (ia64_bundle_template (bundle4) == IA64_TEMPLATE_MIBS) &&
- (ins31 == IA64_NOP_M) &&
- (ia64_ins_opcode (ins32) == 0) && (ia64_ins_x3 (ins32) == 0) && (ia64_ins_x6 (ins32) == 0x1) && (ia64_ins_y (ins32) == 0) &&
- (ins33 == IA64_NOP_I) &&
- (ins41 == IA64_NOP_M) &&
- (ins42 == IA64_NOP_I) &&
- (ia64_ins_opcode (ins43) == 1) && (ia64_ins_b1 (ins43) == 0) && (ia64_ins_b2 (ins43) == 6) &&
- ((ins32 >> 6) & 0xfffff) == 0x12345) {
- g_assert (ins21 == IA64_NOP_M);
- g_assert (ins23 == IA64_NOP_I);
- g_assert (ia64_ins_opcode (ins22) == 0);
- g_assert (ia64_ins_x3 (ins22) == 7);
- g_assert (ia64_ins_x (ins22) == 0);
- g_assert (ia64_ins_b1 (ins22) == IA64_B6);
-
- *displacement = (gssize)regs [IA64_R8] - (gssize)regs [IA64_R11];
-
- return regs [IA64_R11];
- }
-
- return NULL;
-}
-
gpointer*
-mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
-{
- gpointer vt;
- int displacement;
- vt = mono_arch_get_vcall_slot (code, regs, &displacement);
- if (!vt)
- return NULL;
- return (gpointer*)(gpointer)((char*)vt + displacement);
-}
-
-gpointer*
-mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
{
NOT_IMPLEMENTED;
return NULL;
}
-static gboolean tls_offset_inited = FALSE;
-
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
+/*
+ * Nothing left to do on ia64: the appdomain/thread TLS offset caching that
+ * used to live here was removed along with the statics at the top of the
+ * file; mono_arch_get_domain_intrinsic () now delegates to the generic
+ * mono_get_domain_intrinsic () instead.
+ */
-if (!tls_offset_inited) {
-tls_offset_inited = TRUE;
-
-appdomain_tls_offset = mono_domain_get_tls_offset ();
-thread_tls_offset = mono_thread_get_tls_offset ();
-}
}
void
{
}
-void
-mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
-{
- MonoCallInst *call = (MonoCallInst*)inst;
- int out_reg = cfg->arch.reg_out0;
-
- if (vt_reg != -1) {
- CallInfo * cinfo = get_call_info (cfg->generic_sharing_context, inst->signature, FALSE);
- MonoInst *vtarg;
-
- if (cinfo->ret.storage == ArgAggregate) {
- MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;
-
- /*
- * The valuetype is in registers after the call, need to be copied
- * to the stack. Save the address to a local here, so the call
- * instruction can access it.
- */
- g_assert (local->opcode == OP_REGOFFSET);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, local->inst_basereg, local->inst_offset, vt_reg);
- }
- else {
- MONO_INST_NEW (cfg, vtarg, OP_MOVE);
- vtarg->sreg1 = vt_reg;
- vtarg->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, vtarg);
-
- mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, out_reg, FALSE);
-
- out_reg ++;
- }
-
- g_free (cinfo);
- }
-
- /* add the this argument */
- if (this_reg != -1) {
- MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_MOVE);
- this->type = this_type;
- this->sreg1 = this_reg;
- this->dreg = mono_regstate_next_int (cfg->rs);
- mono_bblock_add_inst (cfg->cbb, this);
-
- mono_call_inst_add_outarg_reg (cfg, call, this->dreg, out_reg, FALSE);
- }
-}
-
-
#ifdef MONO_ARCH_HAVE_IMT
/*
* LOCKING: called with the domain lock held
*/
gpointer
-mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
+ gpointer fail_tramp)
{
int i;
int size = 0;
ia64_begin_bundle (code);
item->code_target = (guint8*)code.buf + code.nins;
if (item->is_equals) {
- if (item->check_target_idx) {
- if (!item->compare_done) {
- ia64_movl (code, GP_SCRATCH_REG, item->method);
+ gboolean fail_case = !item->check_target_idx && fail_tramp;
+
+ if (item->check_target_idx || fail_case) {
+ if (!item->compare_done || fail_case) {
+ ia64_movl (code, GP_SCRATCH_REG, item->key);
ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
}
item->jmp_code = (guint8*)code.buf + code.nins;
ia64_br_cond_pred (code, 7, 0);
- ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
- ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
+ if (item->has_target_code) {
+ ia64_movl (code, GP_SCRATCH_REG, item->value.target_code);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
+ ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
+ }
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
+
+ if (fail_case) {
+ ia64_begin_bundle (code);
+ ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
+ ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
+ ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
+ ia64_br_cond_reg (code, IA64_B6);
+ item->jmp_code = NULL;
+ }
} else {
/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
#endif
- ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
+ ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
#endif
}
} else {
- ia64_movl (code, GP_SCRATCH_REG, item->method);
+ ia64_movl (code, GP_SCRATCH_REG, item->key);
ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
item->jmp_code = (guint8*)code.buf + code.nins;
ia64_br_cond_pred (code, 6, 0);
g_assert (code.buf - buf <= size);
size = code.buf - buf;
- start = mono_code_manager_reserve (domain->code_mp, size);
+ if (fail_tramp) {
+ start = mono_method_alloc_generic_virtual_thunk (domain, size + 16);
+ start = (gpointer)ALIGN_TO (start, 16);
+ } else {
+ start = mono_domain_code_reserve (domain, size);
+ }
memcpy (start, buf, size);
mono_arch_flush_icache (start, size);
}
+/*
+ * Return the IMT method for a call site. The CALL_MEMBASE emission code saves
+ * the method in the global register R9 when the call might be an IMT call
+ * (call->method && ins->inst_offset < 0), which is where we read it back here.
+ */
MonoMethod*
-mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
-return regs [IA64_R9];
-}
-
-MonoObject*
-mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
-{
-return regs [IA64_R10];
+return (MonoMethod*)regs [IA64_R9];
}
void
-mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
/* Done by the implementation of the CALL_MEMBASE opcodes */
}
#endif
+/*
+ * Recover the 'this' argument at a call site. The code emitted for the
+ * indirect (CALL_REG) and CALL_MEMBASE opcodes copies the first out argument
+ * register (cfg->arch.reg_out0) into the global register R10 precisely so it
+ * can be found here.
+ */
+gpointer
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
+{
+return (gpointer)regs [IA64_R10];
+}
+
+/*
+ * No optimized delegate-invoke thunk on ia64. NOTE(review): returning NULL
+ * presumably makes the caller fall back to the generic delegate invoke path —
+ * confirm against the shared mini code.
+ */
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+return NULL;
+}
+
MonoInst*
-mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
- if (cmethod->klass == mono_defaults.thread_class &&
- strcmp (cmethod->name, "MemoryBarrier") == 0) {
- MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
- } else if(cmethod->klass->image == mono_defaults.corlib &&
- (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
- (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+ if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+ (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+ /*
+ * We don't use the generic version in mini_emit_inst_for_method () since
+ * ia64 has atomic_add_imm opcodes.
+ */
if (strcmp (cmethod->name, "Increment") == 0) {
guint32 opcode;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
ins->inst_imm = 1;
- ins->inst_i0 = args [0];
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "Decrement") == 0) {
guint32 opcode;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
ins->inst_imm = -1;
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Exchange") == 0) {
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else if (strcmp (cmethod->name, "Add") == 0) {
guint32 opcode;
+ gboolean is_imm = FALSE;
+ gint64 imm = 0;
- if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_EXCHANGE_I4;
- else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
- (fsig->params [0]->type == MONO_TYPE_I) ||
- (fsig->params [0]->type == MONO_TYPE_OBJECT))
- opcode = OP_ATOMIC_EXCHANGE_I8;
- else
- return NULL;
+ if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
+ imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
- MONO_INST_NEW (cfg, ins, opcode);
+ is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
+ }
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- } else if (strcmp (cmethod->name, "Add") == 0) {
- guint32 opcode;
+ if (is_imm) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ else
+ g_assert_not_reached ();
- if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
- else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
- else
- g_assert_not_reached ();
-
- MONO_INST_NEW (cfg, ins, opcode);
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->inst_imm = imm;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ } else {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
- /* 64 bit reads are already atomic */
- MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
- ins->inst_i0 = args [0];
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->sreg2 = args [1]->dreg;
+ ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ }
+ MONO_ADD_INS (cfg->cbb, ins);
}
}
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
+MonoInst*
+mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
-MonoInst* ins;
-
-if (appdomain_tls_offset == -1)
-return NULL;
-
-MONO_INST_NEW (cfg, ins, OP_TLS_GET);
-ins->inst_offset = appdomain_tls_offset;
-return ins;
+/* Delegate to the generic implementation instead of using the removed
+ * per-arch cached appdomain TLS offset. */
+return mono_get_domain_intrinsic (cfg);
}
-MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
+gpointer
+mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
- MonoInst* ins;
-
- if (thread_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = thread_tls_offset;
- return ins;
+ /* FIXME: implement */
+ g_assert_not_reached ();
}