#include "mini-ia64.h"
#include "inssel.h"
#include "cpu-ia64.h"
+#include "jit-icalls.h"
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
-#define NOT_IMPLEMENTED g_assert_not_reached ()
-
static const char* gregs [] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
typedef enum {
ArgInIReg,
ArgInFloatReg,
+ ArgInFloatRegR4,
ArgOnStack,
ArgValuetypeAddrInIReg,
ArgAggregate,
(*stack_size) += sizeof (gpointer);
}
else {
- ainfo->storage = ArgInFloatReg;
+ ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
ainfo->reg = 8 + *fr;
(*fr) += 1;
(*gr) += 1;
}
static void
-add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
+add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
else if (sig->pinvoke)
size = mono_type_native_stack_size (&klass->byval_arg, NULL);
else
- size = mono_type_stack_size (&klass->byval_arg, NULL);
+ size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);
if (!sig->pinvoke || (size == 0)) {
/* Allways pass in memory */
ainfo->nregs = info->num_fields;
ainfo->nslots = ainfo->nregs;
(*fr) += info->num_fields;
+ if (ainfo->atype == AggregateSingleHFA) {
+ /*
+ * FIXME: Have to keep track of the parameter slot number, which is
+ * not the same as *gr.
+ */
+ (*gr) += ALIGN_TO (info->num_fields, 2) / 2;
+ } else {
+ (*gr) += info->num_fields;
+ }
return;
}
}
* Gude" document for more information.
*/
static CallInfo*
-get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
guint32 i, gr, fr;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
+ MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ if (mp)
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ else
+ cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
gr = 0;
fr = 0;
/* return value */
{
ret_type = mono_type_get_underlying_type (sig->ret);
+ ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
switch (ret_type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
- /* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
- if (cinfo->ret.storage == ArgInIReg)
- cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ if (sig->ret->byref) {
+ /* This seems to happen with ldfld wrappers */
+ cinfo->ret.storage = ArgInIReg;
+ } else {
+ add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ if (cinfo->ret.storage == ArgOnStack)
+ /* The caller passes the address where the value is stored */
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
break;
}
case MONO_TYPE_VOID:
continue;
}
ptype = mono_type_get_underlying_type (sig->params [i]);
+ ptype = mini_get_basic_type_from_generic (gsctx, ptype);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_TYPEDBYREF:
/* FIXME: */
/* We allways pass valuetypes on the stack */
- add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
+ add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
- CallInfo *cinfo = get_call_info (csig, FALSE);
+ CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
guint32 args_size = cinfo->stack_usage;
/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
{
}
+/*
+ * mono_arch_init:
+ *
+ *   Initialize architecture specific code. The IA64 backend needs no
+ * global per-process setup, so this is intentionally a no-op stub
+ * required by the arch-independent JIT interface.
+ */
+void
+mono_arch_init (void)
+{
+}
+
+/*
+ * mono_arch_cleanup:
+ *
+ *   Cleanup architecture specific code. Counterpart of mono_arch_init ();
+ * nothing was allocated there, so there is nothing to release here.
+ */
+void
+mono_arch_cleanup (void)
+{
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
return 0;
}
-static void
-mono_arch_break (void)
-{
-}
-
-static gboolean
-is_regsize_var (MonoType *t) {
- if (t->byref)
- return TRUE;
- t = mono_type_get_underlying_type (t);
- switch (t->type) {
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_BOOLEAN:
- return TRUE;
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- return TRUE;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (t))
- return TRUE;
- return FALSE;
- case MONO_TYPE_VALUETYPE:
- return FALSE;
- }
- return FALSE;
-}
-
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- MonoInst *ins = cfg->varinfo [i];
+ MonoInst *ins = cfg->args [i];
ArgInfo *ainfo = &cinfo->args [i];
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
- if (is_regsize_var (ins->inst_vtype)) {
+ if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
/* Already done */
return;
- cinfo = get_call_info (mono_method_signature (cfg->method), FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);
header = mono_method_get_header (cfg->method);
}
/*
- * Need to allocate at least 2 out register for use by CEE_THROW / the system
+ * Need to allocate at least 2 out register for use by OP_THROW / the system
* exception throwing code.
*/
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
-
- g_free (cinfo);
}
GList *
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/*
* Determine whenever the frame pointer can be eliminated.
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
+ if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
+ cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
*/
if (cfg->arch.omit_fp) {
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = IA64_SP;
offset = ARGS_OFFSET;
}
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
- cfg->ret->opcode = OP_REGVAR;
- cfg->ret->inst_c0 = cfg->arch.reg_in0 + cinfo->ret.reg;
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
break;
case ArgAggregate:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
}
}
- g_free (offsets);
offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
break;
case ArgInFloatReg:
+ case ArgInFloatRegR4:
/*
* Since float regs are volatile, we save the arguments to
* the stack in the prolog.
}
if (!inreg && (ainfo->storage != ArgOnStack)) {
+ guint32 size = 0;
+
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
switch (ainfo->storage) {
case ArgAggregate:
if (ainfo->atype == AggregateSingleHFA)
- offset += ainfo->nslots * 4;
+ size = ainfo->nslots * 4;
else
- offset += ainfo->nslots * 8;
+ size = ainfo->nslots * 8;
break;
default:
- offset += sizeof (gpointer);
+ size = sizeof (gpointer);
break;
}
+
offset = ALIGN_TO (offset, sizeof (gpointer));
- if (cfg->arch.omit_fp)
+
+ if (cfg->arch.omit_fp) {
inst->inst_offset = offset;
- else
+ offset += size;
+ } else {
+ offset += size;
inst->inst_offset = - offset;
+ }
}
}
}
+ /*
+ * FIXME: This doesn't work because some variables are allocated during local
+ * regalloc.
+ */
+ /*
if (cfg->arch.omit_fp && offset == 16)
offset = 0;
+ */
cfg->stack_offset = offset;
-
- g_free (cinfo);
}
void
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (cinfo->ret.storage == ArgAggregate)
cfg->ret_var_is_local = TRUE;
-
- g_free (cinfo);
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr = ");
+ mono_print_ins (cfg->vret_addr);
+ }
+ }
}
static void
arg->backend.reg3 = reg;
call->used_fregs |= 1 << reg;
break;
+ case ArgInFloatRegR4:
+ arg->opcode = OP_OUTARG_FREG_R4;
+ arg->inst_left = tree;
+ arg->inst_right = (MonoInst*)call;
+ arg->backend.reg3 = reg;
+ call->used_fregs |= 1 << reg;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+/*
+ * add_outarg_reg2:
+ *
+ *   Emit an IR move of TREE's result into the outgoing argument register REG
+ * of CALL, according to STORAGE: an integer move for ArgInIReg, a float move
+ * for ArgInFloatReg, and a conversion to single precision for ArgInFloatRegR4.
+ * The destination vreg is recorded on CALL via mono_call_inst_add_outarg_reg ()
+ * so the register allocator binds it to the hardware register REG.
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
+{
+	MonoInst *arg;
+
+	MONO_INST_NEW (cfg, arg, OP_NOP);
+	arg->sreg1 = tree->dreg;
+
+	switch (storage) {
+	case ArgInIReg:
+		arg->opcode = OP_MOVE;
+		arg->dreg = mono_alloc_ireg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
+		break;
+	case ArgInFloatReg:
+		arg->opcode = OP_FMOVE;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	case ArgInFloatRegR4:
+		/* R4 arguments are narrowed to single precision before the call */
+		arg->opcode = OP_FCONV_TO_R4;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+
+	MONO_ADD_INS (cfg->cbb, arg);
+}
static void
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (sig, sig->pinvoke);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
if (cinfo->ret.storage == ArgAggregate) {
/* The code in emit_this_vret_arg needs a local */
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
else {
/*
- * Other backends use mono_type_stack_size (), but that
+ * Other backends use mini_type_stack_size (), but that
* aligns the size to 8, which is larger than the size of
* the source, leading to reads of invalid memory if the
* source is at the end of address space.
add_outarg_reg (cfg, call, arg, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
break;
case ArgInFloatReg:
+ case ArgInFloatRegR4:
add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
break;
case ArgOnStack:
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
- g_free (cinfo);
-
return call;
}
+/*
+ * emit_sig_cookie2:
+ *
+ *   Emit the hidden "signature cookie" argument required by managed vararg
+ * calls: an OP_ICONST holding a trimmed copy of CALL's signature, stored to
+ * the stack slot precomputed in cinfo->sig_cookie.
+ */
static void
-peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoMethodSignature *tmp_sig;
+
+	/* Emit the signature cookie just before the implicit arguments */
+	MonoInst *sig_arg;
+	/* FIXME: Add support for signature tokens to AOT */
+	cfg->disable_aot = TRUE;
+
+	g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+
+	/*
+	 * mono_ArgIterator_Setup assumes the signature cookie is 
+	 * passed first and all the arguments which were before it are
+	 * passed on the stack after the signature. So compensate by 
+	 * passing a different signature.
+	 */
+	tmp_sig = mono_metadata_signature_dup (call->signature);
+	tmp_sig->param_count -= call->signature->sentinelpos;
+	tmp_sig->sentinelpos = 0;
+	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+	/* tmp_sig is not freed here: it is referenced by the emitted OP_ICONST
+	 * (inst_p0) — presumably kept alive for the lifetime of the compiled
+	 * method; NOTE(review): verify ownership to rule out a leak. */
+	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+	sig_arg->dreg = mono_alloc_ireg (cfg);
+	sig_arg->inst_p0 = tmp_sig;
+	MONO_ADD_INS (cfg->cbb, sig_arg);
+
+	/* 16 + offset: the first 16 bytes at sp are assumed to be the IA64
+	 * scratch area — offsets come from get_call_info () */
+	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
+}
+
+/*
+ * mono_arch_emit_call:
+ *
+ *   Emit IR to set up the arguments of CALL: each argument is moved into the
+ * register or stack slot computed by get_call_info (), valuetype arguments
+ * are lowered to OP_OUTARG_VT (expanded later by mono_arch_emit_outarg_vt),
+ * the vararg signature cookie is emitted where needed, and the call's stack
+ * and out-register usage is recorded on CFG.
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+	MonoInst *in;
+	MonoMethodSignature *sig;
+	int i, n, stack_size;
+	CallInfo *cinfo;
+	ArgInfo *ainfo;
+
+	/* NOTE(review): stack_size is never read below */
+	stack_size = 0;
+
+	mono_ia64_alloc_stacked_registers (cfg);
+
+	sig = call->signature;
+	n = sig->param_count + sig->hasthis;
+
+	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
+
+	if (cinfo->ret.storage == ArgAggregate) {
+		MonoInst *vtarg;
+		MonoInst *local;
+
+		/*
+		 * The valuetype is in registers after the call, need to be copied 
+		 * to the stack. Save the address to a local here, so the call 
+		 * instruction can access it.
+		 */
+		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+		local->flags |= MONO_INST_VOLATILE;
+		cfg->arch.ret_var_addr_local = local;
+
+		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+		vtarg->sreg1 = call->vret_var->dreg;
+		vtarg->dreg = local->dreg;
+		MONO_ADD_INS (cfg->cbb, vtarg);
+	}
+
+	/* The address where the valuetype result is stored is passed as a
+	 * hidden argument in an integer out register */
+	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+		add_outarg_reg2 (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
+	}
+
+	for (i = 0; i < n; ++i) {
+		MonoType *arg_type;
+
+		ainfo = cinfo->args + i;
+
+		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+			/* Emit the signature cookie just before the implicit arguments */
+			emit_sig_cookie2 (cfg, call, cinfo);
+		}
+
+		in = call->args [i];
+
+		if (sig->hasthis && (i == 0))
+			arg_type = &mono_defaults.object_class->byval_arg;
+		else
+			arg_type = sig->params [i - sig->hasthis];
+
+		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
+			guint32 align;
+			guint32 size;
+
+			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
+				size = sizeof (MonoTypedRef);
+				align = sizeof (gpointer);
+			}
+			else if (sig->pinvoke)
+				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+			else {
+				/* 
+				 * Other backends use mono_type_stack_size (), but that
+				 * aligns the size to 8, which is larger than the size of
+				 * the source, leading to reads of invalid memory if the
+				 * source is at the end of address space.
+				 */
+				size = mono_class_value_size (in->klass, &align);
+			}
+
+			if (size > 0) {
+				MonoInst *arg;
+
+				/* Defer the actual copy: OP_OUTARG_VT carries the call and
+				 * a mempool copy of the ArgInfo for the later expansion */
+				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+				arg->sreg1 = in->dreg;
+				arg->klass = in->klass;
+				arg->backend.size = size;
+				arg->inst_p0 = call;
+				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
+
+				MONO_ADD_INS (cfg->cbb, arg);
+			}
+		}
+		else {
+			switch (ainfo->storage) {
+			case ArgInIReg:
+				add_outarg_reg2 (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
+				break;
+			case ArgInFloatReg:
+			case ArgInFloatRegR4:
+				add_outarg_reg2 (cfg, call, ainfo->storage, ainfo->reg, in);
+				break;
+			case ArgOnStack:
+				/* 16 + offset: skip the assumed 16-byte scratch area at sp;
+				 * store width follows the argument type */
+				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				else
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				break;
+			default:
+				g_assert_not_reached ();
+			}
+		}
+	}
+
+	/* Handle the case where there are no implicit arguments */
+	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+		emit_sig_cookie2 (cfg, call, cinfo);
+	}
+
+	call->stack_usage = cinfo->stack_usage;
+	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Expand an OP_OUTARG_VT instruction: pass the valuetype at SRC as a call
+ * argument using the ArgInfo stashed in ins->inst_p1. HFA aggregates are
+ * loaded field-by-field into float registers (4-byte fields for single-
+ * precision HFAs, 8-byte for double), other register-passed words go into
+ * integer out registers, remaining slots are copied to the stack area, and
+ * non-ArgAggregate valuetypes are memcpy'd to their stack slot.
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+	int size = ins->backend.size;
+
+	if (ainfo->storage == ArgAggregate) {
+		MonoInst *load, *store;
+		int i, slot;
+
+		/*
+		 * Part of the structure is passed in registers.
+		 */
+		for (i = 0; i < ainfo->nregs; ++i) {
+			/* NOTE(review): 'slot' is assigned but unused in this loop */
+			slot = ainfo->reg + i;
+
+			if (ainfo->atype == AggregateSingleHFA) {
+				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 4;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else if (ainfo->atype == AggregateDoubleHFA) {
+				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else {
+				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_ireg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
+			}
+			MONO_ADD_INS (cfg->cbb, load);
+		}
+
+		/*
+		 * Part of the structure is passed on the stack.
+		 */
+		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
+			slot = ainfo->reg + i;
+
+			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+			load->inst_basereg = src->dreg;
+			load->inst_offset = i * sizeof (gpointer);
+			load->dreg = mono_alloc_preg (cfg);
+			MONO_ADD_INS (cfg->cbb, load);
+
+			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
+			store->sreg1 = load->dreg;
+			store->inst_destbasereg = IA64_SP;
+			/* (slot - 8): slots 0-7 correspond to registers, stack slots
+			 * start at 8 — assumed; 16 skips the scratch area at sp */
+			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
+			MONO_ADD_INS (cfg->cbb, store);
+		}
+	} else {
+		/* Entirely stack-passed valuetype: block copy to the out area */
+		mini_emit_memcpy2 (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
+	}
+}
+
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit IR moving VAL into METHOD's return register as determined by the
+ * calling convention: OP_MOVE for integer returns, OP_FMOVE for float
+ * returns. Only ArgInIReg/ArgInFloatReg storage is expected here; other
+ * return kinds (aggregates, by-address) are handled before this point.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
 {
-	MonoInst *ins, *last_ins = NULL;
+	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
+
+	switch (cinfo->ret.storage) {
+	case ArgInIReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+		break;
+	case ArgInFloatReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+/*
+ * mono_arch_peephole_pass_1:
+ *
+ *   First arch-specific peephole pass — intentionally empty for this
+ * backend; all peephole optimizations happen in pass 2.
+ */
+void
+mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+}
+
+void
+mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *n, *last_ins = NULL;
ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_MOVE:
case OP_FMOVE:
- case OP_SETREG:
/*
* Removes:
*
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- if (last_ins)
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
}
bb->last_ins = last_ins;
}
-typedef enum {
- CMP_EQ,
- CMP_NE,
- CMP_LE,
- CMP_GE,
- CMP_LT,
- CMP_GT,
- CMP_LE_UN,
- CMP_GE_UN,
- CMP_LT_UN,
- CMP_GT_UN
-} CompRelation;
-
-typedef enum {
- CMP_TYPE_L,
- CMP_TYPE_I,
- CMP_TYPE_F
-} CompType;
-
-static CompRelation
-opcode_to_cond (int opcode)
-{
- switch (opcode) {
- case CEE_BEQ:
- case OP_CEQ:
- case OP_IBEQ:
- case OP_ICEQ:
- case OP_FBEQ:
- case OP_FCEQ:
- case OP_COND_EXC_EQ:
- return CMP_EQ;
- case CEE_BNE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_IBNE_UN:
- case OP_FBNE_UN:
- return CMP_NE;
- case CEE_BLE:
- case OP_IBLE:
- case OP_FBLE:
- return CMP_LE;
- case CEE_BGE:
- case OP_IBGE:
- case OP_FBGE:
- return CMP_GE;
- case CEE_BLT:
- case OP_COND_EXC_LT:
- case OP_CLT:
- case OP_IBLT:
- case OP_ICLT:
- case OP_FBLT:
- case OP_FCLT:
- return CMP_LT;
- case CEE_BGT:
- case OP_COND_EXC_GT:
- case OP_CGT:
- case OP_IBGT:
- case OP_ICGT:
- case OP_FBGT:
- case OP_FCGT:
- return CMP_GT;
-
- case CEE_BLE_UN:
- case OP_COND_EXC_LE_UN:
- case OP_IBLE_UN:
- case OP_FBLE_UN:
- return CMP_LE_UN;
- case CEE_BGE_UN:
- case OP_IBGE_UN:
- case OP_FBGE_UN:
- return CMP_GE_UN;
- case CEE_BLT_UN:
- case OP_CLT_UN:
- case OP_IBLT_UN:
- case OP_ICLT_UN:
- case OP_FBLT_UN:
- case OP_FCLT_UN:
- case OP_COND_EXC_LT_UN:
- return CMP_LT_UN;
- case CEE_BGT_UN:
- case OP_COND_EXC_GT_UN:
- case OP_CGT_UN:
- case OP_IBGT_UN:
- case OP_ICGT_UN:
- case OP_FCGT_UN:
- case OP_FBGT_UN:
- return CMP_GT_UN;
- default:
- printf ("%s\n", mono_inst_name (opcode));
- NOT_IMPLEMENTED;
- }
-}
-
-static CompType
-opcode_to_type (int opcode, int cmp_opcode)
-{
- if ((opcode >= CEE_BEQ) && (opcode <= CEE_BLT_UN))
- return CMP_TYPE_L;
- else if ((opcode >= OP_CEQ) && (opcode <= OP_CLT_UN))
- return CMP_TYPE_L;
- else if ((opcode >= OP_IBEQ) && (opcode <= OP_IBLE_UN))
- return CMP_TYPE_I;
- else if ((opcode >= OP_ICEQ) && (opcode <= OP_ICLT_UN))
- return CMP_TYPE_I;
- else if ((opcode >= OP_FBEQ) && (opcode <= OP_FBLE_UN))
- return CMP_TYPE_F;
- else if ((opcode >= OP_FCEQ) && (opcode <= OP_FCLT_UN))
- return CMP_TYPE_F;
- else if ((opcode >= OP_COND_EXC_EQ) && (opcode <= OP_COND_EXC_LT_UN)) {
- switch (cmp_opcode) {
- case OP_ICOMPARE:
- case OP_ICOMPARE_IMM:
- return CMP_TYPE_I;
- default:
- return CMP_TYPE_L;
- }
- } else {
- g_error ("Unknown opcode '%s' in opcode_to_type", mono_inst_name (opcode));
- return 0;
- }
-}
-
int cond_to_ia64_cmp [][3] = {
{OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
{OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
+/*
+ * opcode_to_ia64_cmp:
+ *
+ *   Map a branch/setcc/cond-exc OPCODE — together with the opcode of the
+ * preceding compare, CMP_OPCODE (used to pick the comparison width/type) —
+ * to the corresponding ia64 compare opcode via the cond_to_ia64_cmp table.
+ */
static int
opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
-	return cond_to_ia64_cmp [opcode_to_cond (opcode)][opcode_to_type (opcode, cmp_opcode)];
+	return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
int cond_to_ia64_cmp_imm [][3] = {
opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
/* The condition needs to be reversed */
- return cond_to_ia64_cmp_imm [opcode_to_cond (opcode)][opcode_to_type (opcode, cmp_opcode)];
-}
-
-static void
-insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
-{
- if (ins == NULL) {
- ins = bb->code;
- bb->code = to_insert;
- to_insert->next = ins;
- }
- else {
- to_insert->next = ins->next;
- ins->next = to_insert;
- }
+ return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- insert_after_ins (bb, last_ins, (dest)); \
+ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
last_ins = (dest); \
} while (0)
* Converts complex opcodes into simpler ones so that each IR instruction
* corresponds to one machine instruction.
*/
-static void
+void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp, *temp2, *temp3, *last_ins = NULL;
+ MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
ins = bb->code;
- if (bb->max_ireg > cfg->rs->next_vireg)
- cfg->rs->next_vireg = bb->max_ireg;
- if (bb->max_freg > cfg->rs->next_vfreg)
- cfg->rs->next_vfreg = bb->max_freg;
+ if (bb->max_vreg > cfg->rs->next_vreg)
+ cfg->rs->next_vreg = bb->max_vreg;
- while (ins) {
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_basereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
+ case OP_AND_IMM:
case OP_IAND_IMM:
+ case OP_LAND_IMM:
case OP_IOR_IMM:
+ case OP_LOR_IMM:
case OP_IXOR_IMM:
- case OP_AND_IMM:
+ case OP_LXOR_IMM:
case OP_SHL_IMM:
+ case OP_SHR_IMM:
case OP_ISHL_IMM:
case OP_LSHL_IMM:
case OP_ISHR_IMM:
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
is_imm = ia64_is_imm14 (ins->inst_imm);
switched = TRUE;
break;
case OP_ISUB_IMM:
+ case OP_LSUB_IMM:
is_imm = ia64_is_imm14 (- (ins->inst_imm));
if (is_imm) {
/* A = B - IMM -> A = B + (-IMM) */
case OP_IOR_IMM:
case OP_IXOR_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
+ case OP_LOR_IMM:
+ case OP_LXOR_IMM:
is_imm = ia64_is_imm8 (ins->inst_imm);
switched = TRUE;
break;
case OP_SHL_IMM:
+ case OP_SHR_IMM:
case OP_ISHL_IMM:
case OP_LSHL_IMM:
case OP_ISHR_IMM:
break;
}
- switch (ins->opcode) {
- case OP_ADD_IMM:
- ins->opcode = CEE_ADD;
- break;
- case OP_IADD_IMM:
- ins->opcode = OP_IADD;
- break;
- case OP_ISUB_IMM:
- ins->opcode = OP_ISUB;
- break;
- case OP_IAND_IMM:
- ins->opcode = OP_IAND;
- break;
- case OP_IOR_IMM:
- ins->opcode = OP_IOR;
- break;
- case OP_IXOR_IMM:
- ins->opcode = OP_IXOR;
- break;
- case OP_ISHL_IMM:
- ins->opcode = OP_ISHL;
- break;
- case OP_ISHR_IMM:
- ins->opcode = OP_ISHR;
- break;
- case OP_ISHR_UN_IMM:
- ins->opcode = OP_ISHR_UN;
- break;
- case OP_AND_IMM:
- ins->opcode = CEE_AND;
- break;
- case OP_SHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_LSHL_IMM:
- ins->opcode = OP_LSHL;
- break;
- case OP_LSHR_IMM:
- ins->opcode = OP_LSHR;
- break;
- case OP_LSHR_UN_IMM:
- ins->opcode = OP_LSHR_UN;
- break;
- default:
- g_assert_not_reached ();
- }
+ ins->opcode = mono_op_imm_to_op (ins->opcode);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
break;
}
case OP_COMPARE_IMM:
- case OP_ICOMPARE_IMM: {
+ case OP_ICOMPARE_IMM:
+ case OP_LCOMPARE_IMM: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
gboolean imm;
+ CompRelation cond;
+
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ ins->opcode = OP_NOP;
+ break;
+ }
/*
* The compare_imm instructions have switched up arguments, and
* some of them take an imm between -127 and 128.
*/
next = ins->next;
- switch (next->opcode) {
- case CEE_BGE:
- case CEE_BLT:
- case OP_COND_EXC_LT:
- case OP_IBGE:
- case OP_IBLT:
+ cond = mono_opcode_to_cond (next->opcode);
+ if ((cond == CMP_LT) || (cond == CMP_GE))
imm = ia64_is_imm8 (ins->inst_imm - 1);
- break;
- case OP_IBGE_UN:
- case OP_IBLT_UN:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
+ else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
- break;
- default:
+ else
imm = ia64_is_imm8 (ins->inst_imm);
- break;
- }
if (imm) {
ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
}
}
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_EQ:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_COND_EXC_LT_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
next = ins->next;
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ ins->opcode = OP_NOP;
+ break;
+ }
+
ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
- switch (next->opcode) {
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLE:
- case CEE_BGE:
- case CEE_BLT:
- case CEE_BGT:
- case CEE_BLE_UN:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
- case CEE_BGT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
- case OP_FBEQ:
- case OP_FBNE_UN:
- case OP_FBLT:
- case OP_FBLT_UN:
- case OP_FBGT:
- case OP_FBGT_UN:
- case OP_FBGE:
- case OP_FBGE_UN:
- case OP_FBLE:
- case OP_FBLE_UN:
+
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
- case OP_FCEQ:
- case OP_FCLT:
- case OP_FCGT:
- case OP_FCLT_UN:
- case OP_FCGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
+
break;
}
+ case OP_FCEQ:
+ case OP_FCGT:
+ case OP_FCGT_UN:
+ case OP_FCLT:
+ case OP_FCLT_UN:
+ /* The front end removes the fcompare, so introduce it again */
+ NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = ins->sreg2;
+
+ ins->opcode = OP_IA64_CSET;
+ break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
if (sum_reg == 0)
sum_reg = temp->dreg;
else {
- NEW_INS (cfg, temp2, CEE_ADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->dreg = mono_regstate_next_int (cfg->rs);
temp2->sreg1 = sum_reg;
temp2->sreg2 = temp->dreg;
}
break;
}
- case CEE_CONV_OVF_U4:
+ case OP_LCONV_TO_OVF_U4:
NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
temp->sreg1 = ins->sreg1;
temp->sreg2 = IA64_R0;
ins->opcode = OP_MOVE;
break;
- case CEE_CONV_OVF_I4_UN:
+ case OP_LCONV_TO_OVF_I4_UN:
NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = 0x7fffffff;
temp->dreg = mono_regstate_next_int (cfg->rs);
}
bb->last_ins = last_ins;
- bb->max_ireg = cfg->rs->next_vireg;
- bb->max_freg = cfg->rs->next_vfreg;
-}
-
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- if (!bb->code)
- return;
-
- mono_arch_lowering_pass (cfg, bb);
-
- mono_local_regalloc (cfg, bb);
+ bb->max_vreg = cfg->rs->next_vreg;
}
/*
sig = mono_method_signature (method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/* This is the opposite of the code in emit_prolog */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- ins = cfg->varinfo [i];
+
+ ins = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
NOT_IMPLEMENTED;
}
}
- }
-
- g_free (cinfo);
+ }
return code;
}
case OP_VOIDCALL_REG:
case OP_VOIDCALL_MEMBASE:
break;
- case CEE_CALL:
+ case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
case OP_LCALL:
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE: {
+ case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE: {
ArgStorage storage;
- cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
storage = cinfo->ret.storage;
if (storage == ArgAggregate) {
}
}
}
- g_free (cinfo);
break;
}
default:
guint last_offset = 0;
int max_len, cpos;
- if (cfg->opt & MONO_OPT_PEEPHOLE)
- peephole_pass (cfg, bb);
-
if (cfg->opt & MONO_OPT_LOOP) {
/* FIXME: */
}
break_count ();
#endif
- ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS (bb, ins) {
offset = code.buf - cfg->native_code;
max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;
else
ia64_movl (code, ins->dreg, ins->inst_c0);
break;
+ case OP_JUMP_TABLE:
+ add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ ia64_movl (code, ins->dreg, 0);
+ break;
case OP_MOVE:
ia64_mov (code, ins->dreg, ins->sreg1);
break;
- case CEE_BR:
+ case OP_BR:
case OP_IA64_BR_COND: {
int pred = 0;
if (ins->opcode == OP_IA64_BR_COND)
ia64_begin_bundle (code);
ins->inst_c0 = code.buf - cfg->native_code;
break;
- case CEE_NOP:
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
break;
case OP_BR_REG:
ia64_mov_to_br (code, IA64_B6, ins->sreg1);
ia64_br_cond_reg (code, IA64_B6);
break;
- case CEE_ADD:
case OP_IADD:
+ case OP_LADD:
ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_AND:
+ case OP_ISUB:
+ case OP_LSUB:
+ ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
case OP_IAND:
+ case OP_LAND:
ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IOR:
- case CEE_OR:
+ case OP_LOR:
ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IXOR:
- case CEE_XOR:
+ case OP_LXOR:
ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_INEG:
- case CEE_NEG:
+ case OP_LNEG:
ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
break;
case OP_INOT:
- case CEE_NOT:
+ case OP_LNOT:
ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
break;
case OP_ISHL:
+ case OP_LSHL:
ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ISHR:
ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
- case CEE_SHL:
- case OP_LSHL:
- ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_LSHR_UN:
ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_SUB:
- case OP_ISUB:
- ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_IADDCC:
/* p6 and p7 is set if there is signed/unsigned overflow */
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IAND_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IOR_IMM:
+ case OP_LOR_IMM:
ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IXOR_IMM:
+ case OP_LXOR_IMM:
ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_SHL_IMM:
case OP_LSHL_IMM:
ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
- case OP_LSHR_IMM:
- ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
- break;
+ case OP_SHR_IMM:
case OP_ISHR_IMM:
+ case OP_LSHR_IMM:
ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_ISHR_UN_IMM:
case OP_LSHR_UN_IMM:
ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
- case CEE_MUL:
+ case OP_LMUL:
/* Based on gcc code */
ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
break;
case OP_STOREI8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ld8 (code, ins->dreg, ins->inst_basereg);
break;
ia64_no_stop (code);
ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
break;
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
+ case OP_LCONV_TO_I1:
/* FIXME: Is this needed ? */
ia64_sxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
+ case OP_LCONV_TO_I2:
/* FIXME: Is this needed ? */
ia64_sxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I4:
+ case OP_LCONV_TO_I4:
/* FIXME: Is this needed ? */
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
+ case OP_LCONV_TO_U1:
/* FIXME: Is this needed */
ia64_zxt1 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
+ case OP_LCONV_TO_U2:
/* FIXME: Is this needed */
ia64_zxt2 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U4:
+ case OP_LCONV_TO_U4:
/* FIXME: Is this needed */
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I8:
- case CEE_CONV_I:
- /* FIXME: Sign extend ? */
- ia64_mov (code, ins->dreg, ins->sreg1);
+ case OP_ICONV_TO_I8:
+ case OP_ICONV_TO_I:
+ case OP_LCONV_TO_I8:
+ case OP_LCONV_TO_I:
+ ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U8:
- case CEE_CONV_U:
+ case OP_LCONV_TO_U8:
+ case OP_LCONV_TO_U:
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
ia64_fmov (code, ins->dreg, ins->sreg1);
break;
case OP_STORER8_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R4:
+ case OP_ICONV_TO_R4:
+ case OP_LCONV_TO_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_R8:
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
- break;
+ case OP_ICONV_TO_R8:
case OP_LCONV_TO_R8:
- /* FIXME: Difference with CEE_CONV_R8 ? */
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case OP_LCONV_TO_R4:
- /* FIXME: Difference with CEE_CONV_R4 ? */
- ia64_setf_sig (code, ins->dreg, ins->sreg1);
- ia64_fcvt_xf (code, ins->dreg, ins->dreg);
- ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
- break;
case OP_FCONV_TO_R4:
ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
break;
case OP_FCONV_TO_I8:
+ case OP_FCONV_TO_I:
ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
break;
case OP_FNEG:
ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
break;
- case CEE_CKFINITE:
+ case OP_CKFINITE:
/* Quiet NaN */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
- case CEE_CALL:
+ case OP_CALL:
call = (MonoCallInst*)ins;
if (ins->flags & MONO_INST_HAS_METHOD)
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
- case OP_VOIDCALL_REG:
- call = (MonoCallInst*)ins;
+ case OP_VCALL2_REG:
+ case OP_VOIDCALL_REG: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ CallInfo *cinfo;
+ int out_reg;
- /* Indirect call */
/*
- * mono_arch_patch_delegate_trampoline will patch this, this is why R8 is
- * used.
+ * mono_arch_find_this_arg () needs to find the this argument in a global
+ * register.
*/
+ cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
+ out_reg = cfg->arch.reg_out0;
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
+ out_reg ++;
+ ia64_mov (code, IA64_R10, out_reg);
+
+ /* Indirect call */
ia64_mov (code, IA64_R8, ins->sreg1);
ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
code = emit_move_return_value (cfg, ins, code);
break;
-
+ }
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
- case OP_CALL_MEMBASE:
+ case OP_CALL_MEMBASE: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ CallInfo *cinfo;
+ int out_reg;
+
/*
* There are no membase instructions on ia64, but we can't
* lower this since get_vcall_slot_addr () needs to decode it.
*/
/* Keep this in synch with get_vcall_slot_addr */
+ ia64_mov (code, IA64_R11, ins->sreg1);
if (ia64_is_imm14 (ins->inst_offset))
ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
else {
ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
}
+ if (call->method && ins->inst_offset < 0) {
+ /*
+ * This is a possible IMT call so save the IMT method in a global
+ * register where mono_arch_find_imt_method () and its friends can
+ * access it.
+ */
+ ia64_movl (code, IA64_R9, call->method);
+ }
+
+ /*
+ * mono_arch_find_this_arg () needs to find the this argument in a global
+ * register.
+ */
+ cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
+ out_reg = cfg->arch.reg_out0;
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
+ out_reg ++;
+ ia64_mov (code, IA64_R10, out_reg);
+
ia64_begin_bundle (code);
ia64_codegen_set_one_ins_per_bundle (code, TRUE);
code = emit_move_return_value (cfg, ins, code);
break;
- case CEE_JMP: {
+ }
+ case OP_JMP: {
/*
* Keep in sync with the code in emit_epilog.
*/
break;
}
- case CEE_BREAK:
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_arch_break);
+ case OP_BREAK:
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
break;
case OP_LOCALLOC: {
/* FIXME: Sigaltstack support */
/* keep alignment */
- ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->sreg1);
- ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
+ ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1);
+ ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
ia64_mov (code, ins->dreg, IA64_SP);
+ /* An area at sp is reserved by the ABI for parameter passing */
+ abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_LOCALLOC_ALIGNMENT);
+ if (ia64_is_adds_imm (abi_offset))
+ ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
+ ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
+ }
+
+ if (ins->flags & MONO_INST_INIT) {
+ /* Upper limit */
+ ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
+
+ ia64_codegen_set_one_ins_per_bundle (code, TRUE);
+
+ /* Init loop */
+ ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
+ ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
+ ia64_br_cond_pred (code, 8, -2);
+
+ ia64_codegen_set_one_ins_per_bundle (code, FALSE);
+
+ ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ }
+
+ break;
+ }
+ case OP_LOCALLOC_IMM: {
+ gint32 abi_offset;
+
+ /* FIXME: Sigaltstack support */
+
+ gssize size = ins->inst_imm;
+ size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
+
+ if (ia64_is_adds_imm (size))
+ ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
+ else
+ ia64_movl (code, GP_SCRATCH_REG, size);
+
+ ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
+ ia64_mov (code, ins->dreg, IA64_SP);
+
/* An area at sp is reserved by the ABI for parameter passing */
abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
if (ia64_is_adds_imm (abi_offset))
/* Signal to endfilter that we are called by call_filter */
ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
- /* Save the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ /* Branch target: */
+ if (ia64_is_imm14 (spvar->inst_offset))
+ ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
+ }
+
+ /* Save the return address */
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
break;
}
- case CEE_ENDFINALLY:
+ case OP_ENDFINALLY:
case OP_ENDFILTER: {
/* FIXME: Return the value in ENDFILTER */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Load the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ if (ia64_is_imm14 (spvar->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
+ }
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
/* Test caller */
ia64_br_cond_reg (code, IA64_B6);
break;
}
- case CEE_THROW:
+ case OP_THROW:
ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
last_ins = ins;
last_offset = offset;
-
- ins = ins->next;
}
ia64_codegen_close (code);
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (mono_arch_break, "mono_arch_break", NULL, TRUE);
}
static Ia64InsType ins_types_in_template [32][3] = {
sig = mono_method_signature (method);
pos = 0;
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
}
if (alloc_size) {
+#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
int pagesize = getpagesize ();
-#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (alloc_size >= pagesize) {
gint32 remaining_size = alloc_size;
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
stack_offset = ainfo->offset + ARGS_OFFSET;
+ /*
+ * FIXME: Native code might pass non register sized integers
+ * without initializing the upper bits.
+ */
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
+ int reg = cfg->arch.reg_in0 + ainfo->reg;
+
+ switch (mono_type_to_load_membase (cfg, arg_type)) {
+ case OP_LOADI1_MEMBASE:
+ ia64_sxt1 (code, reg, reg);
+ break;
+ case OP_LOADU1_MEMBASE:
+ ia64_zxt1 (code, reg, reg);
+ break;
+ case OP_LOADI2_MEMBASE:
+ ia64_sxt2 (code, reg, reg);
+ break;
+ case OP_LOADU2_MEMBASE:
+ ia64_zxt2 (code, reg, reg);
+ break;
+ default:
+ break;
+ }
+ }
+
/* Save volatile arguments to the stack */
if (inst->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatReg:
- /* FIXME: big offsets */
+ case ArgInFloatRegR4:
g_assert (inst->opcode == OP_REGOFFSET);
- ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
+ if (ia64_is_adds_imm (inst->inst_offset))
+ ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, inst->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
+ }
if (arg_type->byref)
ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
else {
ia64_codegen_close (code);
- g_free (cinfo);
-
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);
ia64_codegen_init (code, buf);
- /* the code restoring the registers must be kept in sync with CEE_JMP */
+ /* the code restoring the registers must be kept in sync with OP_JMP */
pos = 0;
if (method->save_lmf) {
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (mono_method_signature (method), FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
ainfo = &cinfo->ret;
switch (ainfo->storage) {
case ArgAggregate:
default:
break;
}
- g_free (cinfo);
ia64_begin_bundle (code);
/* Allocate a new area on the stack and save arguments there */
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
n = sig->param_count + sig->hasthis;
/* Save arguments to the stack */
for (i = 0; i < n; ++i) {
- ins = cfg->varinfo [i];
+ ins = cfg->args [i];
if (ins->opcode == OP_REGVAR) {
ia64_movl (code, GP_SCRATCH_REG, (i * 8));
ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
-
- g_free (cinfo);
}
ia64_codegen_close (code);
ia64_codegen_init (code, p);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
/* Save return value + pass it to func */
switch (cinfo->ret.storage) {
break;
}
- g_free (cinfo);
-
add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
return 0;
}
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8* code, gpointer *regs, int *displacement)
{
guint8 *bundle2 = code - 48;
guint8 *bundle3 = code - 32;
guint64 ins41 = ia64_bundle_ins1 (bundle4);
guint64 ins42 = ia64_bundle_ins2 (bundle4);
guint64 ins43 = ia64_bundle_ins3 (bundle4);
- int reg;
/*
* Virtual calls are made with:
g_assert (ia64_ins_x (ins22) == 0);
g_assert (ia64_ins_b1 (ins22) == IA64_B6);
- reg = IA64_R8;
-
- /*
- * Must be a scratch register, since only those are saved by the trampoline
- */
- g_assert ((1 << reg) & MONO_ARCH_CALLEE_REGS);
-
- g_assert (regs [reg]);
+ *displacement = (gssize)regs [IA64_R8] - (gssize)regs [IA64_R11];
- return regs [reg];
+ return regs [IA64_R11];
}
return NULL;
}
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ *   Return the address of the vtable slot used by the virtual call at CODE.
+ * mono_arch_get_vcall_slot () decodes the call site into a vtable address and
+ * a displacement; the slot address is their sum. Returns NULL if CODE is not
+ * a recognized virtual call sequence.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
+{
+	gpointer vt;
+	int displacement;
+	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+	if (!vt)
+		return NULL;
+	return (gpointer*)(gpointer)((char*)vt + displacement);
+}
+
gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
{
int out_reg = cfg->arch.reg_out0;
if (vt_reg != -1) {
- CallInfo * cinfo = get_call_info (inst->signature, FALSE);
+ CallInfo * cinfo = get_call_info (cfg, cfg->mempool, inst->signature, FALSE);
MonoInst *vtarg;
if (cinfo->ret.storage == ArgAggregate) {
out_reg ++;
}
-
- g_free (cinfo);
}
/* add the this argument */
}
}
+
+#ifdef MONO_ARCH_HAVE_IMT
+
+/*
+ * LOCKING: called with the domain lock held
+ */
+/*
+ * mono_arch_build_imt_thunk:
+ *
+ *   Generate native code which dispatches an interface call based on the IMT
+ * method. The CALL_MEMBASE implementation saves the IMT method in the global
+ * register IA64_R9, so the thunk compares IA64_R9 against each entry's method
+ * and branches through the corresponding vtable slot on a match.
+ * Returns a pointer to the generated code, allocated from the domain's code
+ * manager.
+ *
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+	int i;
+	int size = 0;
+	guint8 *start, *buf;
+	Ia64CodegenState code;
+
+	/* Conservative upper bound on the generated code size, checked below */
+	size = count * 256;
+	buf = g_malloc0 (size);
+	ia64_codegen_init (code, buf);
+
+	/* IA64_R9 contains the IMT method */
+
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		ia64_begin_bundle (code);
+		item->code_target = (guint8*)code.buf + code.nins;
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done) {
+					ia64_movl (code, GP_SCRATCH_REG, item->method);
+					ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
+				}
+				item->jmp_code = (guint8*)code.buf + code.nins;
+				ia64_br_cond_pred (code, 7, 0);
+
+				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
+				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
+				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
+				ia64_br_cond_reg (code, IA64_B6);
+			} else {
+				/* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->vtable_slot]));
+				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
+				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
+				ia64_br_cond_reg (code, IA64_B6);
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+			}
+		} else {
+			ia64_movl (code, GP_SCRATCH_REG, item->method);
+			ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
+			item->jmp_code = (guint8*)code.buf + code.nins;
+			ia64_br_cond_pred (code, 6, 0);
+		}
+	}
+	/* patch the branches to get to the target items */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->jmp_code) {
+			if (item->check_target_idx) {
+				ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+			}
+		}
+	}
+
+	ia64_codegen_close (code);
+	g_assert (code.buf - buf <= size);
+
+	size = code.buf - buf;
+	start = mono_code_manager_reserve (domain->code_mp, size);
+	memcpy (start, buf, size);
+	/* The temporary buffer is no longer needed once copied into the code manager */
+	g_free (buf);
+
+	mono_arch_flush_icache (start, size);
+
+	mono_stats.imt_thunks_size += size;
+
+	return start;
+}
+
+/*
+ * mono_arch_find_imt_method:
+ *
+ *   Return the IMT method for the interface call at CODE. The CALL_MEMBASE
+ * opcode implementation stores the IMT method in the global register IA64_R9
+ * before the call, so it is simply read back from the saved register state.
+ */
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+	return regs [IA64_R9];
+}
+
+/*
+ * mono_arch_emit_imt_argument:
+ *
+ *   Intentionally empty: the CALL_MEMBASE opcode implementations already save
+ * the IMT method into the global register IA64_R9, where
+ * mono_arch_find_imt_method () retrieves it.
+ */
+void
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
+{
+	/* Done by the implementation of the CALL_MEMBASE opcodes */
+}
+#endif
+
+/*
+ * mono_arch_get_this_arg_from_call:
+ *
+ *   Return the 'this' argument of the call at CODE. The indirect and membase
+ * call opcodes copy the out register holding 'this' into the global register
+ * IA64_R10, so the value is read back from the saved register state.
+ * GSCTX, SIG and CODE are unused here.
+ */
+gpointer
+mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+	return (gpointer)regs [IA64_R10];
+}
+
+/*
+ * mono_arch_find_this_argument:
+ *
+ *   Return the 'this' argument of the call at CODE as a MonoObject*.
+ * Thin wrapper around mono_arch_get_this_arg_from_call ().
+ */
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
+{
+	return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
+}
+
+/*
+ * mono_arch_get_delegate_invoke_impl:
+ *
+ *   No optimized delegate invoke implementation is provided on ia64.
+ * NOTE(review): returning NULL presumably makes callers fall back to a
+ * generic code path — confirm against the callers in the JIT core.
+ */
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+	return NULL;
+}
+
MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
- if (cmethod->klass == mono_defaults.thread_class &&
- strcmp (cmethod->name, "MemoryBarrier") == 0) {
- MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
- } else if(cmethod->klass->image == mono_defaults.corlib &&
+ if(cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
-
+ /*
+ * We don't use the generic version in mini_get_inst_for_method () since the
+ * ia64 has atomic_add_imm opcodes.
+ */
if (strcmp (cmethod->name, "Increment") == 0) {
guint32 opcode;
MONO_INST_NEW (cfg, ins, opcode);
ins->inst_imm = -1;
ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Exchange") == 0) {
+ } else if (strcmp (cmethod->name, "Add") == 0) {
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_EXCHANGE_I4;
- else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
- (fsig->params [0]->type == MONO_TYPE_I) ||
- (fsig->params [0]->type == MONO_TYPE_OBJECT))
- opcode = OP_ATOMIC_EXCHANGE_I8;
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
else
- return NULL;
-
+ g_assert_not_reached ();
+
MONO_INST_NEW (cfg, ins, opcode);
ins->inst_i0 = args [0];
ins->inst_i1 = args [1];
- } else if (strcmp (cmethod->name, "Add") == 0) {
+ }
+ }
+
+ return ins;
+}
+
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+
+ if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+ (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+
+ /*
+ * We don't use the generic version in mini_emit_inst_for_method () since we
+ * ia64 has atomic_add_imm opcodes.
+ */
+ if (strcmp (cmethod->name, "Increment") == 0) {
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ else
+ g_assert_not_reached ();
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
+ ins->inst_imm = 1;
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else if (strcmp (cmethod->name, "Decrement") == 0) {
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
else
g_assert_not_reached ();
-
MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_preg (cfg);
+ ins->inst_imm = -1;
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else if (strcmp (cmethod->name, "Add") == 0) {
+ guint32 opcode;
+ gboolean is_imm = FALSE;
+ gint64 imm = 0;
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
- /* 64 bit reads are already atomic */
- MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
- ins->inst_i0 = args [0];
+ if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
+ imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
+
+ is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
+ }
+
+ if (is_imm) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->inst_imm = imm;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ } else {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->inst_basereg = args [0]->dreg;
+ ins->inst_offset = 0;
+ ins->sreg2 = args [1]->dreg;
+ ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ }
+ MONO_ADD_INS (cfg->cbb, ins);
}
}
ins->inst_offset = thread_tls_offset;
return ins;
}
+
+/*
+ * mono_arch_context_get_int_reg:
+ *
+ *   Should return the value of integer register REG stored in CTX.
+ * Not implemented on ia64 yet: aborts if ever reached.
+ */
+gpointer
+mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
+{
+	/* FIXME: implement */
+	g_assert_not_reached ();
+}