#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
#define FP_SCRATCH_REG 32
+#define FP_SCRATCH_REG2 33
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
cinfo->ret.storage = ArgInFloatReg;
cinfo->ret.reg = 8;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = IA64_R8;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
case MONO_TYPE_ARRAY:
add_general (&gr, &stack_size, ainfo);
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ add_general (&gr, &stack_size, ainfo);
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
/* FIXME: */
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return TRUE;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t))
+ return TRUE;
+ return FALSE;
case MONO_TYPE_VALUETYPE:
return FALSE;
}
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
CallInfo *cinfo;
- guint32 reserved_regs = 3;
+ guint32 reserved_regs;
+ MonoMethodHeader *header;
if (cfg->arch.reg_local0 > 0)
/* Already done */
cinfo = get_call_info (mono_method_signature (cfg->method), FALSE);
- /* Three registers are reserved for use by the prolog/epilog */
- reserved_regs = 3;
+ header = mono_method_get_header (cfg->method);
+
+ /* Some registers are reserved for use by the prolog/epilog */
+ reserved_regs = header->num_clauses ? 4 : 3;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
- cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 3;
+ cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;
+
+ /*
+ * Frames without handlers save sp to fp, frames with handlers save it into
+ * a dedicated register.
+ */
+ if (header->num_clauses)
+ cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
+ else
+ cfg->arch.reg_saved_sp = cfg->arch.reg_fp;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
- cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - 4;
+ cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
}
/*
}
else {
/* Locals are allocated backwards from %fp */
- cfg->frame_reg = cfg->arch.reg_saved_sp;
+ cfg->frame_reg = cfg->arch.reg_fp;
offset = 0;
}
arg->opcode = OP_OUTARG_REG;
arg->inst_left = tree;
arg->inst_right = (MonoInst*)call;
- arg->unused = reg;
+ arg->backend.reg3 = reg;
call->used_iregs |= 1 << reg;
break;
case ArgInFloatReg:
arg->opcode = OP_OUTARG_FREG;
arg->inst_left = tree;
arg->inst_right = (MonoInst*)call;
- arg->unused = reg;
+ arg->backend.reg3 = reg;
call->used_fregs |= 1 << reg;
break;
default:
}
}
+/*
+ * emit_sig_cookie:
+ *
+ *   Emit the instructions which pass the vararg "signature cookie" for CALL
+ * on the stack (the cookie is always stack-allocated, see the assert below),
+ * prepending the resulting OP_OUTARG to CALL->out_args.
+ */
+static void
+emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+ MonoInst *arg;
+ MonoMethodSignature *tmp_sig;
+ MonoInst *sig_arg;
+
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+
+ g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+
+ /*
+ * mono_ArgIterator_Setup assumes the signature cookie is
+ * passed first and all the arguments which were before it are
+ * passed on the stack after the signature. So compensate by
+ * passing a different signature.
+ */
+ tmp_sig = mono_metadata_signature_dup (call->signature);
+ tmp_sig->param_count -= call->signature->sentinelpos;
+ tmp_sig->sentinelpos = 0;
+ memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+ /* Load the address of the truncated signature as an inline constant */
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->inst_p0 = tmp_sig;
+
+ /* NOTE(review): the extra 16 bytes presumably skip the IA64 scratch area
+ * at the bottom of the frame -- confirm against the prolog code. */
+ MONO_INST_NEW (cfg, arg, OP_OUTARG);
+ arg->inst_left = sig_arg;
+ arg->inst_imm = 16 + cinfo->sig_cookie.offset;
+ arg->type = STACK_PTR;
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+}
+
/*
* take the arguments and generate the arch-specific
* instructions to properly call the function in call.
* This includes pushing, moving arguments to the right register
* etc.
- * Issue: who does the spilling if needed, and when?
*/
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
ainfo = cinfo->args + i;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
- MonoMethodSignature *tmp_sig;
-
/* Emit the signature cookie just before the implicit arguments */
- MonoInst *sig_arg;
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
-
- g_assert (cinfo->sig_cookie.storage == ArgOnStack);
-
- /*
- * mono_ArgIterator_Setup assumes the signature cookie is
- * passed first and all the arguments which were before it are
- * passed on the stack after the signature. So compensate by
- * passing a different signature.
- */
- tmp_sig = mono_metadata_signature_dup (call->signature);
- tmp_sig->param_count -= call->signature->sentinelpos;
- tmp_sig->sentinelpos = 0;
- memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
-
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->inst_p0 = tmp_sig;
-
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- arg->inst_left = sig_arg;
- arg->inst_imm = 16 + cinfo->sig_cookie.offset;
- arg->type = STACK_PTR;
-
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
+ emit_sig_cookie (cfg, call, cinfo);
}
if (is_virtual && i == 0) {
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
MonoInst *stack_addr;
- gint align;
+ guint32 align;
guint32 size;
if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
if (ainfo->storage == ArgAggregate) {
MonoInst *vtaddr, *load, *load2, *offset_ins, *set_reg;
- int slot;
+ int slot, j;
vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/*
* Part of the structure is passed in registers.
*/
- for (i = 0; i < ainfo->nregs; ++i) {
+ for (j = 0; j < ainfo->nregs; ++j) {
int offset, load_op, dest_reg, arg_storage;
- slot = ainfo->reg + i;
+ slot = ainfo->reg + j;
if (ainfo->atype == AggregateSingleHFA) {
load_op = CEE_LDIND_R4;
- offset = i * 4;
- dest_reg = ainfo->reg + i;
+ offset = j * 4;
+ dest_reg = ainfo->reg + j;
arg_storage = ArgInFloatReg;
} else if (ainfo->atype == AggregateDoubleHFA) {
load_op = CEE_LDIND_R8;
- offset = i * 8;
- dest_reg = ainfo->reg + i;
+ offset = j * 8;
+ dest_reg = ainfo->reg + j;
arg_storage = ArgInFloatReg;
} else {
load_op = CEE_LDIND_I;
- offset = i * 8;
- dest_reg = cfg->arch.reg_out0 + ainfo->reg + i;
+ offset = j * 8;
+ dest_reg = cfg->arch.reg_out0 + ainfo->reg + j;
arg_storage = ArgInIReg;
}
MONO_INST_NEW (cfg, load, load_op);
load->inst_left = load2;
- if (i == 0)
+ if (j == 0)
set_reg = arg;
else
MONO_INST_NEW (cfg, set_reg, OP_OUTARG_REG);
/*
* Part of the structure is passed on the stack.
*/
- for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
+ for (j = ainfo->nregs; j < ainfo->nslots; ++j) {
MonoInst *outarg;
- slot = ainfo->reg + i;
+ slot = ainfo->reg + j;
MONO_INST_NEW (cfg, load, CEE_LDIND_I);
load->ssa_op = MONO_SSA_LOAD;
load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
- NEW_ICONST (cfg, offset_ins, (i * sizeof (gpointer)));
+ NEW_ICONST (cfg, offset_ins, (j * sizeof (gpointer)));
MONO_INST_NEW (cfg, load2, CEE_ADD);
load2->inst_left = load;
load2->inst_right = offset_ins;
MONO_INST_NEW (cfg, load, CEE_LDIND_I);
load->inst_left = load2;
- if (i == 0)
+ if (j == 0)
outarg = arg;
else
MONO_INST_NEW (cfg, outarg, OP_OUTARG);
}
}
+ /* Handle the case where there are no implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+ emit_sig_cookie (cfg, call, cinfo);
+ }
+
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
case OP_STOREI8_MEMBASE_IMM:
case OP_STORE_MEMBASE_IMM:
/* There are no store_membase instructions on ia64 */
- if (ia64_is_imm14 (ins->inst_offset)) {
+ if (ins->inst_offset == 0) {
+ temp2 = NULL;
+ } else if (ia64_is_imm14 (ins->inst_offset)) {
NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
}
ins->inst_offset = 0;
- ins->inst_destbasereg = temp2->dreg;
+ if (temp2)
+ ins->inst_destbasereg = temp2->dreg;
break;
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI2_MEMBASE_REG:
case OP_LOAD_MEMBASE:
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
- /* There are no load_membase instructions on ia64 */
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ case OP_ATOMIC_ADD_NEW_I4:
+ case OP_ATOMIC_ADD_NEW_I8:
+ case OP_ATOMIC_ADD_IMM_NEW_I4:
+ case OP_ATOMIC_ADD_IMM_NEW_I8:
+ /* There are no membase instructions on ia64 */
if (ins->inst_offset == 0) {
break;
}
temp2->dreg = mono_regstate_next_int (cfg->rs);
}
- ins->inst_offset = 0;
- ins->inst_basereg = temp2->dreg;
- break;
- case OP_IA64_FETCHADD4_IMM:
- case OP_IA64_FETCHADD8_IMM:
- case OP_ATOMIC_EXCHANGE_I4:
- case OP_ATOMIC_EXCHANGE_I8:
- /* There are no membase instructions on ia64 */
- if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, temp2, OP_ADD_IMM);
- temp2->sreg1 = ins->inst_basereg;
- temp2->inst_imm = ins->inst_offset;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
- }
- else {
- NEW_INS (cfg, temp, OP_I8CONST);
- temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, temp2, CEE_ADD);
- temp2->sreg1 = ins->inst_basereg;
- temp2->sreg2 = temp->dreg;
- temp2->dreg = mono_regstate_next_int (cfg->rs);
- }
ins->inst_offset = 0;
ins->inst_basereg = temp2->dreg;
break;
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE:
+ case OP_VCALL_MEMBASE: {
+ ArgStorage storage;
+
cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
- ArgStorage storage = cinfo->ret.storage;
+ storage = cinfo->ret.storage;
if (storage == ArgAggregate) {
MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;
}
g_free (cinfo);
break;
+ }
default:
g_assert_not_reached ();
}
mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)
+/*
+ * emit_cond_system_exception:
+ *
+ *   Emit a branch, predicated on PREDICATE, to code which throws the corlib
+ * exception named EXC_NAME. If the branch target can be optimized to an
+ * existing exception handler basic block (mono_branch_optimize_exception_target
+ * returns non-NULL), branch directly to that block instead of emitting a
+ * throw sequence patch.
+ */
+#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
+ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
+ if (tins == NULL) \
+ add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
+ else \
+ add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
+ ia64_br_cond_pred (code, (predicate), 0); \
+} while (0)
+
static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
/* Indirect call */
+ /* mono_arch_patch_callsite will patch this */
/* mono_arch_nullify_class_init_trampoline will patch this */
ia64_movl (code, GP_SCRATCH_REG, 0);
ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
case OP_LSHR_UN_IMM:
ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
+ case CEE_MUL:
+ /* Based on gcc code */
+ ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
+ ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
+ ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
+ ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
+ break;
case OP_STOREI1_MEMBASE_REG:
ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
case OP_COND_EXC_IOV:
case OP_COND_EXC_OV:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "OverflowException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "OverflowException", 6);
break;
case OP_COND_EXC_IC:
case OP_COND_EXC_C:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "OverflowException");
- ia64_br_cond_pred (code, 7, 0);
+ emit_cond_system_exception (cfg, code, "OverflowException", 7);
break;
case OP_IA64_COND_EXC:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, ins->inst_p1);
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
break;
case OP_IA64_CSET:
ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
- ia64_stfs_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
+ ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
+ ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
+ ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
case CEE_CONV_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
case CEE_CKFINITE:
/* Quiet NaN */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Signaling NaN */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Positive infinity */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Negative infinity */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
break;
/* Calls */
case OP_MEMORY_BARRIER:
ia64_mf (code);
break;
- case OP_IA64_FETCHADD4_IMM:
+ case OP_ATOMIC_ADD_IMM_NEW_I4:
g_assert (ins->inst_offset == 0);
ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
break;
- case OP_IA64_FETCHADD8_IMM:
+ case OP_ATOMIC_ADD_IMM_NEW_I8:
g_assert (ins->inst_offset == 0);
ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
case OP_ATOMIC_EXCHANGE_I8:
ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
break;
+ case OP_ATOMIC_ADD_NEW_I4: {
+ guint8 *label, *buf;
+
+ /* From libatomic_ops */
+ ia64_mf (code);
+
+ ia64_begin_bundle (code);
+ label = code.buf + code.nins;
+ ia64_ld4_acq (code, GP_SCRATCH_REG, ins->sreg1);
+ ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
+ ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
+ ia64_cmpxchg4_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
+ ia64_cmp4_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
+ buf = code.buf + code.nins;
+ ia64_br_cond_pred (code, 7, 0);
+ ia64_begin_bundle (code);
+ ia64_patch (buf, label);
+ ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
+ break;
+ }
+ case OP_ATOMIC_ADD_NEW_I8: {
+ guint8 *label, *buf;
+
+ /* From libatomic_ops */
+ ia64_mf (code);
+
+ ia64_begin_bundle (code);
+ label = code.buf + code.nins;
+ ia64_ld8_acq (code, GP_SCRATCH_REG, ins->sreg1);
+ ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
+ ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
+ ia64_cmpxchg8_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
+ ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
+ buf = code.buf + code.nins;
+ ia64_br_cond_pred (code, 7, 0);
+ ia64_begin_bundle (code);
+ ia64_patch (buf, label);
+ ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
+ break;
+ }
/* Exception handling */
case OP_CALL_HANDLER:
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/*
- * We might be called by call_filter, in which case the
- * the register stack is not set up correctly. So do it now.
- * Allocate a stack frame and set the fp register from the value
- * passed in by the caller.
- * R15 is used since it is writable using libunwind.
+ * R15 determines our caller. It is used since it is writable using
+ * libunwind.
* R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
+ * R15 != 0 means we are called by call_filter ().
*/
ia64_codegen_set_one_ins_per_bundle (code, TRUE);
ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
- /* Alloc is not predictable so we have to use a branch */
- ia64_br_cond_pred (code, 6, 3);
+
+ ia64_br_cond_pred (code, 6, 6);
+
+ /*
+ * Called by call_filter:
+ * Allocate a new stack frame, and set the fp register from the
+ * value passed in by the caller.
+ * We allocate a similar frame as is done by the prolog, so
+ * if an exception is thrown while executing the filter, the
+ * unwinder can unwind through the filter frame using the unwind
+ * info for the prolog.
+ */
ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
+ ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
+ ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
ia64_mov (code, cfg->frame_reg, IA64_R15);
+ /* Signal to endfilter that we are called by call_filter */
+ ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
+
/* Save the return address */
ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
break;
}
- case CEE_ENDFINALLY: {
- MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- /* Return the saved arp_pfs value to call_filter */
- ia64_mov (code, IA64_R9, cfg->arch.reg_saved_ar_pfs);
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
- ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
- ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
- ia64_br_cond_reg (code, IA64_B6);
- break;
- }
+ case CEE_ENDFINALLY:
case OP_ENDFILTER: {
- /* FIXME: Return the value */
+ /* FIXME: Return the value in ENDFILTER */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- /* Return the saved arp_pfs value to call_filter */
- ia64_mov (code, IA64_R9, cfg->arch.reg_saved_ar_pfs);
+
+ /* Load the return address */
ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
+
+ /* Test caller */
+ ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
+ ia64_br_cond_pred (code, 7, 4);
+
+ /* Called by call_filter */
+ /* Pop frame */
+ ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
+ ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
+ ia64_br_ret_reg (code, IA64_B0);
+
+ /* Called by CALL_HANDLER */
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
break;
static void
ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
{
- g_assert (n <= code->nins);
int stop_pos, i, deps_to_shift, dep_shift;
+ g_assert (n <= code->nins);
+
// if (n > 1) printf ("FOUND: %ld.\n", template);
ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
+ if (patch_info->type == MONO_PATCH_INFO_NONE)
+ continue;
if (mono_compile_aot) {
NOT_IMPLEMENTED;
}
ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
- ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->frame_reg);
- ia64_mov (code, cfg->frame_reg, IA64_SP);
+ ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
+ ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
+ if (cfg->frame_reg != cfg->arch.reg_saved_sp)
+ ia64_mov (code, cfg->frame_reg, IA64_SP);
}
if (alloc_size) {
/* No LMF on IA64 */
}
- if (strstr (cfg->method->name, "end_invoke_int_IAsyncResult"))
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_arch_break);
-
ia64_codegen_close (code);
g_free (cinfo);
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
- int nthrows;
+ int i, nthrows;
Ia64CodegenState code;
gboolean empty = TRUE;
//unw_dyn_region_info_t *r_exceptions;
-
- /*
MonoClass *exc_classes [16];
guint8 *exc_throw_start [16], *exc_throw_end [16];
- */
guint32 code_size = 0;
/* Compute needed space */
MonoClass *exc_class;
guint8* throw_ip;
guint8* buf;
+ guint64 exc_token_index;
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
g_assert (exc_class);
+ exc_token_index = mono_metadata_token_index (exc_class->type_token);
throw_ip = cfg->native_code + patch_info->ip.i;
ia64_begin_bundle (code);
ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);
- ia64_movl (code, cfg->arch.reg_out0 + 0, exc_class->type_token);
+ /* Find a throw sequence for the same exception class */
+ for (i = 0; i < nthrows; ++i)
+ if (exc_classes [i] == exc_class)
+ break;
- ia64_begin_bundle (code);
+ if (i < nthrows) {
+ gint64 offset = exc_throw_end [i] - 16 - throw_ip;
- patch_info->data.name = "mono_arch_throw_corlib_exception";
- patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
- patch_info->ip.i = code.buf - cfg->native_code;
+ if (ia64_is_adds_imm (offset))
+ ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
+ else
+ ia64_movl (code, cfg->arch.reg_out0 + 1, offset);
- /* Indirect call */
- ia64_movl (code, GP_SCRATCH_REG, 0);
- ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
- ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
- ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
+ buf = code.buf + code.nins;
+ ia64_br_cond_pred (code, 0, 0);
+ ia64_begin_bundle (code);
+ ia64_patch (buf, exc_throw_start [i]);
- /* Compute the offset */
- buf = code.buf + 32;
- ia64_movl (code, cfg->arch.reg_out0 + 1, buf - throw_ip);
+ patch_info->type = MONO_PATCH_INFO_NONE;
+ }
+ else {
+ /* Arg1 */
+ buf = code.buf;
+ ia64_movl (code, cfg->arch.reg_out0 + 1, 0);
- ia64_br_call_reg (code, IA64_B0, IA64_B6);
+ ia64_begin_bundle (code);
+
+ if (nthrows < 16) {
+ exc_classes [nthrows] = exc_class;
+ exc_throw_start [nthrows] = code.buf;
+ }
+
+ /* Arg2 */
+ if (ia64_is_adds_imm (exc_token_index))
+ ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
+ else
+ ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);
+
+ patch_info->data.name = "mono_arch_throw_corlib_exception";
+ patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
+ patch_info->ip.i = code.buf + code.nins - cfg->native_code;
+
+ /* Indirect call */
+ ia64_movl (code, GP_SCRATCH_REG, 0);
+ ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
+ ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
+ ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
+
+ ia64_br_call_reg (code, IA64_B0, IA64_B6);
+
+ /* Patch up the throw offset */
+ ia64_begin_bundle (code);
+
+ ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));
+
+ if (nthrows < 16) {
+ exc_throw_end [nthrows] = code.buf;
+ nthrows ++;
+ }
+ }
empty = FALSE;
break;
vtarg->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, vtarg);
- mono_call_inst_add_outarg_reg (call, vtarg->dreg, out_reg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, out_reg, FALSE);
out_reg ++;
}
this->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, this);
- mono_call_inst_add_outarg_reg (call, this->dreg, out_reg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, this->dreg, out_reg, FALSE);
}
}
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
if (strcmp (cmethod->name, "Increment") == 0) {
- MonoInst *ins_iconst;
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
- MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
- ins_iconst->inst_imm = 1;
-
+ ins->inst_imm = 1;
ins->inst_i0 = args [0];
- ins->inst_i1 = ins_iconst;
} else if (strcmp (cmethod->name, "Decrement") == 0) {
- MonoInst *ins_iconst;
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
- MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
- ins_iconst->inst_imm = -1;
-
+ ins->inst_imm = -1;
ins->inst_i0 = args [0];
- ins->inst_i1 = ins_iconst;
- /* FIXME: */
} else if (strcmp (cmethod->name, "Exchange") == 0) {
guint32 opcode;
MONO_INST_NEW (cfg, ins, opcode);
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = args [1];
+ } else if (strcmp (cmethod->name, "Add") == 0) {
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+
ins->inst_i0 = args [0];
ins->inst_i1 = args [1];
} else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {