}
void
-mono_arch_allocate_vars (MonoCompile *m)
+mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
gint32 *offsets;
CallInfo *cinfo;
- mono_ia64_alloc_stacked_registers (m);
+ mono_ia64_alloc_stacked_registers (cfg);
- header = mono_method_get_header (m->method);
+ header = mono_method_get_header (cfg->method);
- sig = mono_method_signature (m->method);
+ sig = mono_method_signature (cfg->method);
cinfo = get_call_info (sig, FALSE);
*/
/* Locals are allocated backwards from %fp */
- m->frame_reg = m->arch.reg_saved_sp;
+ cfg->frame_reg = cfg->arch.reg_saved_sp;
offset = 0;
- if (m->method->save_lmf) {
+ if (cfg->method->save_lmf) {
/* FIXME: */
#if 0
/* Reserve stack space for saving LMF + argument regs */
offset += sizeof (MonoLMF);
- m->arch.lmf_offset = offset;
+ cfg->arch.lmf_offset = offset;
#endif
}
if (sig->ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
case ArgInIReg:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = cinfo->ret.reg;
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgInFloatReg:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = cinfo->ret.reg;
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = m->arch.reg_in0 + cinfo->ret.reg;
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cfg->arch.reg_in0 + cinfo->ret.reg;
break;
default:
g_assert_not_reached ();
}
- m->ret->dreg = m->ret->inst_c0;
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/* Allocate locals */
- offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
+ offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
- offset += (locals_stack_align - 1);
- offset &= ~(locals_stack_align - 1);
+ offset = ALIGN_TO (offset, locals_stack_align);
}
- for (i = m->locals_start; i < m->num_varinfo; i++) {
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
- MonoInst *inst = m->varinfo [i];
+ MonoInst *inst = cfg->varinfo [i];
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = m->frame_reg;
+ inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = - (offset + offsets [i]);
// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
- m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
+ cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = m->varinfo [i];
+ inst = cfg->varinfo [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
else
arg_type = sig->params [i - sig->hasthis];
+ /* FIXME: VOLATILE is only set if the liveness pass runs */
if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
switch (ainfo->storage) {
case ArgInIReg:
inst->opcode = OP_REGVAR;
- inst->dreg = m->arch.reg_in0 + ainfo->reg;
+ inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
break;
case ArgInFloatReg:
/*
break;
case ArgOnStack:
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = m->frame_reg;
+ inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = ARGS_OFFSET + ainfo->offset;
break;
case ArgValuetypeInReg:
if (!inreg && (ainfo->storage != ArgOnStack)) {
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = m->frame_reg;
+ inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
if (ainfo->storage == ArgValuetypeInReg) {
NOT_IMPLEMENTED;
}
else
offset += sizeof (gpointer);
+ offset = ALIGN_TO (offset, sizeof (gpointer));
inst->inst_offset = - offset;
}
}
}
- m->stack_offset = offset;
+ cfg->stack_offset = offset;
g_free (cinfo);
}
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
else
size = mono_type_stack_size (&in->klass->byval_arg, &align);
+
+ /*
+ * FIXME: The destination is 'size' long, but the source might
+ * be smaller.
+ */
+
if (ainfo->storage == ArgValuetypeInReg) {
NOT_IMPLEMENTED;
}
}
static CompType
-opcode_to_type (int opcode)
+opcode_to_type (int opcode, int cmp_opcode)
{
if ((opcode >= CEE_BEQ) && (opcode <= CEE_BLT_UN))
return CMP_TYPE_L;
else if ((opcode >= OP_CEQ) && (opcode <= OP_CLT_UN))
return CMP_TYPE_L;
- else if ((opcode >= OP_COND_EXC_EQ) && (opcode <= OP_COND_EXC_LT_UN))
- return CMP_TYPE_L;
else if ((opcode >= OP_IBEQ) && (opcode <= OP_IBLE_UN))
return CMP_TYPE_I;
else if ((opcode >= OP_ICEQ) && (opcode <= OP_ICLT_UN))
return CMP_TYPE_F;
else if ((opcode >= OP_FCEQ) && (opcode <= OP_FCLT_UN))
return CMP_TYPE_F;
- else {
+ else if ((opcode >= OP_COND_EXC_EQ) && (opcode <= OP_COND_EXC_LT_UN)) {
+ switch (cmp_opcode) {
+ case OP_ICOMPARE:
+ case OP_ICOMPARE_IMM:
+ return CMP_TYPE_I;
+ default:
+ return CMP_TYPE_L;
+ }
+ } else {
g_error ("Unknown opcode '%s' in opcode_to_type", mono_inst_name (opcode));
return 0;
}
};
static int
-opcode_to_ia64_cmp (int opcode)
+opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
- return cond_to_ia64_cmp [opcode_to_cond (opcode)][opcode_to_type (opcode)];
+ return cond_to_ia64_cmp [opcode_to_cond (opcode)][opcode_to_type (opcode, cmp_opcode)];
}
int cond_to_ia64_cmp_imm [][3] = {
};
static int
-opcode_to_ia64_cmp_imm (int opcode)
+opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
/* The condition needs to be reversed */
- return cond_to_ia64_cmp_imm [opcode_to_cond (opcode)][opcode_to_type (opcode)];
+ return cond_to_ia64_cmp_imm [opcode_to_cond (opcode)][opcode_to_type (opcode, cmp_opcode)];
}
static void
switch (next->opcode) {
case CEE_BGE:
case CEE_BLT:
- case CEE_BGE_UN:
- case CEE_BLT_UN:
case OP_COND_EXC_LT:
case OP_IBGE:
case OP_IBLT:
+ imm = ia64_is_imm8 (ins->inst_imm - 1);
+ break;
case OP_IBGE_UN:
case OP_IBLT_UN:
- imm = ia64_is_imm8 (ins->inst_imm - 1);
+ case CEE_BGE_UN:
+ case CEE_BLT_UN:
+ imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
break;
default:
imm = ia64_is_imm8 (ins->inst_imm);
}
if (imm) {
- ins->opcode = opcode_to_ia64_cmp_imm (next->opcode);
+ ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
ins->sreg2 = ins->sreg1;
}
else {
- ins->opcode = opcode_to_ia64_cmp (next->opcode);
+ ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
next = ins->next;
- ins->opcode = opcode_to_ia64_cmp (next->opcode);
+ ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
switch (next->opcode) {
case CEE_BEQ:
case CEE_BNE_UN:
}
case OP_MUL_IMM: {
/* This should be emulated, but rules in inssel.brg generate it */
- int i;
+ int i, sum_reg;
/* First the easy cases */
if (ins->inst_imm == 1) {
break;
}
+ /* This could be optimized */
if (ins->opcode == OP_MUL_IMM) {
- /* FIXME: */
- g_error ("Multiplication by %ld not implemented\n", ins->inst_imm);
+ sum_reg = 0;
+ for (i = 0; i < 64; ++i) {
+ if (ins->inst_imm & (((gint64)1) << i)) {
+ NEW_INS (cfg, temp, OP_SHL_IMM);
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ temp->sreg1 = ins->sreg1;
+ temp->inst_imm = i;
+
+ if (sum_reg == 0)
+ sum_reg = temp->dreg;
+ else {
+ NEW_INS (cfg, temp2, CEE_ADD);
+ temp2->dreg = mono_regstate_next_int (cfg->rs);
+ temp2->sreg1 = sum_reg;
+ temp2->sreg2 = temp->dreg;
+ sum_reg = temp2->dreg;
+ }
+ }
+ }
+ ins->opcode = OP_MOVE;
+ ins->sreg1 = sum_reg;
}
break;
}
+ case CEE_CONV_OVF_U4:
+ NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = IA64_R0;
+
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
+ temp->inst_p1 = (char*)"OverflowException";
+
+ ins->opcode = OP_MOVE;
+ break;
+ case CEE_CONV_OVF_I4_UN:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = 0x7fffffff;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+
+ NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
+ temp2->sreg1 = ins->sreg1;
+ temp2->sreg2 = temp->dreg;
+
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
+ temp->inst_p1 = (char*)"OverflowException";
+
+ ins->opcode = OP_MOVE;
+ break;
+ case OP_FCONV_TO_I4:
+ case OP_FCONV_TO_I2:
+ case OP_FCONV_TO_U2:
+ case OP_FCONV_TO_U1:
+ NEW_INS (cfg, temp, OP_FCONV_TO_I8);
+ temp->sreg1 = ins->sreg1;
+ temp->dreg = ins->dreg;
+
+ switch (ins->opcode) {
+ case OP_FCONV_TO_I4:
+ ins->opcode = OP_SEXT_I4;
+ break;
+ case OP_FCONV_TO_I2:
+ ins->opcode = OP_SEXT_I2;
+ break;
+ case OP_FCONV_TO_U2:
+ ins->opcode = OP_ZEXT_I4;
+ break;
+ case OP_FCONV_TO_U1:
+ ins->opcode = OP_ZEXT_I1;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ ins->sreg1 = ins->dreg;
+ break;
default:
break;
}
while (ins) {
offset = code.buf - cfg->native_code;
- max_len = ((int)(((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN]));
+ max_len = ((int)(((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN])) + 128;
- if (offset > (cfg->code_size - max_len - 16)) {
+ while (offset + max_len + 16 > cfg->code_size) {
ia64_codegen_close (code);
offset = code.buf - cfg->native_code;
ia64_begin_bundle (code);
ins->inst_c0 = code.buf - cfg->native_code;
break;
+ case CEE_NOP:
+ break;
case OP_BR_REG:
ia64_mov_to_br (code, IA64_B6, ins->sreg1);
ia64_br_cond_reg (code, IA64_B6);
/* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
+ /* res <u sreg1 => unsigned overflow */
+ ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
+
+ /* FIXME: Predicate this since this is a side effect */
+ ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
+ break;
+ case OP_ISUBCC:
+ /* p6 and p7 are set if there is signed/unsigned overflow */
+
+ /* Set p8-p9 == (sreg2 > 0) */
+ ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
+
+ ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
+
+ /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
+ ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
+ /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
+ ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
+
+ /* sreg1 <u sreg2 => unsigned overflow */
+ ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
+
+ /* FIXME: Predicate this since this is a side effect */
+ ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
+ break;
+ case OP_ADDCC:
+ /* Same as OP_IADDCC */
+ ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
+
+ ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
+
+ ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
+ ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
+
+ ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
+
ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
+ break;
+ case OP_SUBCC:
+ /* Same as OP_ISUBCC */
+
+ ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
+
+ ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
+
+ ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
+ ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
- /* FIXME: Set p7 as well */
+ ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
+
+ ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
case OP_SEXT_I2:
ia64_sxt2 (code, ins->dreg, ins->sreg1);
break;
+ case OP_SEXT_I4:
+ ia64_sxt4 (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_ZEXT_I1:
+ ia64_zxt1 (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_ZEXT_I2:
+ ia64_zxt2 (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_ZEXT_I4:
+ ia64_zxt4 (code, ins->dreg, ins->sreg1);
+ break;
/* Compare opcodes */
case OP_IA64_CMP4_EQ:
break;
case OP_COND_EXC_IOV:
- /* FIXME: */
- ia64_break_i_pred (code, 6, 0);
+ case OP_COND_EXC_OV:
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "OverflowException");
+ ia64_br_cond_pred (code, 6, 0);
break;
case OP_COND_EXC_IC:
- /* FIXME: */
- ia64_break_i_pred (code, 7, 0);
+ case OP_COND_EXC_C:
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "OverflowException");
+ ia64_br_cond_pred (code, 7, 0);
break;
case OP_IA64_COND_EXC:
mono_add_patch_info (cfg, code.buf - cfg->native_code,
case CEE_CONV_U:
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_OVF_U4:
- /* FIXME: */
- ia64_mov (code, ins->dreg, ins->sreg1);
- break;
- case CEE_CONV_OVF_I4_UN:
- /* FIXME: Do this in the lowering pass */
- ia64_movl (code, GP_SCRATCH_REG, 0x7fffffff);
- ia64_cmp4_gtu (code, 6, 7, ins->sreg1, GP_SCRATCH_REG);
-
- mono_add_patch_info (cfg, code.buf - cfg->native_code,
- MONO_PATCH_INFO_EXC, "OverflowException");
- ia64_br_cond_pred (code, 6, 0);
-
- /* FIXME: */
- ia64_mov (code, ins->dreg, ins->sreg1);
- break;
/*
* FLOAT OPCODES
case OP_FCONV_TO_R4:
ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
break;
- case OP_FCONV_TO_I4:
- case OP_FCONV_TO_I2:
- case OP_FCONV_TO_U2:
- case OP_FCONV_TO_U1:
- /* FIXME: sign/zero extend ? */
- ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
- ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
- break;
case OP_FCONV_TO_I8:
- /* FIXME: Difference with OP_FCONV_TO_I4 ? */
ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
break;
case OP_FNEG:
ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
break;
+ case CEE_CKFINITE:
+ /* Quiet NaN */
+ ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "ArithmeticException");
+ ia64_br_cond_pred (code, 6, 0);
+ /* Signaling NaN */
+ ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "ArithmeticException");
+ ia64_br_cond_pred (code, 6, 0);
+ /* Positive infinity */
+ ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "ArithmeticException");
+ ia64_br_cond_pred (code, 6, 0);
+ /* Negative infinity */
+ ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
+ mono_add_patch_info (cfg, code.buf - cfg->native_code,
+ MONO_PATCH_INFO_EXC, "ArithmeticException");
+ ia64_br_cond_pred (code, 6, 0);
+ break;
/* Calls */
case OP_CHECK_THIS:
code = emit_move_return_value (cfg, ins, code);
break;
+ case OP_LOCALLOC:
+ /* keep alignment */
+ ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->sreg1);
+ ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
+ ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
+
+ ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
+
+ /* The first 16 bytes at sp are reserved by the ABI */
+ ia64_adds_imm (code, ins->dreg, 16, IA64_SP);
+
+ if (ins->flags & MONO_INST_INIT) {
+ /* Upper limit */
+ ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
+
+ /* Init loop */
+ ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
+ ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
+ ia64_br_cond_pred (code, 8, -2);
+
+ ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ }
+
+ break;
+
/* Exception handling */
case OP_CALL_HANDLER:
/*
* save the return address to a register and use a
* branch.
*/
+ ia64_mov (code, IA64_R15, IA64_R0);
ia64_mov_from_ip (code, GP_SCRATCH_REG);
/* Add the length of OP_CALL_HANDLER */
ia64_adds_imm (code, GP_SCRATCH_REG, 5 * 16, GP_SCRATCH_REG);
*/
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ /*
+ * We might be called by the exception handling code, in which case
+ * the register stack is not set up correctly. So do it now.
+ */
+ ia64_alloc (code, GP_SCRATCH_REG2, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
+
+ /* Set the fp register from the value passed in by the caller */
+ /* R15 is used since it is writable using libunwind */
+ /* R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context () */
+ ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
+ ia64_add_pred (code, 7, cfg->frame_reg, IA64_R0, IA64_R15);
+
ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
+
break;
}
case CEE_ENDFINALLY: {
ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
+
+ /*
+ * This might be the last instruction in the method, so add a dummy
+ * instruction so the unwinder will work.
+ */
+ ia64_break_i (code, 0);
+ break;
+ case OP_RETHROW:
+ ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_arch_rethrow_exception");
+
+ ia64_break_i (code, 0);
break;
default:
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
- MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
- int alloc_size, pos, max_offset, i;
+ int alloc_size, pos, i;
Ia64CodegenState code;
CallInfo *cinfo;
unw_dyn_region_info_t *r_pro;
if (inst->dreg != cfg->arch.reg_in0 + ainfo->reg)
ia64_mov (code, inst->dreg, cfg->arch.reg_in0 + ainfo->reg);
break;
+ case ArgOnStack:
+ ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
+ ia64_ld8 (code, inst->dreg, GP_SCRATCH_REG);
+ break;
default:
NOT_IMPLEMENTED;
}
g_assert (cfg->code_len < cfg->code_size);
+ cfg->arch.prolog_end_offset = cfg->code_len;
+
return code.buf;
}
guint8 *buf;
CallInfo *cinfo;
+ cfg->arch.epilog_begin_offset = cfg->code_len;
+
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
MonoJumpInfo *patch_info;
int nthrows;
Ia64CodegenState code;
+ gboolean empty = TRUE;
/*
MonoClass *exc_classes [16];
guint8 *exc_throw_start [16], *exc_throw_end [16];
ia64_movl (code, cfg->arch.reg_out0 + 1, buf - throw_ip);
ia64_br_call_reg (code, IA64_B0, IA64_B6);
+
+ empty = FALSE;
break;
}
default:
}
}
+ if (!empty)
+ /* The unwinder needs this to work */
+ ia64_break_i (code, 0);
+
ia64_codegen_close (code);
cfg->code_len = code.buf - cfg->native_code;
void
mono_arch_flush_register_windows (void)
{
- NOT_IMPLEMENTED;
+ /* Not needed because of libunwind */
}
gboolean
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
- NOT_IMPLEMENTED;
-
+ /* Division is emulated with explicit overflow checks */
return FALSE;
}