* ia64_codegen_set_one_ins_per_bundle () at those places.
*/
-#define SIGNAL_STACK_SIZE (64 * 1024)
-
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG 31
cinfo->ret.storage = ArgInFloatReg;
cinfo->ret.reg = 8;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = IA64_R8;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
case MONO_TYPE_ARRAY:
add_general (&gr, &stack_size, ainfo);
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ add_general (&gr, &stack_size, ainfo);
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
/* FIXME: */
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return TRUE;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t))
+ return TRUE;
+ return FALSE;
case MONO_TYPE_VALUETYPE:
return FALSE;
}
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
CallInfo *cinfo;
- guint32 reserved_regs = 3;
+ guint32 reserved_regs;
+ MonoMethodHeader *header;
if (cfg->arch.reg_local0 > 0)
/* Already done */
cinfo = get_call_info (mono_method_signature (cfg->method), FALSE);
- /* Three registers are reserved for use by the prolog/epilog */
- reserved_regs = 3;
+ header = mono_method_get_header (cfg->method);
+
+ /* Some registers are reserved for use by the prolog/epilog */
+ reserved_regs = header->num_clauses ? 4 : 3;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
- cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 3;
+ cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;
+
+ /*
+ * Frames without handlers save sp to fp, frames with handlers save it into
+ * a dedicated register.
+ */
+ if (header->num_clauses)
+ cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
+ else
+ cfg->arch.reg_saved_sp = cfg->arch.reg_fp;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
- cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - 4;
+ cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
}
/*
}
else {
/* Locals are allocated backwards from %fp */
- cfg->frame_reg = cfg->arch.reg_saved_sp;
+ cfg->frame_reg = cfg->arch.reg_fp;
offset = 0;
}
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
MonoInst *stack_addr;
- gint align;
+ guint32 align;
guint32 size;
if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
temp2->dreg = mono_regstate_next_int (cfg->rs);
}
+ ins->inst_offset = 0;
+ ins->inst_basereg = temp2->dreg;
+ break;
+ case OP_IA64_FETCHADD4_IMM:
+ case OP_IA64_FETCHADD8_IMM:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ /* There are no membase instructions on ia64 */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
+ temp2->sreg1 = ins->inst_basereg;
+ temp2->inst_imm = ins->inst_offset;
+ temp2->dreg = mono_regstate_next_int (cfg->rs);
+ }
+ else {
+ NEW_INS (cfg, temp, OP_I8CONST);
+ temp->inst_c0 = ins->inst_offset;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ NEW_INS (cfg, temp2, CEE_ADD);
+ temp2->sreg1 = ins->inst_basereg;
+ temp2->sreg2 = temp->dreg;
+ temp2->dreg = mono_regstate_next_int (cfg->rs);
+ }
ins->inst_offset = 0;
ins->inst_basereg = temp2->dreg;
break;
break;
}
case OP_MUL_IMM:
+ case OP_LMUL_IMM:
case OP_IMUL_IMM: {
int i, sum_reg;
gboolean found = FALSE;
- int shl_op = ins->opcode == OP_MUL_IMM ? OP_SHL_IMM : OP_ISHL_IMM;
+ int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;
/* First the easy cases */
if (ins->inst_imm == 1) {
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE:
+ case OP_VCALL_MEMBASE: {
+ ArgStorage storage;
+
cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
- ArgStorage storage = cinfo->ret.storage;
+ storage = cinfo->ret.storage;
if (storage == ArgAggregate) {
MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;
}
g_free (cinfo);
break;
+ }
default:
g_assert_not_reached ();
}
mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)
+/*
+ * emit_cond_system_exception:
+ * Emit a predicated branch (on predicate register PREDICATE) to code which
+ * throws the corlib exception named EXC_NAME.  If branch optimization finds
+ * an existing basic block already handling this exception
+ * (mono_branch_optimize_exception_target ()), branch to that block instead
+ * of recording a new MONO_PATCH_INFO_EXC patch.
+ * NOTE(review): expands references to 'bb' from the enclosing scope —
+ * only usable inside the basic-block emission loop.
+ */
+#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
+	MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
+	if (tins == NULL) \
+		add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
+	else \
+		add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
+	ia64_br_cond_pred (code, (predicate), 0); \
+} while (0)
+
static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
/* Indirect call */
+ /* mono_arch_patch_callsite will patch this */
/* mono_arch_nullify_class_init_trampoline will patch this */
ia64_movl (code, GP_SCRATCH_REG, 0);
ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
case OP_COND_EXC_IOV:
case OP_COND_EXC_OV:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "OverflowException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "OverflowException", 6);
break;
case OP_COND_EXC_IC:
case OP_COND_EXC_C:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "OverflowException");
- ia64_br_cond_pred (code, 7, 0);
+ emit_cond_system_exception (cfg, code, "OverflowException", 7);
break;
case OP_IA64_COND_EXC:
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, ins->inst_p1);
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
break;
case OP_IA64_CSET:
ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
- ia64_stfs_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
+ ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
+ ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
+ ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
case CEE_CONV_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
case CEE_CKFINITE:
/* Quiet NaN */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Signaling NaN */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Positive infinity */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
/* Negative infinity */
ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
- add_patch_info (cfg, code,
- MONO_PATCH_INFO_EXC, "ArithmeticException");
- ia64_br_cond_pred (code, 6, 0);
+ emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
break;
/* Calls */
break;
}
+ case CEE_BREAK:
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_arch_break);
+ break;
case OP_LOCALLOC: {
gint32 abi_offset;
+ /* FIXME: Sigaltstack support */
+
/* keep alignment */
ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->sreg1);
ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
ia64_ld8 (code, ins->dreg, ins->dreg);
break;
+ /* Synchronization */
+ case OP_MEMORY_BARRIER:
+ ia64_mf (code);
+ break;
+ case OP_IA64_FETCHADD4_IMM:
+ g_assert (ins->inst_offset == 0);
+ ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
+ ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
+ break;
+ case OP_IA64_FETCHADD8_IMM:
+ g_assert (ins->inst_offset == 0);
+ ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
+ ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
+ break;
+ case OP_ATOMIC_EXCHANGE_I4:
+ ia64_xchg4_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
+ ia64_sxt4 (code, ins->dreg, ins->dreg);
+ break;
+ case OP_ATOMIC_EXCHANGE_I8:
+ ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
+ break;
+
/* Exception handling */
case OP_CALL_HANDLER:
/*
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/*
- * We might be called by call_filter, in which case the
- * the register stack is not set up correctly. So do it now.
- * Allocate a stack frame and set the fp register from the value
- * passed in by the caller.
- * R15 is used since it is writable using libunwind.
+ * R15 determines our caller. It is used since it is writable using
+ * libunwind.
* R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
+ * R15 != 0 means we are called by call_filter ().
*/
ia64_codegen_set_one_ins_per_bundle (code, TRUE);
ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
- /* Alloc is not predictable so we have to use a branch */
- ia64_br_cond_pred (code, 6, 3);
+
+ ia64_br_cond_pred (code, 6, 6);
+
+ /*
+ * Called by call_filter:
+ * Allocate a new stack frame, and set the fp register from the
+ * value passed in by the caller.
+ * We allocate a similar frame as is done by the prolog, so
+ * if an exception is thrown while executing the filter, the
+ * unwinder can unwind through the filter frame using the unwind
+ * info for the prolog.
+ */
ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
+ ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
+ ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
ia64_mov (code, cfg->frame_reg, IA64_R15);
+ /* Signal to endfilter that we are called by call_filter */
+ ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
+
/* Save the return address */
ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
break;
}
- case CEE_ENDFINALLY: {
- MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- /* Return the saved arp_pfs value to call_filter */
- ia64_mov (code, IA64_R9, cfg->arch.reg_saved_ar_pfs);
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
- ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
- ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
- ia64_br_cond_reg (code, IA64_B6);
- break;
- }
+ case CEE_ENDFINALLY:
case OP_ENDFILTER: {
- /* FIXME: Return the value */
+ /* FIXME: Return the value in ENDFILTER */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
- /* Return the saved arp_pfs value to call_filter */
- ia64_mov (code, IA64_R9, cfg->arch.reg_saved_ar_pfs);
+
+ /* Load the return address */
ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
+
+ /* Test caller */
+ ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
+ ia64_br_cond_pred (code, 7, 4);
+
+ /* Called by call_filter */
+ /* Pop frame */
+ ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
+ ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
+ ia64_br_ret_reg (code, IA64_B0);
+
+ /* Called by CALL_HANDLER */
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
break;
#endif
static void
-ia64_emit_bundle_manual (Ia64CodegenState *code)
-{
- int j, ins_type, template;
-
- if (code->nins == 0)
- return;
-
- g_assert (code->nins == 3 || ((code->nins == 2) && code->itypes [1] == IA64_INS_TYPE_LX));
-
- /* Verify template is correct */
- template = code->template;
- for (j = 0; j < 3; ++j) {
- if (code->stops [j])
- g_assert (stops_in_template [template]);
-
- ins_type = ins_types_in_template [template][j];
- switch (code->itypes [j]) {
- case IA64_INS_TYPE_A:
- g_assert ((ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M));
- break;
- case IA64_INS_TYPE_LX:
- g_assert (j == 1);
- g_assert (ins_type == IA64_INS_TYPE_LX);
- j ++;
- break;
- default:
- g_assert (ins_type == code->itypes [j]);
- }
- }
-
- ia64_emit_bundle_template (code, template, code->instructions [0], code->instructions [1], code->instructions [2]);
- code->template = 0;
- code->nins = 0;
- code->dep_info_pos = 0;
-}
-
-static void
-ia64_analyze_deps (Ia64CodegenState *code, int *deps_start)
+ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
{
int i, pos, ins_index, current_deps_start, current_ins_start, reg;
guint8 *deps = code->dep_info;
gboolean need_stop, no_stop;
for (i = 0; i < code->nins; ++i)
- code->stops [i] = FALSE;
+ stops [i] = FALSE;
ins_index = 0;
current_deps_start = 0;
if (need_stop && !no_stop) {
g_assert (ins_index > 0);
- code->stops [ins_index - 1] = 1;
+ stops [ins_index - 1] = 1;
DEBUG_INS_SCHED (printf ("STOP\n"));
current_deps_start = current_ins_start;
if (code->nins > 0) {
/* No dependency info for the last instruction */
- code->stops [code->nins - 1] = 1;
+ stops [code->nins - 1] = 1;
}
deps_start [code->nins] = code->dep_info_pos;
}
static void
-ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
+ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
{
- g_assert (n <= code->nins);
int stop_pos, i, deps_to_shift, dep_shift;
+ g_assert (n <= code->nins);
+
// if (n > 1) printf ("FOUND: %ld.\n", template);
ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
for (i = 0; i < code->nins + 1 - n; ++i)
deps_start [i] = deps_start [n + i] - dep_shift;
+ /* Determine the exact positions of instructions with unwind ops */
+ if (code->unw_op_count) {
+ int ins_pos [16];
+ int curr_ins, curr_ins_pos;
+
+ curr_ins = 0;
+ curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
+ for (i = 0; i < 3; ++i) {
+ if (! (nops & (1 << i))) {
+ ins_pos [curr_ins] = curr_ins_pos + i;
+ curr_ins ++;
+ }
+ }
+
+ for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
+ if (code->unw_ops_pos [i] < n) {
+ code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
+ //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
+ }
+ }
+ if (code->unw_op_pos < code->unw_op_count)
+ code->unw_op_pos += n;
+ }
+
if (n == code->nins) {
code->template = 0;
code->nins = 0;
else {
memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
- memcpy (&code->stops [0], &code->stops [n], (code->nins - n) * sizeof (int));
+ memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
code->nins -= n;
}
}
{
int i, ins_type, template, nins_to_emit;
int deps_start [16];
+ int stops [16];
gboolean found;
- if (!code->automatic) {
- ia64_emit_bundle_manual (code);
- return;
- }
-
/*
* We implement a simple scheduler which tries to put three instructions
* per bundle, then two, then one.
*/
-
- ia64_analyze_deps (code, deps_start);
+ ia64_analyze_deps (code, deps_start, stops);
if ((code->nins >= 3) && !code->one_ins_per_bundle) {
/* Find a suitable template */
for (template = 0; template < 32; ++template) {
- if (stops_in_template [template][0] != code->stops [0] ||
- stops_in_template [template][1] != code->stops [1] ||
- stops_in_template [template][2] != code->stops [2])
+ if (stops_in_template [template][0] != stops [0] ||
+ stops_in_template [template][1] != stops [1] ||
+ stops_in_template [template][2] != stops [2])
continue;
found = TRUE;
found = debug_ins_sched ();
if (found) {
- ia64_real_emit_bundle (code, deps_start, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
break;
}
}
if ((code->nins >= 2) && !code->one_ins_per_bundle) {
/* Try a nop at the end */
for (template = 0; template < 32; ++template) {
- if (stops_in_template [template][0] != code->stops [0] ||
- ((stops_in_template [template][1] != code->stops [1]) &&
- (stops_in_template [template][2] != code->stops [1])))
+ if (stops_in_template [template][0] != stops [0] ||
+ ((stops_in_template [template][1] != stops [1]) &&
+ (stops_in_template [template][2] != stops [1])))
continue;
if (!debug_ins_sched ())
continue;
- ia64_real_emit_bundle (code, deps_start, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
+ ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
break;
}
}
if ((code->nins >= 2) && !code->one_ins_per_bundle) {
/* Try a nop in the middle */
for (template = 0; template < 32; ++template) {
- if (((stops_in_template [template][0] != code->stops [0]) &&
- (stops_in_template [template][1] != code->stops [0])) ||
- stops_in_template [template][2] != code->stops [1])
+ if (((stops_in_template [template][0] != stops [0]) &&
+ (stops_in_template [template][1] != stops [0])) ||
+ stops_in_template [template][2] != stops [1])
continue;
if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
if (!debug_ins_sched ())
continue;
- ia64_real_emit_bundle (code, deps_start, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
+ ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
break;
}
}
if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
/* Try a nop at the beginning */
for (template = 0; template < 32; ++template) {
- if ((stops_in_template [template][1] != code->stops [0]) ||
- (stops_in_template [template][2] != code->stops [1]))
+ if ((stops_in_template [template][1] != stops [0]) ||
+ (stops_in_template [template][2] != stops [1]))
continue;
if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
if (!debug_ins_sched ())
continue;
- ia64_real_emit_bundle (code, deps_start, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
break;
}
}
while (nins_to_emit > 0) {
if (!debug_ins_sched ())
- code->stops [0] = 1;
+ stops [0] = 1;
switch (code->itypes [0]) {
case IA64_INS_TYPE_A:
- if (code->stops [0])
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
+ if (stops [0])
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
else
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
break;
case IA64_INS_TYPE_I:
- if (code->stops [0])
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
+ if (stops [0])
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
else
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
break;
case IA64_INS_TYPE_M:
- if (code->stops [0])
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
+ if (stops [0])
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
else
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
break;
case IA64_INS_TYPE_B:
- if (code->stops [0])
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
+ if (stops [0])
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
else
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
break;
case IA64_INS_TYPE_F:
- if (code->stops [0])
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
+ if (stops [0])
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
else
- ia64_real_emit_bundle (code, deps_start, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
break;
case IA64_INS_TYPE_LX:
- if (code->stops [0] || code->stops [1])
- ia64_real_emit_bundle (code, deps_start, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
+ if (stops [0] || stops [1])
+ ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
else
- ia64_real_emit_bundle (code, deps_start, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
+ ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
nins_to_emit --;
break;
default:
}
}
+/*
+ * mono_ia64_create_unwind_region:
+ * Package the libunwind dynamic unwind ops accumulated in CODE into a newly
+ * allocated unw_dyn_region_info_t covering the code emitted since
+ * code->region_start, then reset the accumulator state so a new region
+ * begins at the current code position.  The caller owns the returned
+ * region (allocated with g_malloc0).
+ */
+unw_dyn_region_info_t*
+mono_ia64_create_unwind_region (Ia64CodegenState *code)
+{
+	unw_dyn_region_info_t *r;
+
+	/* No un-emitted instructions may remain when the region is closed */
+	g_assert (code->nins == 0);
+	r = g_malloc0 (_U_dyn_region_info_size (code->unw_op_count));
+	memcpy (&r->op, &code->unw_ops, sizeof (unw_dyn_op_t) * code->unw_op_count);
+	r->op_count = code->unw_op_count;
+	/* Each 16-byte ia64 bundle encodes 3 instruction slots */
+	r->insn_count = ((code->buf - code->region_start) >> 4) * 3;
+	code->unw_op_count = 0;
+	code->unw_op_pos = 0;
+	code->region_start = code->buf;
+
+	return r;
+}
static void
ia64_patch (unsigned char* code, gpointer target)
target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
+ if (patch_info->type == MONO_PATCH_INFO_NONE)
+ continue;
if (mono_compile_aot) {
NOT_IMPLEMENTED;
}
int alloc_size, pos, i;
Ia64CodegenState code;
CallInfo *cinfo;
- unw_dyn_region_info_t *r_pro;
- int unw_op_count;
sig = mono_method_signature (method);
pos = 0;
cfg->native_code = g_malloc (cfg->code_size);
ia64_codegen_init (code, cfg->native_code);
- ia64_codegen_set_automatic (code, FALSE);
alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
if (cfg->param_area)
alloc_size -= pos;
- /* Initialize unwind info */
- r_pro = g_malloc0 (_U_dyn_region_info_size (3));
- unw_op_count = 0;
-
- ia64_begin_bundle_template (code, IA64_TEMPLATE_MIIS);
+ ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
+ ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + cfg->arch.reg_saved_b0);
ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
- _U_dyn_op_save_reg (&r_pro->op[unw_op_count++], _U_QP_TRUE, /* when=*/ 0,
- /* reg=*/ UNW_IA64_AR_PFS, /* dst=*/ UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
- _U_dyn_op_save_reg (&r_pro->op[unw_op_count++], _U_QP_TRUE, /* when=*/ 1,
- /* reg=*/ UNW_IA64_RP, /* dst=*/ UNW_IA64_GR + cfg->arch.reg_saved_b0);
-
if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
- ia64_mov (code, cfg->frame_reg, IA64_SP);
- _U_dyn_op_save_reg (&r_pro->op[unw_op_count++], _U_QP_TRUE, /* when=*/ 2,
- /* reg=*/ UNW_IA64_SP, /* dst=*/ UNW_IA64_GR + cfg->frame_reg);
+ ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
+ ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
+ if (cfg->frame_reg != cfg->arch.reg_saved_sp)
+ ia64_mov (code, cfg->frame_reg, IA64_SP);
}
- else {
- if (cfg->arch.omit_fp && alloc_size && ia64_is_imm14 (-alloc_size)) {
- /* FIXME: Add unwind info */
- ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
- }
- else
- ia64_nop_i (code, 0);
- }
- ia64_stop (code);
- ia64_begin_bundle (code);
-
- /* Finish unwind info */
- r_pro->op_count = unw_op_count;
- r_pro->insn_count = (code.buf - cfg->native_code) >> 4;
-
- cfg->arch.r_pro = r_pro;
-
- ia64_codegen_set_automatic (code, TRUE);
if (alloc_size) {
- /* See mono_emit_stack_alloc */
-#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
- NOT_IMPLEMENTED;
-#else
+ int pagesize = getpagesize ();
+#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+ if (alloc_size >= pagesize) {
+ gint32 remaining_size = alloc_size;
+
+ /* Generate stack touching code */
+ ia64_mov (code, GP_SCRATCH_REG, IA64_SP);
+ while (remaining_size >= pagesize) {
+ ia64_movl (code, GP_SCRATCH_REG2, pagesize);
+ ia64_sub (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
+ ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ remaining_size -= pagesize;
+ }
+ }
+#endif
if (ia64_is_imm14 (-alloc_size)) {
if (cfg->arch.omit_fp)
- /* Already done */
- ;
- else
- ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
+ ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
+ ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
}
else {
- ia64_movl (code, GP_SCRATCH_REG, -alloc_size); ia64_stop (code);
+ ia64_movl (code, GP_SCRATCH_REG, -alloc_size);
+ if (cfg->arch.omit_fp)
+ ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
}
-#endif
}
+ ia64_begin_bundle (code);
+
+ /* Initialize unwind info */
+ cfg->arch.r_pro = mono_ia64_create_unwind_region (&code);
+
if (sig->ret->type != MONO_TYPE_VOID) {
if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
/* Save volatile arguments to the stack */
/* No LMF on IA64 */
}
+ if (strstr (cfg->method->name, "end_invoke_int_IAsyncResult"))
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_arch_break);
+
ia64_codegen_close (code);
g_free (cinfo);
g_free (cinfo);
ia64_begin_bundle (code);
- ia64_codegen_set_automatic (code, FALSE);
-
- if (cfg->arch.stack_alloc_size && cfg->arch.omit_fp && !ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
- ia64_begin_bundle_template (code, IA64_TEMPLATE_MLXS);
- ia64_nop_m (code, 0);
- ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
- ia64_stop (code);
- ia64_begin_bundle (code);
- }
- ia64_begin_bundle_template (code, IA64_TEMPLATE_MII);
+ code.region_start = cfg->native_code;
+
+ /* Label the unwind state at the start of the exception throwing region */
+ //ia64_unw_label_state (code, 1234);
+
if (cfg->arch.stack_alloc_size) {
if (cfg->arch.omit_fp) {
- if (ia64_is_imm14 (cfg->arch.stack_alloc_size))
+ if (ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
+ ia64_unw_pop_frames (code, 1);
ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
- else
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
+ ia64_unw_pop_frames (code, 1);
ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
+ }
}
- else
+ else {
+ ia64_unw_pop_frames (code, 1);
ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
+ }
}
- else
- /* FIXME: Optimize this away */
- ia64_nop_m (code, 0);
ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
- ia64_begin_bundle (code);
-
- ia64_begin_bundle_template (code, IA64_TEMPLATE_BBBS);
ia64_br_ret_reg (code, IA64_B0);
- ia64_nop_b (code, 0);
- ia64_nop_b (code, 0); ia64_stop (code);
- ia64_begin_bundle (code);
- ia64_codegen_set_automatic (code, TRUE);
ia64_codegen_close (code);
+ cfg->arch.r_epilog = mono_ia64_create_unwind_region (&code);
+ cfg->arch.r_pro->next = cfg->arch.r_epilog;
+
cfg->code_len = code.buf - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
- int nthrows;
+ int i, nthrows;
Ia64CodegenState code;
gboolean empty = TRUE;
- /*
+ //unw_dyn_region_info_t *r_exceptions;
MonoClass *exc_classes [16];
guint8 *exc_throw_start [16], *exc_throw_end [16];
- */
guint32 code_size = 0;
/* Compute needed space */
code_size += 4 + 7; /* sizeof (float) + alignment */
}
+ if (code_size == 0)
+ return;
+
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
ia64_codegen_init (code, cfg->native_code + cfg->code_len);
+ /* The unwind state here is the same as before the epilog */
+ //ia64_unw_copy_state (code, 1234);
+
/* add code to raise exceptions */
/* FIXME: Optimize this */
nthrows = 0;
MonoClass *exc_class;
guint8* throw_ip;
guint8* buf;
+ guint64 exc_token_index;
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
g_assert (exc_class);
+ exc_token_index = mono_metadata_token_index (exc_class->type_token);
throw_ip = cfg->native_code + patch_info->ip.i;
ia64_begin_bundle (code);
ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);
- ia64_movl (code, cfg->arch.reg_out0 + 0, exc_class->type_token);
+ /* Find a throw sequence for the same exception class */
+ for (i = 0; i < nthrows; ++i)
+ if (exc_classes [i] == exc_class)
+ break;
- ia64_begin_bundle (code);
+ if (i < nthrows) {
+ gint64 offset = exc_throw_end [i] - 16 - throw_ip;
- patch_info->data.name = "mono_arch_throw_corlib_exception";
- patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
- patch_info->ip.i = code.buf - cfg->native_code;
+ if (ia64_is_adds_imm (offset))
+ ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
+ else
+ ia64_movl (code, cfg->arch.reg_out0 + 1, offset);
- /* Indirect call */
- ia64_movl (code, GP_SCRATCH_REG, 0);
- ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
- ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
- ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
+ buf = code.buf + code.nins;
+ ia64_br_cond_pred (code, 0, 0);
+ ia64_begin_bundle (code);
+ ia64_patch (buf, exc_throw_start [i]);
- /* Compute the offset */
- buf = code.buf + 32;
- ia64_movl (code, cfg->arch.reg_out0 + 1, buf - throw_ip);
+ patch_info->type = MONO_PATCH_INFO_NONE;
+ }
+ else {
+ /* Arg1 */
+ buf = code.buf;
+ ia64_movl (code, cfg->arch.reg_out0 + 1, 0);
- ia64_br_call_reg (code, IA64_B0, IA64_B6);
+ ia64_begin_bundle (code);
+
+ if (nthrows < 16) {
+ exc_classes [nthrows] = exc_class;
+ exc_throw_start [nthrows] = code.buf;
+ }
+
+ /* Arg2 */
+ if (ia64_is_adds_imm (exc_token_index))
+ ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
+ else
+ ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);
+
+ patch_info->data.name = "mono_arch_throw_corlib_exception";
+ patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
+ patch_info->ip.i = code.buf + code.nins - cfg->native_code;
+
+ /* Indirect call */
+ ia64_movl (code, GP_SCRATCH_REG, 0);
+ ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
+ ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
+ ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
+
+ ia64_br_call_reg (code, IA64_B0, IA64_B6);
+
+ /* Patch up the throw offset */
+ ia64_begin_bundle (code);
+
+ ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));
+
+ if (nthrows < 16) {
+ exc_throw_end [nthrows] = code.buf;
+ nthrows ++;
+ }
+ }
empty = FALSE;
break;
ia64_codegen_close (code);
+ /* FIXME: */
+ //r_exceptions = mono_ia64_create_unwind_region (&code);
+ //cfg->arch.r_epilog = r_exceptions;
+
cfg->code_len = code.buf - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
di->u.pi.regions = cfg->arch.r_pro;
_U_dyn_register (di);
+
+ /*
+ {
+ unw_dyn_region_info_t *region = di->u.pi.regions;
+
+ printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
+ while (region) {
+ printf (" [Region: %d]\n", region->insn_count);
+ region = region->next;
+ }
+ }
+ */
}
void
static gboolean tls_offset_inited = FALSE;
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-
-static void
-setup_stack (MonoJitTlsData *tls)
-{
- NOT_IMPLEMENTED;
-}
-
-#endif
-
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
appdomain_tls_offset = mono_domain_get_tls_offset ();
thread_tls_offset = mono_thread_get_tls_offset ();
}
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- setup_stack (tls);
-#endif
}
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- struct sigaltstack sa;
-
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = SS_DISABLE;
- sigaltstack (&sa, NULL);
-
- if (tls->signal_stack)
- munmap (tls->signal_stack, SIGNAL_STACK_SIZE);
-#endif
}
void
vtarg->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, vtarg);
- mono_call_inst_add_outarg_reg (call, vtarg->dreg, out_reg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, out_reg, FALSE);
out_reg ++;
}
this->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, this);
- mono_call_inst_add_outarg_reg (call, this->dreg, out_reg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, this->dreg, out_reg, FALSE);
}
}
{
MonoInst *ins = NULL;
- /* FIXME: */
- return NULL;
-
- if (cmethod->klass == mono_defaults.math_class) {
- if (strcmp (cmethod->name, "Sin") == 0) {
- MONO_INST_NEW (cfg, ins, OP_SIN);
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Cos") == 0) {
- MONO_INST_NEW (cfg, ins, OP_COS);
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Tan") == 0) {
- return ins;
- MONO_INST_NEW (cfg, ins, OP_TAN);
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Atan") == 0) {
- return ins;
- MONO_INST_NEW (cfg, ins, OP_ATAN);
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Sqrt") == 0) {
- MONO_INST_NEW (cfg, ins, OP_SQRT);
- ins->inst_i0 = args [0];
- } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
- MONO_INST_NEW (cfg, ins, OP_ABS);
- ins->inst_i0 = args [0];
- }
-#if 0
- /* OP_FREM is not IEEE compatible */
- else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
- MONO_INST_NEW (cfg, ins, OP_FREM);
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- }
-#endif
+ if (cmethod->klass == mono_defaults.thread_class &&
+ strcmp (cmethod->name, "MemoryBarrier") == 0) {
+ MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
} else if(cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
- ins_iconst->inst_c0 = 1;
+ ins_iconst->inst_imm = 1;
ins->inst_i0 = args [0];
ins->inst_i1 = ins_iconst;
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
- ins_iconst->inst_c0 = -1;
+ ins_iconst->inst_imm = -1;
ins->inst_i0 = args [0];
ins->inst_i1 = ins_iconst;
- } else if (strcmp (cmethod->name, "Add") == 0) {
- guint32 opcode;
-
- if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_I4;
- else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_I8;
- else
- g_assert_not_reached ();
-
- MONO_INST_NEW (cfg, ins, opcode);
-
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
+ /* FIXME: */
} else if (strcmp (cmethod->name, "Exchange") == 0) {
guint32 opcode;
MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
ins->inst_i0 = args [0];
}
-
- /*
- * Can't implement CompareExchange methods this way since they have
- * three arguments.
- */
}
return ins;