}
cindex = i;
- g_assert (real_frame_start >= stack_limit);
+ /*
+ * This is not necessarily true on x86 because frames have a different size at each
+ * call site.
+ */
+ //g_assert (real_frame_start >= stack_limit);
if (real_frame_start > stack_limit) {
/* This scans the previously skipped frames as well */
/* fp = sp + offset */
g_assert (cfg->frame_reg == AMD64_RBP);
return (- cfg->arch.sp_fp_offset + sp_offset);
+#elif defined(TARGET_X86)
+ /* The offset is computed from the sp at the start of the call sequence */
+ g_assert (cfg->frame_reg == X86_EBP);
+ return (- cfg->arch.sp_fp_offset - sp_offset);
#else
NOT_IMPLEMENTED;
return -1;
process_param_area_slots (MonoCompile *cfg)
{
MonoCompileGC *gcfg = cfg->gc_info;
- int i;
+ int cindex, i;
gboolean *is_param;
/*
is_param = mono_mempool_alloc0 (cfg->mempool, gcfg->nslots * sizeof (gboolean));
- for (i = 0; i < gcfg->ncallsites; ++i) {
- GCCallSite *callsite = gcfg->callsites [i];
+ for (cindex = 0; cindex < gcfg->ncallsites; ++cindex) {
+ GCCallSite *callsite = gcfg->callsites [cindex];
GSList *l;
for (l = callsite->param_slots; l; l = l->next) {
MonoInst *def = l->data;
+ MonoType *t = def->inst_vtype;
int sp_offset = def->inst_offset;
int fp_offset = sp_offset_to_fp_offset (cfg, sp_offset);
int slot = fp_offset_to_slot (cfg, fp_offset);
+ guint32 align;
+ guint32 size;
- g_assert (slot >= 0 && slot < gcfg->nslots);
- is_param [slot] = TRUE;
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ size = mini_type_stack_size_full (cfg->generic_sharing_context, t, &align, FALSE);
+ } else {
+ size = sizeof (mgreg_t);
+ }
+
+ for (i = 0; i < size / sizeof (mgreg_t); ++i) {
+ g_assert (slot + i >= 0 && slot + i < gcfg->nslots);
+ is_param [slot + i] = TRUE;
+ }
}
}
set_slot_everywhere (gcfg, i, SLOT_NOREF);
}
- for (i = 0; i < gcfg->ncallsites; ++i) {
- GCCallSite *callsite = gcfg->callsites [i];
+ for (cindex = 0; cindex < gcfg->ncallsites; ++cindex) {
+ GCCallSite *callsite = gcfg->callsites [cindex];
GSList *l;
for (l = callsite->param_slots; l; l = l->next) {
int slot = fp_offset_to_slot (cfg, fp_offset);
GCSlotType type = type_to_gc_slot_type (cfg, t);
- /* The slot is live between the def instruction and the call */
- set_slot_in_range (gcfg, slot, def->backend.pc_offset, callsite->pc_offset + 1, type);
- if (cfg->verbose_level > 1)
- printf ("\t%s param area slot at %s0x%x(fp)=0x%x(sp) (slot = %d) [0x%x-0x%x]\n", slot_type_to_string (type), fp_offset >= 0 ? "+" : "-", ABS (fp_offset), sp_offset, slot, def->backend.pc_offset, callsite->pc_offset + 1);
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ guint32 align;
+ guint32 size;
+ int size_in_slots;
+
+ size = mini_type_stack_size_full (cfg->generic_sharing_context, t, &align, FALSE);
+ size_in_slots = ALIGN_TO (size, SIZEOF_SLOT) / SIZEOF_SLOT;
+ // FIXME: slot type
+ for (i = 0; i < size_in_slots; ++i) {
+ set_slot_in_range (gcfg, slot + i, def->backend.pc_offset, callsite->pc_offset + 1, type);
+ }
+ if (cfg->verbose_level > 1)
+ printf ("\t%s param area slots at %s0x%x(fp)=0x%x(sp) (slot = %d-%d) [0x%x-0x%x]\n", slot_type_to_string (type), fp_offset >= 0 ? "+" : "-", ABS (fp_offset), sp_offset, slot, slot + (size / (int)sizeof (mgreg_t)), def->backend.pc_offset, callsite->pc_offset + 1);
+ } else {
+ /* The slot is live between the def instruction and the call */
+ set_slot_in_range (gcfg, slot, def->backend.pc_offset, callsite->pc_offset + 1, type);
+ if (cfg->verbose_level > 1)
+ printf ("\t%s param area slot at %s0x%x(fp)=0x%x(sp) (slot = %d) [0x%x-0x%x]\n", slot_type_to_string (type), fp_offset >= 0 ? "+" : "-", ABS (fp_offset), sp_offset, slot, def->backend.pc_offset, callsite->pc_offset + 1);
+ }
}
}
}
/* Param area slots */
#ifdef TARGET_AMD64
min_offset = MIN (min_offset, -cfg->arch.sp_fp_offset);
+#elif defined(TARGET_X86)
+ min_offset = MIN (min_offset, - (cfg->arch.sp_fp_offset + cfg->arch.param_area_size));
#endif
gcfg->min_offset = min_offset;
return;
}
if (locals_stack_align) {
+ int prev_offset = offset;
+
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
+
+ while (prev_offset < offset) {
+ prev_offset += 4;
+ mini_gc_set_slot_type_from_fp (cfg, - prev_offset, SLOT_NOREF);
+ }
}
cfg->locals_min_stack_offset = - (offset + locals_stack_size);
cfg->locals_max_stack_offset = - offset;
}
#endif
+/*
+ * emit_gc_param_slot_def:
+ *
+ *   Record that the stack slot at SP_OFFSET holds a call parameter of type T,
+ * by emitting a GC param-slot liveness def instruction. Only does anything when
+ * precise GC maps are being computed for this method. If T is NULL, the slot is
+ * treated as holding a plain integer (i.e. no GC references).
+ * NOTE(review): on x86, sp_offset is measured from the sp value before the start
+ * of the call sequence — see the comment below and the callers in
+ * mono_arch_emit_call.
+ */
+static void
+emit_gc_param_slot_def (MonoCompile *cfg, int sp_offset, MonoType *t)
+{
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
+
+ /* On x86, the offsets are from the sp value before the start of the call sequence */
+ if (t == NULL)
+ t = &mono_defaults.int_class->byval_arg;
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, sp_offset, t);
+ }
+}
+
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *arg, *in;
MonoMethodSignature *sig;
- int i, n;
+ int i, j, n;
CallInfo *cinfo;
- int sentinelpos = 0;
+ int sentinelpos = 0, sp_offset = 0;
sig = call->signature;
n = sig->param_count + sig->hasthis;
arg->sreg1 = X86_ESP;
arg->inst_imm = cinfo->stack_align_amount;
MONO_ADD_INS (cfg->cbb, arg);
+ for (i = 0; i < cinfo->stack_align_amount; i += sizeof (mgreg_t)) {
+ sp_offset += 4;
+
+ emit_gc_param_slot_def (cfg, sp_offset, NULL);
+ }
}
if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
}
}
+ // FIXME: Emit EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF everywhere
+
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
+ sp_offset += 4;
+ emit_gc_param_slot_def (cfg, sp_offset, NULL);
}
/* Arguments are pushed in the reverse order */
for (i = n - 1; i >= 0; i --) {
ArgInfo *ainfo = cinfo->args + i;
MonoType *t;
+ int argsize;
if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && i == 0) {
/* Push the vret arg before the first argument */
vtarg->type = STACK_MP;
vtarg->sreg1 = call->vret_var->dreg;
MONO_ADD_INS (cfg->cbb, vtarg);
+ sp_offset += 4;
+ emit_gc_param_slot_def (cfg, sp_offset, NULL);
}
if (i >= sig->hasthis)
arg->backend.size = size;
MONO_ADD_INS (cfg->cbb, arg);
+ sp_offset += size;
+ emit_gc_param_slot_def (cfg, sp_offset, t);
}
- }
- else {
+ } else {
+ argsize = 4;
+
switch (ainfo->storage) {
case ArgOnStack:
arg->opcode = OP_X86_PUSH;
arg->opcode = OP_STORER4_MEMBASE_REG;
arg->inst_destbasereg = X86_ESP;
arg->inst_offset = 0;
+ argsize = 4;
} else if (t->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
arg->opcode = OP_STORER8_MEMBASE_REG;
arg->inst_destbasereg = X86_ESP;
arg->inst_offset = 0;
+ argsize = 8;
} else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) {
arg->sreg1 ++;
MONO_EMIT_NEW_UNALU (cfg, OP_X86_PUSH, -1, in->dreg + 2);
+ sp_offset += 4;
}
}
break;
}
MONO_ADD_INS (cfg->cbb, arg);
+
+ sp_offset += argsize;
+
+ if (cfg->compute_gc_maps) {
+ if (argsize == 4) {
+ /* FIXME: The == STACK_OBJ check might be fragile ? */
+ if (sig->hasthis && i == 0 && call->args [i]->type == STACK_OBJ)
+ /* this */
+ emit_gc_param_slot_def (cfg, sp_offset, &mono_defaults.object_class->byval_arg);
+ else
+ emit_gc_param_slot_def (cfg, sp_offset, t);
+ } else {
+ /* i8/r8 */
+ for (j = 0; j < argsize; j += 4)
+ emit_gc_param_slot_def (cfg, sp_offset - j, NULL);
+ }
+ }
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
+ sp_offset += 4;
+ emit_gc_param_slot_def (cfg, sp_offset, NULL);
}
}
vtarg->type = STACK_MP;
vtarg->sreg1 = call->vret_var->dreg;
MONO_ADD_INS (cfg->cbb, vtarg);
+ sp_offset += 4;
+ emit_gc_param_slot_def (cfg, sp_offset, NULL);
}
/* if the function returns a struct on stack, the called method already does a ret $0x4 */
}
call->stack_usage = cinfo->stack_usage;
+ cfg->arch.param_area_size = MAX (cfg->arch.param_area_size, sp_offset);
}
void
x86_push_imm_template (code);
}
cfa_offset += sizeof (gpointer);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
/* save all caller saved regs */
x86_push_reg (code, X86_EBP);
cfa_offset += sizeof (gpointer);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
x86_push_reg (code, X86_ESI);
cfa_offset += sizeof (gpointer);
mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
x86_push_reg (code, X86_EDI);
cfa_offset += sizeof (gpointer);
mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
x86_push_reg (code, X86_EBX);
cfa_offset += sizeof (gpointer);
mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
/*
x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
/* skip esp + method_info + lmf */
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
+ cfa_offset += 12;
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + 4, SLOT_NOREF);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + 8, SLOT_NOREF);
/* push previous_lmf */
x86_push_reg (code, X86_EAX);
+ cfa_offset += 4;
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
/* new lmf = ESP */
x86_prefix (code, X86_GS_PREFIX);
x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
if (need_stack_frame)
tot += 4; /* ebp */
tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
- if (tot)
+ if (tot) {
alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
+ for (i = 0; i < MONO_ARCH_FRAME_ALIGNMENT - tot; i += sizeof (mgreg_t))
+ mini_gc_set_slot_type_from_fp (cfg, - (alloc_size + pos - i), SLOT_NOREF);
+ }
}
+ cfg->arch.sp_fp_offset = alloc_size + pos;
+
if (alloc_size) {
/* See mono_emit_stack_alloc */
#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)