liverange_start: len:0
liverange_end: len:0
+gc_liveness_def: len:0
+gc_liveness_use: len:0
+gc_spill_slot_liveness_def: len:0
+gc_param_slot_liveness_def: len:0
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, MONO_MAX_IREGS, &cfa);
+ ip, regs, MONO_MAX_IREGS,
+ save_locations, MONO_MAX_IREGS, &cfa);
for (i = 0; i < 16; ++i)
new_ctx->regs [i] = regs [i];
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, ppc_lr + 1, &cfa);
+ ip, regs, ppc_lr + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
/* we substract 4, so that the IP points into the call instruction */
MONO_CONTEXT_SET_IP (new_ctx, regs [ppc_lr] - 4);
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, MONO_MAX_IREGS + 1, &cfa);
+ ip, regs, MONO_MAX_IREGS + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
new_ctx->eax = regs [X86_EAX];
new_ctx->ebx = regs [X86_EBX];
#define MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD 1
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
+#define MONO_ARCH_GC_MAPS_SUPPORTED 1
gboolean
mono_amd64_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig) MONO_INTERNAL;
int fpstack [8];
int sp = 0;
#endif
- int num_sregs;
+ int num_sregs = 0;
int sregs [MONO_MAX_SRC_REGS];
if (!bb->code)
#include <mono/metadata/gc-internal.h>
//#if 0
-#ifdef HAVE_SGEN_GC
+#if defined(HAVE_SGEN_GC) && defined(MONO_ARCH_GC_MAPS_SUPPORTED)
#include <mono/metadata/sgen-gc.h>
#include <mono/metadata/gc-internal.h>
guint8 *reg_pin_bitmap;
} MonoCompileGC;
-#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+#define ALIGN_TO(val,align) ((((mgreg_t)val) + ((align) - 1)) & ~((align) - 1))
-#if 1
+#if 0
/* We don't support debug levels, its all-or-nothing */
#define DEBUG(s) do { s; fflush (logfile); } while (0)
#define DEBUG_ENABLED 1
gboolean has_context;
MonoJitTlsData *jit_tls;
/* For debugging */
- guint64 tid;
+ mgreg_t tid;
gpointer ref_to_track;
/* Number of frames collected during the !precise pass */
int nframes;
return 0;
else if (frame_reg == AMD64_RBP)
return 1;
+#elif defined(TARGET_X86)
+ if (frame_reg == X86_EBP)
+ return 0;
+ else if (frame_reg == X86_ESP)
+ return 1;
#else
NOT_IMPLEMENTED;
#endif
return AMD64_RSP;
else if (encoded == 1)
return AMD64_RBP;
+#elif defined(TARGET_X86)
+ if (encoded == 0)
+ return X86_EBP;
+ else if (encoded == 1)
+ return X86_ESP;
#else
NOT_IMPLEMENTED;
#endif
#else
static int callee_saved_regs [] = { AMD64_RBP, AMD64_RBX, AMD64_R12, AMD64_R13, AMD64_R14, AMD64_R15 };
#endif
+#elif defined(TARGET_X86)
+static int callee_saved_regs [] = { X86_EBX, X86_ESI, X86_EDI };
#endif
static guint32
encode_regmask (guint32 regmask)
{
-#ifdef TARGET_AMD64
int i;
guint32 res;
}
g_assert (regmask == 0);
return res;
-#else
- NOT_IMPLEMENTED;
- return regmask;
-#endif
}
static guint32
decode_regmask (guint32 regmask)
{
-#ifdef TARGET_AMD64
int i;
guint32 res;
if (regmask & (1 << i))
res |= (1 << callee_saved_regs [i]);
return res;
-#else
- NOT_IMPLEMENTED;
- return regmask;
-#endif
}
/*
}
}
+/*
+ * get_frame_pointer:
+ *
+ *   Return the value of the register FRAME_REG from CTX.
+ * Only the registers usable as frame pointers on each supported
+ * target are handled (rsp/rbp on amd64, esp/ebp on x86); any other
+ * register, or an unsupported target, hits the assert below.
+ */
+static inline mgreg_t
+get_frame_pointer (MonoContext *ctx, int frame_reg)
+{
+#if defined(TARGET_AMD64)
+ if (frame_reg == AMD64_RSP)
+ return ctx->rsp;
+ else if (frame_reg == AMD64_RBP)
+ return ctx->rbp;
+#elif defined(TARGET_X86)
+ if (frame_reg == X86_ESP)
+ return ctx->esp;
+ else if (frame_reg == X86_EBP)
+ return ctx->ebp;
+#endif
+ /* Unknown frame register for this target */
+ g_assert_not_reached ();
+ return 0;
+}
+
/*
* conservatively_pass:
*
}
}
- g_assert ((guint64)stack_limit % sizeof (mgreg_t) == 0);
+ g_assert ((mgreg_t)stack_limit % sizeof (mgreg_t) == 0);
#ifdef MONO_ARCH_HAVE_FIND_JIT_INFO_EXT
res = mono_find_jit_info_ext (frame.domain ? frame.domain : mono_domain_get (), tls->jit_tls, NULL, &ctx, &new_ctx, NULL, &lmf, new_reg_locations, &frame);
break;
#endif
- /* The last frame can be in any state so mark conservatively */
- if (last) {
- last = FALSE;
- continue;
- }
-
ji = frame.ji;
- pc_offset = (guint8*)MONO_CONTEXT_GET_IP (&ctx) - (guint8*)ji->code_start;
if (frame.type == FRAME_TYPE_MANAGED_TO_NATIVE) {
- /* These frames are unwound through an LMF, and we have no precise register tracking for those */
+ /*
+ * These frames are problematic for several reasons:
+ * - they are unwound through an LMF, and we have no precise register tracking for those.
+ * - the LMF might not contain a precise ip, so we can't compute the call site.
+ * - the LMF only unwinds to the wrapper frame, so we get these methods twice.
+ */
DEBUG (fprintf (logfile, "Mark(0): <Managed-to-native transition>\n"));
for (i = 0; i < MONO_MAX_IREGS; ++i) {
if (reg_locations [i]) {
continue;
}
+ /* The last frame can be in any state so mark conservatively */
+ if (last) {
+ if (ji) {
+ DEBUG (char *fname = mono_method_full_name (ji->method, TRUE); fprintf (logfile, "Mark(0): %s+0x%x (%p)\n", fname, pc_offset, (gpointer)MONO_CONTEXT_GET_IP (&ctx)); g_free (fname));
+ }
+ DEBUG (fprintf (logfile, "\t <Last frame>\n"));
+ last = FALSE;
+ continue;
+ }
+
+
+ pc_offset = (guint8*)MONO_CONTEXT_GET_IP (&ctx) - (guint8*)ji->code_start;
+
/* These frames are very problematic */
if (ji->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
DEBUG (char *fname = mono_method_full_name (ji->method, TRUE); fprintf (logfile, "Mark(0): %s+0x%x (%p)\n", fname, pc_offset, (gpointer)MONO_CONTEXT_GET_IP (&ctx)); g_free (fname));
p += map->callsite_entry_size * map->ncallsites;
bitmaps = p;
-#ifdef __x86_64__
- if (map->frame_reg == AMD64_RSP)
- fp = (guint8*)ctx.rsp;
- else if (map->frame_reg == AMD64_RBP)
- fp = (guint8*)ctx.rbp;
- else
- g_assert_not_reached ();
-#else
- fp = NULL;
- g_assert_not_reached ();
-#endif
+ fp = (guint8*)get_frame_pointer (&ctx, map->frame_reg);
real_frame_start = fp + map->start_offset;
frame_start = fp + map->start_offset + map->map_offset;
/* Compute min/max offsets from the fp */
/* Locals */
-#ifdef TARGET_AMD64
+#if defined(TARGET_AMD64) || defined(TARGET_X86)
locals_min_offset = ALIGN_TO (cfg->locals_min_stack_offset, sizeof (mgreg_t));
locals_max_offset = cfg->locals_max_stack_offset;
#else
p += encoded_size;
/* Callsite table */
- p = (guint8*)ALIGN_TO ((guint64)p, map->callsite_entry_size);
+ p = (guint8*)ALIGN_TO ((mgreg_t)p, map->callsite_entry_size);
if (map->callsite_entry_size == 1) {
guint8 *offsets = p;
for (i = 0; i < ncallsites; ++i)
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
/*
* EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
* have locals larger than 8 bytes we need to make sure that
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
* bytes to pop, we want to use pops. GCC does this (note it won't happen
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
call = (MonoCallInst*)ins;
x86_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
g_assert_not_reached ();
mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
+ } else {
+ cfg->frame_reg = X86_ESP;
}
alloc_size = cfg->stack_offset;
#define MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD 1
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
+#define MONO_ARCH_GC_MAPS_SUPPORTED 1
gboolean
mono_x86_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig) MONO_INTERNAL;