#include <mono/utils/mono-threads.h>
#include "debugger-agent.h"
#include "mini.h"
+ #include "seq-points.h"
/*
On iOS we can't use System.Environment.Exit () as it will do the wrong
#define HEADER_LENGTH 11
#define MAJOR_VERSION 2
-#define MINOR_VERSION 37
+#define MINOR_VERSION 38
typedef enum {
CMD_SET_VM = 1,
typedef enum {
CMD_STACK_FRAME_GET_VALUES = 1,
CMD_STACK_FRAME_GET_THIS = 2,
- CMD_STACK_FRAME_SET_VALUES = 3
+ CMD_STACK_FRAME_SET_VALUES = 3,
+ CMD_STACK_FRAME_GET_DOMAIN = 4,
} CmdStackFrame;
typedef enum {
register_socket_transport (void);
#endif
+static inline gboolean
+is_debugger_thread (void)
+{
+ return GetCurrentThreadId () == debugger_thread_id;
+}
+
static int
parse_address (char *address, char **host, int *port)
{
mono_native_tls_alloc (&debugger_tls_id, NULL);
+ /* Needed by the hash_table_new_type () call below */
+ mono_gc_base_init ();
+
thread_to_tls = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_GC);
MONO_GC_REGISTER_ROOT_FIXED (thread_to_tls);
breakpoints_init ();
suspend_init ();
- mini_get_debug_options ()->gen_seq_points = TRUE;
+ mini_get_debug_options ()->gen_seq_points_debug_data = TRUE;
/*
* This is needed because currently we don't handle liveness info.
*/
return count_threads_to_wait_for () == 0;
}
- static MonoSeqPointInfo*
- get_seq_points (MonoDomain *domain, MonoMethod *method)
- {
- MonoSeqPointInfo *seq_points;
-
- mono_domain_lock (domain);
- seq_points = g_hash_table_lookup (domain_jit_info (domain)->seq_points, method);
- if (!seq_points && method->is_inflated) {
- /* generic sharing + aot */
- seq_points = g_hash_table_lookup (domain_jit_info (domain)->seq_points, mono_method_get_declaring_generic_method (method));
- if (!seq_points)
- seq_points = g_hash_table_lookup (domain_jit_info (domain)->seq_points, mini_get_shared_method (method));
- }
- mono_domain_unlock (domain);
-
- return seq_points;
- }
-
/*
 * no_seq_points_found:
 *
 * Diagnostic helper: report that no sequence-point table could be
 * located for METHOD. Emits a single line to stdout; does not abort.
 */
static void
no_seq_points_found (MonoMethod *method)
{
	const char *full_name = mono_method_full_name (method, TRUE);

	printf ("Unable to find seq points for method '%s'.\n", full_name);
}
- /*
- * find_next_seq_point_for_native_offset:
- *
- * Find the first sequence point after NATIVE_OFFSET.
- */
- static SeqPoint*
- find_next_seq_point_for_native_offset (MonoDomain *domain, MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info)
- {
- MonoSeqPointInfo *seq_points;
- int i;
-
- seq_points = get_seq_points (domain, method);
- if (!seq_points) {
- if (info)
- *info = NULL;
- return NULL;
- }
- g_assert (seq_points);
- if (info)
- *info = seq_points;
-
- for (i = 0; i < seq_points->len; ++i) {
- if (seq_points->seq_points [i].native_offset >= native_offset)
- return &seq_points->seq_points [i];
- }
-
- return NULL;
- }
-
- /*
- * find_prev_seq_point_for_native_offset:
- *
- * Find the first sequence point before NATIVE_OFFSET.
- */
- static SeqPoint*
- find_prev_seq_point_for_native_offset (MonoDomain *domain, MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info)
- {
- MonoSeqPointInfo *seq_points;
- int i;
-
- seq_points = get_seq_points (domain, method);
- if (info)
- *info = seq_points;
- if (!seq_points)
- return NULL;
-
- for (i = seq_points->len - 1; i >= 0; --i) {
- if (seq_points->seq_points [i].native_offset <= native_offset)
- return &seq_points->seq_points [i];
- }
-
- return NULL;
- }
-
- /*
- * find_seq_point:
- *
- * Find the sequence point corresponding to the IL offset IL_OFFSET, which
- * should be the location of a sequence point.
- */
- static G_GNUC_UNUSED SeqPoint*
- find_seq_point (MonoDomain *domain, MonoMethod *method, gint32 il_offset, MonoSeqPointInfo **info)
- {
- MonoSeqPointInfo *seq_points;
- int i;
-
- *info = NULL;
-
- seq_points = get_seq_points (domain, method);
- if (!seq_points)
- return NULL;
- *info = seq_points;
-
- for (i = 0; i < seq_points->len; ++i) {
- if (seq_points->seq_points [i].il_offset == il_offset)
- return &seq_points->seq_points [i];
- }
-
- return NULL;
- }
-
typedef struct {
DebuggerTlsData *tls;
GSList *frames;
ComputeFramesUserData *ud = user_data;
StackFrame *frame;
MonoMethod *method, *actual_method, *api_method;
- SeqPoint *sp;
+ SeqPoint sp;
int flags = 0;
if (info->type != FRAME_TYPE_MANAGED) {
if (info->il_offset == -1) {
/* mono_debug_il_offset_from_address () doesn't seem to be precise enough (#2092) */
if (ud->frames == NULL) {
- sp = find_prev_seq_point_for_native_offset (info->domain, method, info->native_offset, NULL);
- if (sp)
- info->il_offset = sp->il_offset;
+ if (find_prev_seq_point_for_native_offset (info->domain, method, info->native_offset, NULL, &sp))
+ info->il_offset = sp.il_offset;
}
if (info->il_offset == -1)
info->il_offset = mono_debug_il_offset_from_address (method, info->domain, info->native_offset);
{
DebuggerTlsData *tls;
+ /* This might be called during shutdown on the debugger thread from the CMD_VM_EXIT code */
+ if (is_debugger_thread ())
+ return;
+
/*
* Remember the currently unloading appdomain as it is needed to generate
* proper ids for unloading assemblies.
{
DebuggerTlsData *tls;
+ if (is_debugger_thread ())
+ return;
+
tls = mono_native_tls_get_value (debugger_tls_id);
g_assert (tls);
tls->domain_unloading = NULL;
guint8 *ip;
MonoJitInfo *ji;
MonoDomain *domain;
- SeqPoint *sp;
} BreakpointInstance;
/*
static void
insert_breakpoint (MonoSeqPointInfo *seq_points, MonoDomain *domain, MonoJitInfo *ji, MonoBreakpoint *bp, MonoError *error)
{
- int i, count;
+ int count;
BreakpointInstance *inst;
- SeqPoint *sp = NULL;
+ SeqPointIterator it;
+ gboolean it_has_sp = FALSE;
if (error)
mono_error_init (error);
- for (i = 0; i < seq_points->len; ++i) {
- sp = &seq_points->seq_points [i];
-
- if (sp->il_offset == bp->il_offset)
+ seq_point_iterator_init (&it, seq_points);
+ while (seq_point_iterator_next (&it)) {
+ if (it.seq_point.il_offset == bp->il_offset) {
+ it_has_sp = TRUE;
break;
+ }
}
- if (i == seq_points->len) {
+ if (!it_has_sp) {
/*
* The set of IL offsets with seq points doesn't completely match the
* info returned by CMD_METHOD_GET_DEBUG_INFO (#407).
*/
- for (i = 0; i < seq_points->len; ++i) {
- sp = &seq_points->seq_points [i];
-
- if (sp->il_offset != METHOD_ENTRY_IL_OFFSET && sp->il_offset != METHOD_EXIT_IL_OFFSET && sp->il_offset + 1 == bp->il_offset)
+ seq_point_iterator_init (&it, seq_points);
+ while (seq_point_iterator_next (&it)) {
+ if (it.seq_point.il_offset != METHOD_ENTRY_IL_OFFSET &&
+ it.seq_point.il_offset != METHOD_EXIT_IL_OFFSET &&
+ it.seq_point.il_offset + 1 == bp->il_offset) {
+ it_has_sp = TRUE;
break;
+ }
}
}
- if (i == seq_points->len) {
- char *s = g_strdup_printf ("Unable to insert breakpoint at %s:%d, seq_points=%d\n", mono_method_full_name (jinfo_get_method (ji), TRUE), bp->il_offset, seq_points->len);
+ if (!it_has_sp) {
+ char *s = g_strdup_printf ("Unable to insert breakpoint at %s:%d", mono_method_full_name (jinfo_get_method (ji), TRUE), bp->il_offset);
- for (i = 0; i < seq_points->len; ++i)
- DEBUG (1, fprintf (log_file, "%d\n", seq_points->seq_points [i].il_offset));
+ seq_point_iterator_init (&it, seq_points);
+ while (seq_point_iterator_next (&it))
+ DEBUG (1, fprintf (log_file, "%d\n", it.seq_point.il_offset));
if (error) {
mono_error_set_error (error, MONO_ERROR_GENERIC, "%s", s);
}
inst = g_new0 (BreakpointInstance, 1);
- inst->sp = sp;
- inst->native_offset = sp->native_offset;
- inst->ip = (guint8*)ji->code_start + sp->native_offset;
+ inst->il_offset = it.seq_point.il_offset;
+ inst->native_offset = it.seq_point.native_offset;
+ inst->ip = (guint8*)ji->code_start + it.seq_point.native_offset;
inst->ji = ji;
inst->domain = domain;
g_hash_table_insert (bp_locs, inst->ip, GINT_TO_POINTER (count + 1));
dbg_unlock ();
- if (sp->native_offset == SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) {
+ if (it.seq_point.native_offset == SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) {
DEBUG (1, fprintf (log_file, "[dbg] Attempting to insert seq point at dead IL offset %d, ignoring.\n", (int)bp->il_offset));
} else if (count == 0) {
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
#endif
}
- DEBUG(1, fprintf (log_file, "[dbg] Inserted breakpoint at %s:0x%x [%p](%d).\n", mono_method_full_name (jinfo_get_method (ji), TRUE), (int)sp->il_offset, inst->ip, count));
+ DEBUG(1, fprintf (log_file, "[dbg] Inserted breakpoint at %s:0x%x [%p](%d).\n", mono_method_full_name (jinfo_get_method (ji), TRUE), (int)it.seq_point.il_offset, inst->ip, count));
}
static void
MonoContext *ctx = &tls->restore_ctx;
MonoMethod *method;
MonoSeqPointInfo *info;
- SeqPoint *sp;
+ SeqPoint sp;
+ gboolean found_sp;
// FIXME: Speed this up
* The ip points to the instruction causing the breakpoint event, which is after
* the offset recorded in the seq point map, so find the prev seq point before ip.
*/
- sp = find_prev_seq_point_for_native_offset (mono_domain_get (), method, native_offset, &info);
- if (!sp)
+ found_sp = find_prev_seq_point_for_native_offset (mono_domain_get (), method, native_offset, &info, &sp);
+
+ if (!found_sp)
no_seq_points_found (method);
- g_assert (sp);
- DEBUG(1, fprintf (log_file, "[%p] Breakpoint hit, method=%s, ip=%p, offset=0x%x, sp il offset=0x%x.\n", (gpointer)GetCurrentThreadId (), method->name, ip, native_offset, sp ? sp->il_offset : -1));
+ g_assert (found_sp);
+
+ DEBUG(1, fprintf (log_file, "[%p] Breakpoint hit, method=%s, ip=%p, offset=0x%x, sp il offset=0x%x.\n", (gpointer)GetCurrentThreadId (), method->name, ip, native_offset, sp.il_offset));
bp = NULL;
for (i = 0; i < breakpoints->len; ++i) {
for (j = 0; j < bp->children->len; ++j) {
inst = g_ptr_array_index (bp->children, j);
- if (inst->ji == ji && inst->sp == sp) {
+ if (inst->ji == ji && inst->il_offset == sp.il_offset && inst->native_offset == sp.native_offset) {
if (bp->req->event_kind == EVENT_KIND_STEP) {
g_ptr_array_add (ss_reqs_orig, bp->req);
} else {
}
if (bp_reqs->len == 0 && ss_reqs_orig->len == 0) {
/* Maybe a method entry/exit event */
- if (sp->il_offset == METHOD_ENTRY_IL_OFFSET)
+ if (sp.il_offset == METHOD_ENTRY_IL_OFFSET)
kind = EVENT_KIND_METHOD_ENTRY;
- else if (sp->il_offset == METHOD_EXIT_IL_OFFSET)
+ else if (sp.il_offset == METHOD_EXIT_IL_OFFSET)
kind = EVENT_KIND_METHOD_EXIT;
}
if (mono_thread_internal_current () != ss_req->thread)
continue;
- hit = ss_update (ss_req, ji, sp, tls, ctx);
+ hit = ss_update (ss_req, ji, &sp, tls, ctx);
if (hit)
g_ptr_array_add (ss_reqs, req);
/* Start single stepping again from the current sequence point */
- ss_start (ss_req, method, sp, info, ctx, tls, FALSE);
+ ss_start (ss_req, method, &sp, info, ctx, tls, FALSE);
}
if (ss_reqs->len > 0)
GSList *events;
MonoContext *ctx = &tls->restore_ctx;
MonoMethod *method;
- SeqPoint *sp;
+ SeqPoint sp;
MonoSeqPointInfo *info;
ip = MONO_CONTEXT_GET_IP (ctx);
* The ip points to the instruction causing the single step event, which is before
* the offset recorded in the seq point map, so find the next seq point after ip.
*/
- sp = find_next_seq_point_for_native_offset (domain, method, (guint8*)ip - (guint8*)ji->code_start, &info);
- if (!sp)
+ if (!find_next_seq_point_for_native_offset (domain, method, (guint8*)ip - (guint8*)ji->code_start, &info, &sp))
return;
- il_offset = sp->il_offset;
- if (!ss_update (ss_req, ji, sp, tls, ctx))
+ il_offset = sp.il_offset;
+
+ if (!ss_update (ss_req, ji, &sp, tls, ctx))
return;
/* Start single stepping again from the current sequence point */
- ss_start (ss_req, method, sp, info, ctx, tls, FALSE);
+ ss_start (ss_req, method, &sp, info, ctx, tls, FALSE);
if ((ss_req->filter & STEP_FILTER_STATIC_CTOR) &&
(method->flags & METHOD_ATTRIBUTE_SPECIAL_NAME) &&
* belong to the same thread as CTX.
*/
static void
- ss_start (SingleStepReq *ss_req, MonoMethod *method, SeqPoint *sp, MonoSeqPointInfo *info, MonoContext *ctx, DebuggerTlsData *tls, gboolean step_to_catch)
+ ss_start (SingleStepReq *ss_req, MonoMethod *method, SeqPoint* sp, MonoSeqPointInfo *info, MonoContext *ctx, DebuggerTlsData *tls, gboolean step_to_catch)
{
int i, j, frame_index;
SeqPoint *next_sp;
+ SeqPoint local_sp;
+ gboolean found_sp;
MonoBreakpoint *bp;
gboolean enable_global = FALSE;
StackFrame *frame = tls->frames [frame_index];
method = frame->method;
- sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info);
+ found_sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info, &local_sp);
+ sp = (found_sp)? &local_sp : NULL;
frame_index ++;
if (sp && sp->next_len != 0)
break;
StackFrame *frame = tls->frames [frame_index];
method = frame->method;
- sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info);
+ found_sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info, &local_sp);
+ sp = (found_sp)? &local_sp : NULL;
if (sp && sp->next_len != 0)
break;
sp = NULL;
}
if (sp && sp->next_len > 0) {
- for (i = 0; i < sp->next_len; ++i) {
- next_sp = &info->seq_points [sp->next [i]];
+ SeqPoint* next = g_new(SeqPoint, sp->next_len);
+
+ seq_point_init_next (info, *sp, next);
+ for (i = 0; i < sp->next_len; i++) {
+ next_sp = &next[i];
bp = set_breakpoint (method, next_sp->il_offset, ss_req->req, NULL);
ss_req->bps = g_slist_append (ss_req->bps, bp);
}
+ g_free (next);
}
if (ss_req->depth == STEP_DEPTH_OVER) {
for (j = 0; j < jinfo->num_clauses; ++j) {
MonoJitExceptionInfo *ei = &jinfo->clauses [j];
- sp = find_next_seq_point_for_native_offset (frame->domain, frame->method, (char*)ei->handler_start - (char*)jinfo->code_start, NULL);
+ found_sp = find_next_seq_point_for_native_offset (frame->domain, frame->method, (char*)ei->handler_start - (char*)jinfo->code_start, NULL, &local_sp);
+ sp = (found_sp)? &local_sp : NULL;
if (sp) {
bp = set_breakpoint (frame->method, sp->il_offset, ss_req->req, NULL);
ss_req->bps = g_slist_append (ss_req->bps, bp);
DebuggerTlsData *tls;
MonoSeqPointInfo *info = NULL;
SeqPoint *sp = NULL;
+ SeqPoint local_sp;
+ gboolean found_sp;
MonoMethod *method = NULL;
MonoDebugMethodInfo *minfo;
gboolean step_to_catch = FALSE;
* Find the seq point corresponding to the landing site ip, which is the first seq
* point after ip.
*/
- sp = find_next_seq_point_for_native_offset (frame.domain, frame.method, frame.native_offset, &info);
+ found_sp = find_next_seq_point_for_native_offset (frame.domain, frame.method, frame.native_offset, &info, &local_sp);
+ sp = (found_sp)? &local_sp : NULL;
if (!sp)
no_seq_points_found (frame.method);
g_assert (sp);
if (!method && frame->il_offset != -1) {
/* FIXME: Sort the table and use a binary search */
- sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info);
+ found_sp = find_prev_seq_point_for_native_offset (frame->domain, frame->method, frame->native_offset, &info, &local_sp);
+ sp = (found_sp)? &local_sp : NULL;
if (!sp)
no_seq_points_found (frame->method);
g_assert (sp);
MonoMethod *method;
MonoDomain *domain;
MonoSeqPointInfo *seq_points;
- SeqPoint *sp = NULL;
+ SeqPoint sp;
+ gboolean found_sp;
gint64 il_offset;
- int i;
method = decode_methodid (p, &p, end, &domain, &err);
if (err)
if (tls->frame_count == 0 || tls->frames [0]->actual_method != method)
return ERR_INVALID_ARGUMENT;
- seq_points = get_seq_points (domain, method);
- g_assert (seq_points);
+ found_sp = find_seq_point (domain, method, il_offset, &seq_points, &sp);
- for (i = 0; i < seq_points->len; ++i) {
- sp = &seq_points->seq_points [i];
+ g_assert (seq_points);
- if (sp->il_offset == il_offset)
- break;
- }
- if (i == seq_points->len)
+ if (!found_sp)
return ERR_INVALID_ARGUMENT;
// FIXME: Check that the ip change is safe
- DEBUG (1, fprintf (log_file, "[dbg] Setting IP to %s:0x%0x(0x%0x)\n", tls->frames [0]->actual_method->name, (int)sp->il_offset, (int)sp->native_offset));
- MONO_CONTEXT_SET_IP (&tls->restore_ctx, (guint8*)tls->frames [0]->ji->code_start + sp->native_offset);
+ DEBUG (1, fprintf (log_file, "[dbg] Setting IP to %s:0x%0x(0x%0x)\n", tls->frames [0]->actual_method->name, (int)sp.il_offset, (int)sp.native_offset));
+ MONO_CONTEXT_SET_IP (&tls->restore_ctx, (guint8*)tls->frames [0]->ji->code_start + sp.native_offset);
break;
}
default:
mono_metadata_free_mh (header);
break;
}
+ case CMD_STACK_FRAME_GET_DOMAIN: {
+ if (CHECK_PROTOCOL_VERSION (2, 38))
+ buffer_add_domainid (buf, frame->domain);
+ break;
+ }
default:
return ERR_NOT_IMPLEMENTED;
}
static const char* stack_frame_cmds_str[] = {
"GET_VALUES",
"GET_THIS",
- "SET_VALUES"
+ "SET_VALUES",
+ "GET_DOMAIN",
};
static const char* array_cmds_str[] = {
}
#endif
-
/* FIXME: ensure the sc call preserves all but r3 */
#define emit_darwing4_tls(code,dreg,key) do {\
int off1 = 0x48 + key * sizeof (gpointer); \
- if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r11, ppc_r3); \
+ if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r12, ppc_r3); \
ppc_li ((code), ppc_r0, 0x7FF2); \
ppc_sc ((code)); \
ppc_lwz ((code), (dreg), off1, ppc_r3); \
- if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r11); \
+ if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r12); \
} while (0);
#ifdef PPC_THREAD_PTR_REG
ppc_ldptr ((code), (dreg), off1, PPC_THREAD_PTR_REG); \
} else { \
int off3 = (off2 + 1) > 1; \
- ppc_addis ((code), ppc_r11, PPC_THREAD_PTR_REG, off3); \
- ppc_ldptr ((code), (dreg), off1, ppc_r11); \
+ ppc_addis ((code), ppc_r12, PPC_THREAD_PTR_REG, off3); \
+ ppc_ldptr ((code), (dreg), off1, ppc_r12); \
} \
} while (0);
#else
ppc_load (code, ppc_r0, shifted);
ppc_mtctr (code, ppc_r0);
- //g_assert (sreg == ppc_r11);
- ppc_addi (code, ppc_r12, dreg, (doffset - sizeof (gpointer)));
- ppc_addi (code, ppc_r11, sreg, (soffset - sizeof (gpointer)));
+ //g_assert (sreg == ppc_r12);
+ ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (gpointer)));
+ ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (gpointer)));
copy_loop_start = code;
- ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
- ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
+ ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
+ ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
copy_loop_jump = code;
ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
ppc_patch (copy_loop_jump, copy_loop_start);
size -= shifted * sizeof (gpointer);
doffset = soffset = 0;
- dreg = ppc_r12;
+ dreg = ppc_r11;
}
#ifdef __mono_ppc64__
/* the hardware has multiple load/store units and the move is long
enough to use more then one regiester, then use load/load/store/store
to execute 2 instructions per cycle. */
- if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
+ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
while (size >= 16) {
ppc_ldptr (code, ppc_r0, soffset, sreg);
- ppc_ldptr (code, ppc_r12, soffset+8, sreg);
+ ppc_ldptr (code, ppc_r11, soffset+8, sreg);
ppc_stptr (code, ppc_r0, doffset, dreg);
- ppc_stptr (code, ppc_r12, doffset+8, dreg);
+ ppc_stptr (code, ppc_r11, doffset+8, dreg);
size -= 16;
soffset += 16;
doffset += 16;
doffset += 8;
}
#else
- if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
+ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
while (size >= 8) {
ppc_lwz (code, ppc_r0, soffset, sreg);
- ppc_lwz (code, ppc_r12, soffset+4, sreg);
+ ppc_lwz (code, ppc_r11, soffset+4, sreg);
ppc_stw (code, ppc_r0, doffset, dreg);
- ppc_stw (code, ppc_r12, doffset+4, dreg);
+ ppc_stw (code, ppc_r11, doffset+4, dreg);
size -= 8;
soffset += 8;
doffset += 8;
for (i = 14; i < top; ++i) {
/*
* Reserve r29 for holding the vtable address for virtual calls in AOT mode,
- * since the trampolines can clobber r11.
+ * since the trampolines can clobber r12.
*/
if (!(cfg->compile_aot && i == 29))
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
ppc_load_ptr (code, ppc_r3, cfg->method);
ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
- ppc_load_func (code, ppc_r0, func);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, func);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
return code;
}
}
ppc_load_ptr (code, ppc_r3, cfg->method);
- ppc_load_func (code, ppc_r0, func);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, func);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
switch (save_mode) {
static void
patch_ins (guint8 *code, guint32 ins)
{
- *(guint32*)code = GUINT32_TO_BE (ins);
+ *(guint32*)code = ins;
mono_arch_flush_icache (code, 4);
}
void
ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
{
- guint32 ins = GUINT32_FROM_BE (*(guint32*)code);
+ guint32 ins = *(guint32*)code;
guint32 prim = ins >> 26;
guint32 ovf;
else
code -= 24;
} else {
- if (ppc_is_load_op (seq [5]) || ppc_opcode (seq [5]) == 31) /* ld || lwz || mr */
+ if (ppc_is_load_op (seq [5])
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ /* With function descs we need to do more careful
+ matches. */
+ || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
+#endif
+ )
branch_ins = seq + 8;
else
branch_ins = seq + 6;
if (!is_fd) {
guint8 *buf = (guint8*)&seq [5];
- ppc_mr (buf, ppc_r0, ppc_r11);
+ ppc_mr (buf, PPC_CALL_REG, ppc_r12);
ppc_nop (buf);
}
} else {
}
/* FIXME: make this thread safe */
- /* FIXME: we're assuming we're using r11 here */
- ppc_load_ptr_sequence (code, ppc_r11, target);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ /* FIXME: we're assuming we're using r12 here */
+ ppc_load_ptr_sequence (code, ppc_r12, target);
+#else
+ ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
+#endif
mono_arch_flush_icache ((guint8*)seq, 28);
#else
guint32 *seq;
g_assert ((seq [2] >> 26) == 31);
g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
/* FIXME: make this thread safe */
- ppc_lis (code, ppc_r0, (guint32)(target) >> 16);
- ppc_ori (code, ppc_r0, ppc_r0, (guint32)(target) & 0xffff);
+ ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
+ ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
mono_arch_flush_icache (code - 8, 8);
#endif
} else {
if (ppc_is_imm16 (-size)) {
ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
} else {
- ppc_load (code, ppc_r11, -size);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_load (code, ppc_r12, -size);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
}
return code;
if (ppc_is_imm16 (size)) {
ppc_stptr_update (code, ppc_r0, size, ppc_sp);
} else {
- ppc_load (code, ppc_r11, size);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_load (code, ppc_r12, size);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
}
return code;
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_IL_SEQ_POINT:
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+ break;
case OP_SEQ_POINT: {
int i;
* a breakpoint is hit will step to the next IL offset.
*/
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- ppc_load (code, ppc_r11, (gsize)ss_trigger_page);
- ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+ ppc_load (code, ppc_r12, (gsize)ss_trigger_page);
+ ppc_ldptr (code, ppc_r12, 0, ppc_r12);
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_break");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
*/
g_assert (!cfg->method->save_lmf);
/*
- * Note: we can use ppc_r11 here because it is dead anyway:
+ * Note: we can use ppc_r12 here because it is dead anyway:
* we're leaving the method.
*/
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
if (ppc_is_imm16 (ret_offset)) {
ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, ret_offset);
- ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
+ ppc_load (code, ppc_r12, ret_offset);
+ ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
}
if (ppc_is_imm16 (cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
} else {
/* cfg->stack_usage is an int, so we can use
* an addis/addi sequence here even in 64-bit. */
- ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
- ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
+ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
+ ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
}
if (!cfg->method->save_lmf) {
pos = 0;
for (i = 31; i >= 13; --i) {
if (cfg->used_int_regs & (1 << i)) {
pos += sizeof (gpointer);
- ppc_ldptr (code, i, -pos, ppc_r11);
+ ppc_ldptr (code, i, -pos, ppc_r12);
}
}
} else {
/* Copy arguments on the stack to our argument area */
if (call->stack_usage) {
- code = emit_memcpy (code, call->stack_usage, ppc_r11, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
- /* r11 was clobbered */
+ code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
+ /* r12 was clobbered */
g_assert (cfg->frame_reg == ppc_sp);
if (ppc_is_imm16 (cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
} else {
/* cfg->stack_usage is an int, so we can use
* an addis/addi sequence here even in 64-bit. */
- ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
- ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
+ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
+ ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
}
}
- ppc_mr (code, ppc_sp, ppc_r11);
+ ppc_mr (code, ppc_sp, ppc_r12);
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
if (cfg->compile_aot) {
/* arch_emit_got_access () patches this */
ppc_load32 (code, ppc_r0, 0);
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
- ppc_ldptr_indexed (code, ppc_r11, ppc_r30, ppc_r0);
- ppc_ldptr (code, ppc_r0, 0, ppc_r11);
+ ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0);
+ ppc_ldptr (code, ppc_r0, 0, ppc_r12);
#else
ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
#endif
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
- if (cfg->compile_aot && ins->sreg1 == ppc_r11) {
+ if (cfg->compile_aot && ins->sreg1 == ppc_r12) {
/* The trampolines clobber this */
ppc_mr (code, ppc_r29, ins->sreg1);
ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
int area_offset = alloca_waste;
area_offset &= ~31;
- ppc_addi (code, ppc_r11, ins->sreg1, alloca_waste + 31);
+ ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31);
/* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
- ppc_clear_right_imm (code, ppc_r11, ppc_r11, 4);
+ ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4);
/* use ctr to store the number of words to 0 if needed */
if (ins->flags & MONO_INST_INIT) {
/* we zero 4 bytes at a time:
ppc_mtctr (code, ppc_r0);
}
ppc_ldptr (code, ppc_r0, 0, ppc_sp);
- ppc_neg (code, ppc_r11, ppc_r11);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_neg (code, ppc_r12, ppc_r12);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/* FIXME: make this loop work in 8 byte
increments on PPC64 */
* run at least once
*/
ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
- ppc_li (code, ppc_r11, 0);
+ ppc_li (code, ppc_r12, 0);
zero_loop_start = code;
- ppc_stwu (code, ppc_r11, 4, ins->dreg);
+ ppc_stwu (code, ppc_r12, 4, ins->dreg);
zero_loop_jump = code;
ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
ppc_patch (zero_loop_jump, zero_loop_start);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
if (ppc_is_imm16 (spvar->inst_offset)) {
ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, spvar->inst_offset);
- ppc_stptr_indexed (code, ppc_r0, ppc_r11, spvar->inst_basereg);
+ ppc_load (code, ppc_r12, spvar->inst_offset);
+ ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg);
}
break;
}
if (ppc_is_imm16 (spvar->inst_offset)) {
ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, spvar->inst_offset);
- ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, spvar->inst_offset);
+ ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
/* FIXME: Optimize this */
ppc_bl (code, 1);
- ppc_mflr (code, ppc_r11);
+ ppc_mflr (code, ppc_r12);
ppc_b (code, 3);
*(double*)code = *(double*)ins->inst_p0;
code += 8;
- ppc_lfd (code, ins->dreg, 8, ppc_r11);
+ ppc_lfd (code, ins->dreg, 8, ppc_r12);
break;
case OP_R4CONST:
g_assert_not_reached ();
ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
}
#ifdef __mono_ppc64__
+#ifdef _LITTLE_ENDIAN
+#define patch_load_sequence(ip,val) do {\
+ guint16 *__load = (guint16*)(ip); \
+ g_assert (sizeof (val) == sizeof (gsize)); \
+ __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
+ __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
+ __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
+ __load [8] = ((guint64)(gsize)(val)) & 0xffff; \
+ } while (0)
+#elif defined _BIG_ENDIAN
#define patch_load_sequence(ip,val) do {\
guint16 *__load = (guint16*)(ip); \
g_assert (sizeof (val) == sizeof (gsize)); \
__load [9] = ((guint64)(gsize)(val)) & 0xffff; \
} while (0)
#else
+#error No endianness defined by compiler
+#endif
+#else
#define patch_load_sequence(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
} else {
if (pos)
- ppc_addi (code, ppc_r11, ppc_sp, -pos);
+ ppc_addi (code, ppc_r12, ppc_sp, -pos);
ppc_load (code, ppc_r0, -alloc_size);
ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
cfa_offset = alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
- code = save_registers (cfg, code, 0, ppc_r11, method->save_lmf, cfg->used_int_regs, cfa_offset);
+ code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset);
}
}
if (cfg->frame_reg != ppc_sp) {
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
}
}
else if (ainfo->regtype == RegTypeFP)
ppc_fmr (code, inst->dreg, ainfo->reg);
else if (ainfo->regtype == RegTypeBase) {
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r11);
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12);
} else
g_assert_not_reached ();
ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_str_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
}
break;
#else
ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
} else {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
- ppc_stw (code, ainfo->reg, 0, ppc_r11);
- ppc_stw (code, ainfo->reg + 1, 4, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
+ ppc_stw (code, ainfo->reg, 0, ppc_r12);
+ ppc_stw (code, ainfo->reg + 1, 4, ppc_r12);
}
break;
#endif
ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
}
} else if (ainfo->regtype == RegTypeBase) {
g_assert (ppc_is_imm16 (ainfo->offset));
- /* load the previous stack pointer in r11 */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r11);
+ /* load the previous stack pointer in r12 */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12);
switch (ainfo->size) {
case 1:
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_str_indexed (code, ppc_r0, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg);
}
break;
#else
g_assert (ppc_is_imm16 (ainfo->offset + 4));
if (ppc_is_imm16 (inst->inst_offset + 4)) {
ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
- ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r11);
+ ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12);
ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
} else {
- /* use r12 to load the 2nd half of the long before we clobber r11. */
- ppc_lwz (code, ppc_r12, ainfo->offset + 4, ppc_r11);
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
- ppc_stw (code, ppc_r0, 0, ppc_r11);
- ppc_stw (code, ppc_r12, 4, ppc_r11);
+ /* use r11 to load the 2nd half of the long before we clobber r12. */
+ ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
+ ppc_stw (code, ppc_r0, 0, ppc_r12);
+ ppc_stw (code, ppc_r11, 4, ppc_r12);
}
break;
#endif
ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
/* FIXME: we need to do the shifting here, too */
if (ainfo->bytes)
NOT_IMPLEMENTED;
- /* load the previous stack pointer in r11 (r0 gets overwritten by the memcpy) */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
+ /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
code = emit_memcpy (code, size - soffset,
inst->inst_basereg, doffset,
- ppc_r11, ainfo->offset + soffset);
+ ppc_r12, ainfo->offset + soffset);
} else {
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
inst->inst_basereg, doffset,
- ppc_r11, ainfo->offset + soffset);
+ ppc_r12, ainfo->offset + soffset);
}
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
/* if it was originally a RegTypeBase */
if (ainfo->offset) {
- /* load the previous stack pointer in r11 */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, ppc_r11, ainfo->offset, ppc_r11);
+ /* load the previous stack pointer in r12 */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12);
} else {
- ppc_mr (code, ppc_r11, ainfo->reg);
+ ppc_mr (code, ppc_r12, ainfo->reg);
}
if (cfg->tailcall_valuetype_addrs) {
MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
g_assert (ppc_is_imm16 (addr->inst_offset));
- ppc_stptr (code, ppc_r11, addr->inst_offset, addr->inst_basereg);
+ ppc_stptr (code, ppc_r12, addr->inst_offset, addr->inst_basereg);
tailcall_struct_index++;
}
g_assert (ppc_is_imm16 (inst->inst_offset));
- code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r11, 0);
+ code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0);
/*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
} else
g_assert_not_reached ();
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
* of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
- * The pointer to the struct is put in ppc_r11 (new_lmf).
+ * The pointer to the struct is put in ppc_r12 (new_lmf).
* The callee-saved registers are already in the MonoLMF structure
*/
- ppc_addi (code, ppc_r11, ppc_sp, alloc_size - lmf_offset);
+ ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset);
/* ppc_r3 is the result from mono_get_lmf_addr () */
- ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
/* new_lmf->previous_lmf = *lmf_addr */
ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
- /* *(lmf_addr) = r11 */
- ppc_stptr (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
+ /* *(lmf_addr) = r12 */
+ ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
/* save method info */
if (cfg->compile_aot)
// FIXME:
ppc_load (code, ppc_r0, 0);
else
ppc_load_ptr (code, ppc_r0, method);
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
- ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12);
+ ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12);
/* save the current IP */
if (cfg->compile_aot) {
ppc_bl (code, 1);
ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
#endif
}
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12);
}
if (tracing)
lmf_offset = pos;
/* save the frame reg in r8 */
ppc_mr (code, ppc_r8, cfg->frame_reg);
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage - lmf_offset);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset);
/* r5 = previous_lmf */
- ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
+ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
/* r6 = lmf_addr */
- ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
/* *(lmf_addr) = previous_lmf */
ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
/* FIXME: speedup: there is no actual need to restore the registers if
* we didn't actually change them (idea from Zoltan).
*/
/* restore iregs */
- ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
+ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12);
/* restore fregs */
/*for (i = 14; i < 32; i++) {
- ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
+ ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
}*/
g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
/* use the saved copy of the frame reg in r8 */
if (ppc_is_imm16 (return_offset)) {
ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, return_offset);
- ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
+ ppc_load (code, ppc_r12, return_offset);
+ ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
}
offset -= sizeof (mgreg_t);
}
if (cfg->frame_reg != ppc_sp)
- ppc_mr (code, ppc_r11, cfg->frame_reg);
+ ppc_mr (code, ppc_r12, cfg->frame_reg);
/* note r31 (possibly the frame register) is restored last */
for (i = 13; i <= 31; i++) {
if (cfg->used_int_regs & (1 << i)) {
}
}
if (cfg->frame_reg != ppc_sp)
- ppc_addi (code, ppc_sp, ppc_r11, cfg->stack_usage);
+ ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage);
else
ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
} else {
- ppc_load32 (code, ppc_r11, cfg->stack_usage);
+ ppc_load32 (code, ppc_r12, cfg->stack_usage);
if (cfg->used_int_regs) {
- ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
+ ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12);
for (i = 31; i >= 13; --i) {
if (cfg->used_int_regs & (1 << i)) {
pos += sizeof (mgreg_t);
- ppc_ldr (code, i, -pos, ppc_r11);
+ ppc_ldr (code, i, -pos, ppc_r12);
}
}
- ppc_mr (code, ppc_sp, ppc_r11);
+ ppc_mr (code, ppc_sp, ppc_r12);
} else {
- ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r11);
+ ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12);
}
}
patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->ip.i = code - cfg->native_code;
if (FORCE_INDIR_CALL || cfg->method->dynamic) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtctr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtctr (code, PPC_CALL_REG);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
} else {
ppc_bl (code, 0);
start = code;
/*
- * We need to save and restore r11 because it might be
+ * We need to save and restore r12 because it might be
* used by the caller as the vtable register, so
* clobbering it will trip up the magic trampoline.
*
- * FIXME: Get rid of this by making sure that r11 is
+ * FIXME: Get rid of this by making sure that r12 is
* not used as the vtable register in interface calls.
*/
- ppc_stptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
- ppc_load (code, ppc_r11, (gsize)(& (vtable->vtable [0])));
+ ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0])));
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->has_target_code) {
ppc_load_ptr (code, ppc_r0, item->value.target_code);
} else {
- ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
- ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
+ ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
}
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
item->jmp_code = code;
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
#endif
- ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
- ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
+ ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
#if ENABLE_WRONG_METHOD_CHECK
* Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
* TARGET from the mscorlib GOT in full-aot code.
* On PPC, the GOT address is assumed to be in r30, and the result is placed into
- * r11.
+ * r12.
*/
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
{
/* Load the mscorlib got address */
- ppc_ldptr (code, ppc_r11, sizeof (gpointer), ppc_r30);
+ ppc_ldptr (code, ppc_r12, sizeof (gpointer), ppc_r30);
*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
/* arch_emit_got_access () patches this */
ppc_load32 (code, ppc_r0, 0);
- ppc_ldptr_indexed (code, ppc_r11, ppc_r11, ppc_r0);
+ ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0);
return code;
}
guint8 *code = ip;
guint8 *orig_code = code;
- ppc_load_sequence (code, ppc_r11, (gsize)bp_trigger_page);
- ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+ ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page);
+ ppc_ldptr (code, ppc_r12, 0, ppc_r12);
g_assert (code - orig_code == BREAKPOINT_SIZE);