ppc_load (code, ppc_r0, shifted);
ppc_mtctr (code, ppc_r0);
- g_assert (sreg == ppc_r11);
+ //g_assert (sreg == ppc_r11);
ppc_addi (code, ppc_r12, dreg, (doffset - sizeof (gpointer)));
ppc_addi (code, ppc_r11, sreg, (soffset - sizeof (gpointer)));
copy_loop_start = code;
* Returns the size of the activation frame.
*/
int
-mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
#ifdef __mono_ppc64__
NOT_IMPLEMENTED;
}
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
mgreg_t *r = (mgreg_t*)regs;
void
mono_arch_cpu_init (void)
{
-#ifdef __APPLE__
+}
+
+/*
+ * Initialize architecture specific code.
+ */
+void
+mono_arch_init (void)
+{
+#if defined(MONO_CROSS_COMPILE)
+#elif defined(__APPLE__)
int mib [3];
size_t len;
mib [0] = CTL_HW;
#elif defined(G_COMPILER_CODEWARRIOR)
cachelinesize = 32;
cachelineinc = 32;
-#elif defined(MONO_CROSS_COMPILE)
#else
//#error Need a way to get cache line size
#endif
if (mono_cpu_count () > 1)
cpu_hw_caps |= PPC_SMP_CAPABLE;
-}
-
-/*
- * Initialize architecture specific code.
- */
-void
-mono_arch_init (void)
-{
InitializeCriticalSection (&mini_arch_mutex);
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (guint32 *exclude_mask)
+mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
guint32 opts = 0;
return opts;
}
+/*
+ * This function tests for all supported SIMD versions.
+ *
+ * Returns a bitmask corresponding to all supported versions.
+ *
+ */
+guint32
+mono_arch_cpu_enumerate_simd_versions (void)
+{
+ /* SIMD is currently unimplemented */
+ return 0;
+}
+
#ifdef __mono_ppc64__
#define CASE_PPC32(c)
#define CASE_PPC64(c) case c:
#endif
static CallInfo*
-calculate_sizes (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
{
guint i, fr, gr, pstart;
int n = sig->hasthis + sig->param_count;
MonoType *simpletype;
guint32 stack_size = 0;
CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
+ gboolean is_pinvoke = sig->pinvoke;
fr = PPC_FIRST_FPARG_REG;
gr = PPC_FIRST_ARG_REG;
g_error ("Can't trampoline 0x%x", sig->params [i]->type);
}
}
+ cinfo->nargs = n;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
return cinfo;
}
-static void
-allocate_tailcall_valuetype_addrs (MonoCompile *cfg)
+gboolean
+mono_ppc_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
{
-#if !PPC_PASS_STRUCTS_BY_VALUE
- MonoMethodSignature *sig = mono_method_signature (cfg->method);
- int num_structs = 0;
+ CallInfo *c1, *c2;
+ gboolean res;
int i;
- if (!(cfg->flags & MONO_CFG_HAS_TAIL))
- return;
-
- for (i = 0; i < sig->param_count; ++i) {
- MonoType *type = mono_type_get_underlying_type (sig->params [i]);
- if (type->type == MONO_TYPE_VALUETYPE)
- num_structs++;
+ c1 = get_call_info (NULL, caller_sig);
+ c2 = get_call_info (NULL, callee_sig);
+ res = c1->stack_usage >= c2->stack_usage;
+ if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret))
+ /* An address on the callee's stack is passed as the first argument */
+ res = FALSE;
+ for (i = 0; i < c2->nargs; ++i) {
+ if (c2->args [i].regtype == RegTypeStructByAddr)
+ /* An address on the callee's stack is passed as the argument */
+ res = FALSE;
}
- if (num_structs) {
- cfg->tailcall_valuetype_addrs =
- mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * num_structs);
- for (i = 0; i < num_structs; ++i) {
- cfg->tailcall_valuetype_addrs [i] =
- mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- cfg->tailcall_valuetype_addrs [i]->flags |= MONO_INST_INDIRECT;
- }
- }
-#endif
+ /*
+ if (!mono_debug_count ())
+ res = FALSE;
+ */
+
+ g_free (c1);
+ g_free (c2);
+
+ return res;
}
/*
gint32 *offsets;
guint32 locals_stack_size, locals_stack_align;
- allocate_tailcall_valuetype_addrs (m);
-
m->flags |= MONO_CFG_HAS_SPILLUP;
/* allow room for the vararg method args: void* and long/double */
offset += sizeof(gpointer);
}
- offsets = mono_allocate_stack_slots_full (m, FALSE, &locals_stack_size, &locals_stack_align);
+ offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (gpointer))
size = align = sizeof (gpointer);
+ /*
+ * Use at least 4/8 byte alignment, since these might be passed in registers, and
+ * they are saved using std in the prolog.
+ */
+ align = sizeof (gpointer);
offset += align - 1;
offset &= ~(align - 1);
inst->inst_offset = offset;
m->stack_offset = offset;
if (sig->call_convention == MONO_CALL_VARARG) {
- CallInfo *cinfo = calculate_sizes (m->generic_sharing_context, m->method->signature, m->method->signature->pinvoke);
+ CallInfo *cinfo = get_call_info (m->generic_sharing_context, m->method->signature);
m->sig_cookie = cinfo->sig_cookie.offset;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = calculate_sizes (cfg->generic_sharing_context, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, sig);
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
return code;
}
-/*
- * emit_load_volatile_arguments:
- *
- * Load volatile arguments from the stack to the original input registers.
- * Required before a tail call.
- */
-static guint8*
-emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
-{
- MonoMethod *method = cfg->method;
- MonoMethodSignature *sig;
- MonoInst *inst;
- CallInfo *cinfo;
- guint32 i, pos;
- int struct_index = 0;
-
- sig = mono_method_signature (method);
-
- /* This is the opposite of the code in emit_prolog */
-
- pos = 0;
-
- cinfo = calculate_sizes (cfg->generic_sharing_context, sig, sig->pinvoke);
-
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- ArgInfo *ainfo = &cinfo->ret;
- inst = cfg->vret_addr;
- g_assert (ppc_is_imm16 (inst->inst_offset));
- ppc_ldptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- }
- for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- ArgInfo *ainfo = cinfo->args + i;
- inst = cfg->args [pos];
-
- g_assert (inst->opcode != OP_REGVAR);
- g_assert (ppc_is_imm16 (inst->inst_offset));
-
- switch (ainfo->regtype) {
- case RegTypeGeneral:
- switch (ainfo->size) {
- case 1:
- ppc_lbz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
- case 2:
- ppc_lhz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
-#ifdef __mono_ppc64__
- case 4:
- ppc_lwz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
-#endif
- default:
- ppc_ldptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
- }
- break;
-
- case RegTypeFP:
- switch (ainfo->size) {
- case 4:
- ppc_lfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
- case 8:
- ppc_lfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
- break;
- default:
- g_assert_not_reached ();
- }
- break;
-
- case RegTypeBase: {
- MonoType *type = mini_type_get_underlying_type (cfg->generic_sharing_context,
- &inst->klass->byval_arg);
-
-#ifndef __mono_ppc64__
- if (type->type == MONO_TYPE_I8)
- NOT_IMPLEMENTED;
-#endif
-
- if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_I8) {
- ppc_ldptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
- ppc_stptr (code, ppc_r0, ainfo->offset, ainfo->reg);
- } else if (type->type == MONO_TYPE_I4) {
- ppc_lwz (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
- ppc_stw (code, ppc_r0, ainfo->offset, ainfo->reg);
- } else {
- NOT_IMPLEMENTED;
- }
-
- break;
- }
-
- case RegTypeStructByVal: {
-#ifdef __APPLE__
- guint32 size = 0;
-#endif
- int j;
-
- /* FIXME: */
- if (ainfo->vtsize)
- NOT_IMPLEMENTED;
-#ifdef __APPLE__
- /*
- * Darwin pinvokes needs some special handling
- * for 1 and 2 byte arguments
- */
- if (method->signature->pinvoke)
- size = mono_class_native_size (inst->klass, NULL);
- if (size == 1 || size == 2) {
- /* FIXME: */
- NOT_IMPLEMENTED;
- } else
-#endif
- for (j = 0; j < ainfo->vtregs; ++j) {
- ppc_ldptr (code, ainfo->reg + j,
- inst->inst_offset + j * sizeof (gpointer),
- inst->inst_basereg);
- /* FIXME: shift to the right */
- if (ainfo->bytes)
- NOT_IMPLEMENTED;
- }
- break;
- }
-
- case RegTypeStructByAddr: {
- MonoInst *addr = cfg->tailcall_valuetype_addrs [struct_index];
-
- g_assert (ppc_is_imm16 (addr->inst_offset));
- g_assert (!ainfo->offset);
- ppc_ldptr (code, ainfo->reg, addr->inst_offset, addr->inst_basereg);
-
- struct_index++;
- break;
- }
-
- default:
- g_assert_not_reached ();
- }
-
- pos ++;
- }
-
- g_free (cinfo);
-
- return code;
-}
-
-/* This must be kept in sync with emit_load_volatile_arguments(). */
static int
ins_native_length (MonoCompile *cfg, MonoInst *ins)
{
- int len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
- MonoMethodSignature *sig;
- MonoCallInst *call;
- CallInfo *cinfo;
- int i;
-
- if (ins->opcode != OP_JMP)
- return len;
-
- call = (MonoCallInst*)ins;
- sig = mono_method_signature (cfg->method);
- cinfo = calculate_sizes (cfg->generic_sharing_context, sig, sig->pinvoke);
-
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- len += 4;
- for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- ArgInfo *ainfo = cinfo->args + i;
-
- switch (ainfo->regtype) {
- case RegTypeGeneral:
- case RegTypeFP:
- len += 4;
- break;
-
- case RegTypeBase:
- len += 8;
- break;
-
- case RegTypeStructByVal:
- len += 4 * ainfo->size;
- break;
-
- case RegTypeStructByAddr:
- len += 4;
- break;
-
- default:
- g_assert_not_reached ();
- }
- }
-
- g_free (cinfo);
-
- return len;
+ return ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
static guint8*
}
break;
case OP_BREAK:
- ppc_break (code);
+ /*
+ * gdb does not like encountering a trap in the debugged code. So
+	 * instead of emitting a trap, we emit a call to a C function and place a
+ * breakpoint there.
+ */
+ //ppc_break (code);
+ ppc_mr (code, ppc_r3, ins->sreg1);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_break");
+ if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
+ ppc_load_func (code, ppc_r0, 0);
+ ppc_mtlr (code, ppc_r0);
+ ppc_blrl (code);
+ } else {
+ ppc_bl (code, 0);
+ }
break;
case OP_ADDCC:
case OP_IADDCC:
ppc_sldi (code, ppc_r0, ppc_r0, 32);
#endif
ppc_compare (code, 0, ins->sreg1, ppc_r0);
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "ArithmeticException");
+ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
ppc_patch (divisor_is_m1, code);
/* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
*/
case OP_FCONV_TO_R4:
ppc_frsp (code, ins->dreg, ins->sreg1);
break;
- case OP_JMP: {
+ case OP_TAILCALL: {
int i, pos;
-
+ MonoCallInst *call = (MonoCallInst*)ins;
+
/*
* Keep in sync with mono_arch_emit_epilog
*/
ppc_mtlr (code, ppc_r0);
}
- code = emit_load_volatile_arguments (cfg, code);
-
if (ppc_is_imm16 (cfg->stack_usage)) {
ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
} else {
ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
}
if (!cfg->method->save_lmf) {
- /*for (i = 31; i >= 14; --i) {
- if (cfg->used_float_regs & (1 << i)) {
- pos += sizeof (double);
- ppc_lfd (code, i, -pos, cfg->frame_reg);
- }
- }*/
pos = 0;
for (i = 31; i >= 13; --i) {
if (cfg->used_int_regs & (1 << i)) {
} else {
/* FIXME restore from MonoLMF: though this can't happen yet */
}
+
+ /* Copy arguments on the stack to our argument area */
+ if (call->stack_usage) {
+ code = emit_memcpy (code, call->stack_usage, ppc_r11, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
+ /* r11 was clobbered */
+ g_assert (cfg->frame_reg == ppc_sp);
+ if (ppc_is_imm16 (cfg->stack_usage)) {
+ ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ } else {
+ /* cfg->stack_usage is an int, so we can use
+ * an addis/addi sequence here even in 64-bit. */
+ ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
+ ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
+ }
+ }
+
ppc_mr (code, ppc_sp, ppc_r11);
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
if (cfg->compile_aot) {
#ifndef DISABLE_JIT
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
gboolean compile_aot = !run_cctors;
tracing = 1;
sig = mono_method_signature (method);
- cfg->code_size = MONO_PPC_32_64_CASE (260, 384) + sig->param_count * 20;
+ cfg->code_size = 512 + sig->param_count * 32;
code = cfg->native_code = g_malloc (cfg->code_size);
cfa_offset = 0;
/* load arguments allocated to register from the stack */
pos = 0;
- cinfo = calculate_sizes (cfg->generic_sharing_context, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, sig);
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
ArgInfo *ainfo = &cinfo->ret;
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ainfo->reg, ppc_r11, inst->inst_offset);
+ ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ainfo->reg, ppc_r11, inst->inst_offset);
+ ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ainfo->reg, ppc_r11, inst->inst_offset);
+ ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ainfo->reg, ppc_r11, inst->inst_offset);
+ ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ppc_r0, ppc_r11, inst->inst_offset);
+ ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ppc_r0, ppc_r11, inst->inst_offset);
+ ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ppc_r0, ppc_r11, inst->inst_offset);
+ ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r11);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ppc_r0, ppc_r11, inst->inst_offset);
+ ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r11, inst->inst_offset);
ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r11);
pos++;
}
- if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
- if (cfg->compile_aot)
- /* AOT code is only used in the root domain */
- ppc_load_ptr (code, ppc_r3, 0);
- else
- ppc_load_ptr (code, ppc_r3, cfg->domain);
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
- if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
- ppc_blrl (code);
- } else {
- ppc_bl (code, 0);
- }
- }
-
if (method->save_lmf) {
if (lmf_pthread_key != -1) {
emit_tls_access (code, ppc_r3, lmf_pthread_key);
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
/*
return MONO_EXC_NULL_REF;
if (strcmp (name, "ArrayTypeMismatchException") == 0)
return MONO_EXC_ARRAY_TYPE_MISMATCH;
+ if (strcmp (name, "ArgumentException") == 0)
+ return MONO_EXC_ARGUMENT;
g_error ("Unknown intrinsic exception %s\n", name);
return 0;
}
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
code = cfg->native_code + cfg->code_len;
mono_domain_get_tls_offset returning -1) then use keyed access. */
if (monodomain_key == -1) {
ptk = mono_domain_get_tls_key ();
- if (ptk < 1024) {
- ptk = mono_pthread_key_for_tls (ptk);
- if (ptk < 1024) {
- monodomain_key = ptk;
- }
- }
+ if (ptk < 1024)
+ monodomain_key = ptk;
}
if ((lmf_pthread_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
lmf_pthread_key = mono_get_lmf_addr_tls_offset();
}
+
+#if 0
/* if not TLS_MODE_NPTL or local dynamic (as indicated by
mono_get_lmf_addr_tls_offset returning -1) then use keyed access. */
if (lmf_pthread_key == -1) {
- ptk = mono_pthread_key_for_tls (mono_jit_tls_id);
+ ptk = mono_jit_tls_id;
if (ptk < 1024) {
/*g_print ("MonoLMF at: %d\n", ptk);*/
/*if (!try_offset_access (mono_get_lmf_addr (), ptk)) {
}
}
#endif
+
+#endif
}
void
-mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
+mono_arch_finish_init (void)
{
setup_tls_access ();
}
return ins;
}
-gpointer
+mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
if (reg == ppc_r1)
- return MONO_CONTEXT_GET_SP (ctx);
+ return (mgreg_t)MONO_CONTEXT_GET_SP (ctx);
g_assert (reg >= ppc_r13);
- return (gpointer)(gsize)ctx->regs [reg - ppc_r13];
+ return ctx->regs [reg - ppc_r13];
}
guint32
return FALSE;
}
-/*
- * mono_arch_get_ip_for_breakpoint:
- *
- * See mini-amd64.c for docs.
- */
-guint8*
-mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- /* ip points at the ldptr instruction */
- ip -= PPC_LOAD_SEQUENCE_LENGTH;
-
- return ip;
-}
-
/*
* mono_arch_skip_breakpoint:
*
* See mini-amd64.c for docs.
*/
void
-mono_arch_skip_breakpoint (MonoContext *ctx)
+mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
/* skip the ldptr */
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
return FALSE;
}
-/*
- * mono_arch_get_ip_for_single_step:
- *
- * See mini-amd64.c for docs.
- */
-guint8*
-mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- /* ip points after the ldptr instruction */
- return ip;
-}
-
/*
* mono_arch_skip_single_step:
*