static gboolean v6_supported = FALSE;
static gboolean v7_supported = FALSE;
static gboolean v7s_supported = FALSE;
+static gboolean v7k_supported = FALSE;
static gboolean thumb_supported = FALSE;
static gboolean thumb2_supported = FALSE;
/*
#define MAX_ARCH_DELEGATE_PARAMS 3
static gpointer
-get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
+get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
{
guint8 *code, *start;
+ GSList *unwind_ops = mono_arch_get_cie_program ();
if (has_target) {
start = code = mono_global_codeman_reserve (12);
mono_arch_flush_icache (start, size);
}
+ if (has_target) {
+ *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
+ } else {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
+ *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
+ g_free (name);
+ }
+
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (code_size)
- *code_size = code - start;
return start;
}
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
- guint8 *code;
- guint32 code_len;
+ MonoTrampInfo *info;
int i;
- char *tramp_name;
- code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
+ get_delegate_invoke_impl (&info, TRUE, 0);
+ res = g_slist_prepend (res, info);
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
- code = get_delegate_invoke_impl (FALSE, i, &code_len);
- tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ get_delegate_invoke_impl (&info, FALSE, i);
+ res = g_slist_prepend (res, info);
}
return res;
return cached;
}
- if (mono_aot_only)
+ if (mono_aot_only) {
start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
- else
- start = get_delegate_invoke_impl (TRUE, 0, NULL);
+ } else {
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, TRUE, 0);
+ mono_tramp_info_register (info, NULL);
+ }
cached = start;
mono_mini_arch_unlock ();
return cached;
start = mono_aot_get_trampoline (name);
g_free (name);
} else {
- start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
+ mono_tramp_info_register (info, NULL);
}
cache [sig->param_count] = start;
mono_mini_arch_unlock ();
#if defined(ENABLE_GSHAREDVT)
mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
#endif
+ mono_aot_register_jit_icall ("mono_arm_unaligned_stack", mono_arm_unaligned_stack);
#if defined(__ARM_EABI__)
eabi_supported = TRUE;
v5_supported = mono_hwcap_arm_is_v5;
v6_supported = mono_hwcap_arm_is_v6;
v7_supported = mono_hwcap_arm_is_v7;
- v7s_supported = mono_hwcap_arm_is_v7s;
#if defined(__APPLE__)
/* iOS is special-cased here because we don't yet
v6_supported = cpu_arch [4] >= '6';
v7_supported = cpu_arch [4] >= '7';
v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
+ v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
}
thumb_supported = strstr (cpu_arch, "thumb") != NULL;
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
- if (v7s_supported) {
+ if (v7s_supported || v7k_supported) {
switch (opcode) {
case OP_IDIV:
case OP_IREM:
typedef enum {
RegTypeNone,
+ /* Passed/returned in an ireg */
RegTypeGeneral,
+ /* Passed/returned in a pair of iregs */
RegTypeIRegPair,
+ /* Passed on the stack */
RegTypeBase,
+ /* First word in r3, second word on the stack */
RegTypeBaseGen,
+ /* FP value passed in either an ireg or a vfp reg */
RegTypeFP,
RegTypeStructByVal,
RegTypeStructByAddr,
cinfo->ret.reg = ARMREG_R0;
break;
}
- // FIXME: Only for variable types
- if (mini_is_gsharedvt_type (t)) {
+ if (mini_is_gsharedvt_variable_type (t)) {
cinfo->ret.storage = RegTypeStructByAddr;
break;
}
add_general (&gr, &stack_size, ainfo, TRUE);
break;
}
- if (mini_is_gsharedvt_type (t)) {
+ if (mini_is_gsharedvt_variable_type (t)) {
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (t));
add_general (&gr, &stack_size, ainfo, TRUE);
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
- if (cinfo->ret.storage == RegTypeStructByAddr) {
+ switch (cinfo->ret.storage) {
+ case RegTypeGeneral:
+ case RegTypeNone:
+ case RegTypeFP:
+ case RegTypeIRegPair:
+ break;
+ case RegTypeStructByAddr:
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
- } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
- cfg->exception_message = g_strdup ("unknown ret conv");
+ break;
+ default:
+ cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
cfg->disable_llvm = TRUE;
return linfo;
}
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeBase:
+ case RegTypeBaseGen:
linfo->args [i].storage = LLVMArgInIReg;
break;
case RegTypeStructByVal:
switch (ainfo->storage) {
case RegTypeGeneral:
- break;
case RegTypeIRegPair:
+ case RegTypeBaseGen:
break;
case RegTypeBase:
if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
int slot = -1;
- if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
+ if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
slot = ainfo->reg;
- else if (ainfo->storage == RegTypeBase)
+ } else if (ainfo->storage == RegTypeBase) {
slot = PARAM_REGS + (ainfo->offset / 4);
- else
+ } else if (ainfo->storage == RegTypeBaseGen) {
+ /* slot + 1 is the first stack slot, so the code below will work */
+ slot = 3;
+ } else {
g_assert_not_reached ();
+ }
if (t->byref) {
p->regs [slot] = (mgreg_t)*arg;
/* Free entry */
target_thunk = p;
break;
+ } else if (((guint32*)p) [2] == (guint32)target) {
+ /* Thunk already points to target */
+ target_thunk = p;
+ break;
}
}
}
ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IDIV:
- g_assert (v7s_supported);
+ g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IDIV_UN:
- g_assert (v7s_supported);
+ g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IREM:
- g_assert (v7s_supported);
+ g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
case OP_IREM_UN:
- g_assert (v7s_supported);
+ g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
} else {
code = mono_arm_patchable_b (code, ARMCOND_AL);
+ cfg->thunk_area += THUNK_SIZE;
}
break;
}
}
case OP_LOCALLOC: {
/* round the size to 8 bytes */
- ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
- ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
+ ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
+ ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
/* memzero the area: dreg holds the size, sp is the pointer */
if (ins->flags & MONO_INST_INIT) {
}
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Reserve a param area, see filter-stack.exe */
- if (cfg->param_area) {
- if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ if (param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
- if (cfg->param_area) {
- if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ if (param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
- if (cfg->param_area) {
- if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
+ if (param_area) {
+ if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
+ case OP_GC_SAFE_POINT: {
+#if defined (USE_COOP_GC)
+ const char *polling_func = NULL;
+ guint8 *buf [1];
+
+ polling_func = "mono_threads_state_poll";
+ ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
+ ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
+ buf [0] = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func);
+ code = emit_call_seq (cfg, code);
+ arm_patch (buf [0], code);
+#endif
+ break;
+ }
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
/* The signature doesn't matter */
mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
+ mono_register_jit_icall (mono_arm_unaligned_stack, "mono_arm_unaligned_stack", mono_create_icall_signature ("void"), TRUE);
#ifndef MONO_CROSS_COMPILE
if (mono_arm_have_tls_get ()) {
if (mono_arm_have_fast_tls ()) {
mono_register_jit_icall (mono_fast_get_tls_key, "mono_get_tls_key", mono_create_icall_signature ("ptr ptr"), TRUE);
mono_register_jit_icall (mono_fast_set_tls_key, "mono_set_tls_key", mono_create_icall_signature ("void ptr ptr"), TRUE);
+
+ mono_tramp_info_register (
+ mono_tramp_info_create (
+ "mono_get_tls_key",
+ (guint8*)mono_fast_get_tls_key,
+ (guint8*)mono_fast_get_tls_key_end - (guint8*)mono_fast_get_tls_key,
+ NULL,
+ mono_arch_get_cie_program ()
+ ),
+ NULL
+ );
+ mono_tramp_info_register (
+ mono_tramp_info_create (
+ "mono_set_tls_key",
+ (guint8*)mono_fast_set_tls_key,
+ (guint8*)mono_fast_set_tls_key_end - (guint8*)mono_fast_set_tls_key,
+ NULL,
+ mono_arch_get_cie_program ()
+ ),
+ NULL
+ );
} else {
g_warning ("No fast tls on device. Using fallbacks.");
mono_register_jit_icall (mono_fallback_get_tls_key, "mono_get_tls_key", mono_create_icall_signature ("ptr ptr"), TRUE);
}
}
+/*
+ * mono_arm_unaligned_stack:
+ *
+ * Diagnostic JIT icall (registered as "mono_arm_unaligned_stack" elsewhere
+ * in this patch). It is the target of the prologue stack-alignment check,
+ * which branches here when SP is not MONO_ARCH_FRAME_ALIGNMENT-aligned.
+ * Reaching it indicates a frame-layout bug, so it simply aborts.
+ * NOTE(review): METHOD is accepted so the check can pass cfg->method in
+ * r0 for debugging, but it is intentionally unused here.
+ */
+void
+mono_arm_unaligned_stack (MonoMethod *method)
+{
+ g_assert_not_reached ();
+}
+
#ifndef DISABLE_JIT
/*
* FIXME: Optimize this.
*/
ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
- ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
prev_sp_offset += 8; /* r7 and lr */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
+ ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
}
if (!method->save_lmf) {
}
/* the stack used in the pushed regs */
- if (prev_sp_offset & 4)
- alloc_size += 4;
+ alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
cfg->stack_usage = alloc_size;
if (alloc_size) {
if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
+ /* stack alignment check */
+ /*
+ {
+ guint8 *buf [16];
+ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
+ ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
+ ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
+ buf [0] = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+ if (cfg->compile_aot)
+ ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
+ else
+ code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_arm_unaligned_stack");
+ code = emit_call_seq (cfg, code);
+ arm_patch (buf [0], code);
+ }
+ */
+
/* store runtime generic context */
if (cfg->rgctx_var) {
MonoInst *ins = cfg->rgctx_var;
*/
code = cfg->native_code + cfg->code_len;
+ /* Save the uwind state which is needed by the out-of-line code */
+ mono_emit_unwind_op_remember_state (cfg, code);
+
if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
}
}
if (method->save_lmf) {
- int lmf_offset, reg, sp_adj, regmask;
+ int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
/* all but r0-r3, sp and pc */
pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
lmf_offset = pos;
regmask &= ~(1 << ARMREG_PC);
/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
+ for (i = 0; i < 16; i++) {
+ if (regmask & (1 << i))
+ nused_int_regs ++;
+ }
+ mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
/* restore iregs */
ARM_POP (code, regmask);
if (iphone_abi) {
+ for (i = 0; i < 16; i++) {
+ if (regmask & (1 << i))
+ mono_emit_unwind_op_same_value (cfg, code, i);
+ }
/* Restore saved r7, restore LR to PC */
/* Skip lr from the lmf */
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
}
} else {
+ int i, nused_int_regs = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (cfg->used_int_regs & (1 << i))
+ nused_int_regs ++;
+ }
+
if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
} else {
ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
}
+ if (cfg->frame_reg != ARMREG_SP) {
+ mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
+ }
+
if (iphone_abi) {
/* Restore saved gregs */
- if (cfg->used_int_regs)
+ if (cfg->used_int_regs) {
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
ARM_POP (code, cfg->used_int_regs);
+ for (i = 0; i < 16; i++) {
+ if (cfg->used_int_regs & (1 << i))
+ mono_emit_unwind_op_same_value (cfg, code, i);
+ }
+ }
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
/* Restore saved r7, restore LR to PC */
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
} else {
+ mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
}
}
+ /* Restore the unwind state to be the same as before the epilog */
+ mono_emit_unwind_op_restore_state (cfg, code);
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
+/*
+ * mono_arch_get_cie_program:
+ *
+ * Return the unwind ops shared by all ARM trampolines/methods, i.e. the
+ * initial CFA rule: CFA = SP + 0 on entry. The returned list is consumed
+ * by the MonoTrampInfo creation sites in this file.
+ * NOTE(review): this assumes mono_add_unwind_op_def_cfa is a macro that
+ * appends to (and reassigns) L — if it were a plain function taking the
+ * list by value, L would remain NULL; confirm against mini-unwind.h.
+ * Ownership of the list transfers to the caller.
+ */
+GSList*
+mono_arch_get_cie_program (void)
+{
+ GSList *l = NULL;
+
+ mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
+
+ return l;
+}
+
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#ifdef ENABLE_WRONG_METHOD_CHECK
char * cond;
#endif
+ GSList *unwind_ops;
size = BASE_SIZE;
#ifdef USE_JUMP_TABLES
code = mono_domain_code_reserve (domain, size);
start = code;
+ unwind_ops = mono_arch_get_cie_program ();
+
#ifdef DEBUG_IMT
g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
for (i = 0; i < count; ++i) {
#ifdef USE_JUMP_TABLES
ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 3 * sizeof (mgreg_t));
#define VTABLE_JTI 0
#define IMT_METHOD_OFFSET 0
#define TARGET_CODE_OFFSET 1
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
set_jumptable_element (jte, VTABLE_JTI, vtable);
#else
- if (large_offsets)
+ if (large_offsets) {
ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
- else
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (mgreg_t));
+ } else {
ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (mgreg_t));
+ }
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
/* Restore registers */
ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
/* And branch */
ARM_BX (code, ARMREG_R1);
set_jumptable_element (jte, target_code_jti, item->value.target_code);
ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
/* Restore registers and branch */
ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
ARM_BX (code, ARMREG_IP);
#else
vtable_offset_ins = code;
#ifdef USE_JUMP_TABLES
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
ARM_BX (code, ARMREG_IP);
#else
ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
- if (large_offsets)
+ if (large_offsets) {
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (mgreg_t));
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
+ }
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
#endif
}
code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
/* Restore registers */
ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
/* And branch */
ARM_BX (code, ARMREG_R1);
set_jumptable_element (jte, target_code_jti, fail_tramp);
mono_stats.imt_thunks_size += code - start;
g_assert (DISTANCE (start, code) <= size);
+
+ mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), domain);
+
return start;
}
if (strstr (mtriple, "armv7s")) {
v7s_supported = TRUE;
}
+ if (strstr (mtriple, "armv7k")) {
+ v7k_supported = TRUE;
+ }
if (strstr (mtriple, "thumbv7s")) {
v5_supported = TRUE;
v6_supported = TRUE;