/* Stack size for trampoline function
*/
-#define STACK ALIGN_TO (sizeof (MonoLMF), 8)
+#define STACK ALIGN_TO (sizeof (MonoLMF), MONO_ARCH_FRAME_ALIGNMENT)
/* Method-specific trampoline code fragment size */
#define METHOD_TRAMPOLINE_SIZE 64
/* The offset where lr was saved inside the regsave area */
lr_offset = 13 * sizeof (mgreg_t);
- // FIXME: Finish the unwind info, the current info allows us to unwind
- // when the trampoline is not in the epilog
-
// CFA = SP + (num registers pushed) * 4
cfa_offset = 14 * sizeof (mgreg_t);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
* preceeding the got slot where the value is stored. The offset can be
* found at [lr + 0].
*/
+	/* See emit_trampolines () in aot-compiler.c for the meaning of the '2' */
if (aot == 2) {
ARM_MOV_REG_REG (code, ARMREG_V2, ARMREG_R1);
} else {
}
ARM_LDR_IMM (code, ARMREG_V3, ARMREG_SP, lr_offset);
+ /* we build the MonoLMF structure on the stack - see mini-arm.h
+ * The pointer to the struct is put in r1.
+ * the iregs array is already allocated on the stack by push.
+ */
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - regsave_size);
+ ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R2);
+ cfa_offset += STACK - regsave_size;
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
+ /* V1 == lmf */
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - sizeof (MonoLMF));
+ ARM_ADD_REG_REG (code, ARMREG_V1, ARMREG_SP, ARMREG_R2);
+
/* ok, now we can continue with the MonoLMF setup, mostly untouched
* from emit_prolog in mini-arm.c
* This is a synthetized call to mono_get_lmf_addr ()
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
code = emit_bx (code, ARMREG_R0);
- /* we build the MonoLMF structure on the stack - see mini-arm.h
- * The pointer to the struct is put in r1.
- * the iregs array is already allocated on the stack by push.
- */
- code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - regsave_size);
- ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R2);
- cfa_offset += STACK - regsave_size;
- mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
- /* V1 == lmf */
- code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - sizeof (MonoLMF));
- ARM_ADD_REG_REG (code, ARMREG_V1, ARMREG_SP, ARMREG_R2);
-
/*
* The stack now looks like:
* <saved regs>
* Note that IP has been conveniently set to the method addr.
*/
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
+ cfa_offset -= STACK - regsave_size;
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
ARM_POP_NWB (code, 0x5fff);
+ mono_add_unwind_op_same_value (unwind_ops, code, buf, ARMREG_LR);
if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_IP);
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, regsave_size);
+ cfa_offset -= regsave_size;
+ g_assert (cfa_offset == 0);
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type))
code = emit_bx (code, ARMREG_LR);
else
{
guint8 *code, *start;
MonoDomain *domain = mono_domain_get ();
+ GSList *unwind_ops;
#ifdef USE_JUMP_TABLES
gpointer *jte;
guint32 size = 20;
start = code = mono_domain_code_reserve (domain, size);
+ unwind_ops = mono_arch_get_cie_program ();
+
#ifdef USE_JUMP_TABLES
jte = mono_jumptable_add_entry ();
code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
/*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
g_print ("unbox code is at %p for method at %p\n", start, addr);*/
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
guint8 *code, *start;
+ GSList *unwind_ops;
#ifdef USE_JUMP_TABLES
int buf_len = 20;
gpointer *jte;
start = code = mono_domain_code_reserve (domain, buf_len);
+ unwind_ops = mono_arch_get_cie_program ();
+
#ifdef USE_JUMP_TABLES
jte = mono_jumptable_add_entries (2);
code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}
code = buf = mono_global_codeman_reserve (tramp_size);
- mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, 0);
+ unwind_ops = mono_arch_get_cie_program ();
rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
njumps = 0;
code = buf = mono_global_codeman_reserve (tramp_size);
- mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, 0);
+ unwind_ops = mono_arch_get_cie_program ();
// FIXME: Currently, we always go to the slow path.
/* Load trampoline addr */
code = buf = mono_global_codeman_reserve (tramp_size);
+ unwind_ops = mono_arch_get_cie_program ();
+
tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD, NULL, NULL);
/*
mono_arch_flush_icache (buf, code - buf);
mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, buf, code - buf, NULL, NULL), domain);
+
return buf;
}