#include "mini.h"
#include "mini-arm.h"
+static guint8* nullified_class_init_trampoline;
+
/*
 * Return the instruction to jump from code to target, 0 if not
 * reachable with a single instruction
 */
/*
* mono_arch_get_unbox_trampoline:
+ * @gsctx: the generic sharing context
* @m: method pointer
* @addr: pointer to native code for @m
*
* unboxing before calling the method
*/
gpointer
-mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
+mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
int this_pos = 0;
MonoDomain *domain = mono_domain_get ();
- if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
+ if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
this_pos = 1;
mono_domain_lock (domain);
}
void
-mono_arch_patch_callsite (guint8 *code_ptr, guint8 *addr)
+mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
{
guint32 *code = (guint32*)code_ptr;
*/
if ((((*code) >> 25) & 7) == 5) {
/*g_print ("direct patching\n");*/
- arm_patch ((char*)code, addr);
- mono_arch_flush_icache ((char*)code, 4);
+ arm_patch ((guint8*)code, addr);
+ mono_arch_flush_icache ((guint8*)code, 4);
return;
}
if ((((*code) >> 20) & 0xFF) == 0x12) {
/*g_print ("patching bx\n");*/
- arm_patch ((char*)code, addr);
- mono_arch_flush_icache ((char*)(code - 2), 4);
+ arm_patch ((guint8*)code, addr);
+ mono_arch_flush_icache ((guint8*)(code - 2), 4);
return;
}
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
-	guint32 ins = branch_for_target_reachable (code, addr);
+	/* Patch the jump table entry used by the plt entry */
+	/* Word 3 of the PLT entry holds the offset of its jump table slot */
+	guint32 offset = ((guint32*)code)[3];
+	/* +12 skips the three instruction words preceding the offset word */
+	guint8 *jump_entry = code + offset + 12;
-	if (ins)
-		/* Patch the branch */
-		((guint32*)code) [0] = ins;
-	else
-		/* Patch the jump address */
-		((guint32*)code) [1] = addr;
-	mono_arch_flush_icache ((char*)code, 4);
+	/* Store the new target; the entry is data, not code, so no icache
+	 * flush is needed for the jump table slot itself. */
+	*(guint8**)jump_entry = addr;
}
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
-	return;
+	/* Redirect the call site to a trampoline consisting of a single
+	 * 'mov pc, lr' (see mono_arch_get_nullified_class_init_trampoline),
+	 * so the class init is not entered again. */
+	mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
}
void
mono_arch_nullify_plt_entry (guint8 *code)
{
-	guint8 buf [4];
-	guint8 *p;
-
-	p = buf;
-	ARM_MOV_REG_REG (p, ARMREG_PC, ARMREG_LR);
+	/* In AOT mode the nullified trampoline is looked up lazily from the
+	 * AOT image; in JIT mode it is created as a side effect of building
+	 * the CLASS_INIT trampoline. */
+	if (mono_aot_only && !nullified_class_init_trampoline)
+		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");
-	((guint32*)code) [0] = ((guint32*)buf) [0];
-	mono_arch_flush_icache ((char*)code, 4);
+	/* NOTE(review): in the JIT (!mono_aot_only) path this assumes the
+	 * CLASS_INIT trampoline was already created, otherwise
+	 * nullified_class_init_trampoline is still NULL here — confirm. */
+	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
-
/* Stack size for trampoline function
*/
#define STACK (sizeof (MonoLMF))
/* Jump-specific trampoline code fragment size */
#define JUMP_TRAMPOLINE_SIZE 64
-#define GEN_TRAMP_SIZE 148
+#define GEN_TRAMP_SIZE 192
/*
* Stack frame description when the generic trampoline is called.
*/
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
+{
+	/* Non-AOT wrapper: delegate to the full version with aot=FALSE and
+	 * discard the patch-info list and code size, which only matter when
+	 * the trampoline is emitted into an AOT image. */
+	MonoJumpInfo *ji;
+	guint32 code_size;
+
+	return mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, FALSE);
+}
+
+guchar*
+mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *buf, *code = NULL;
- int i, offset;
guint8 *load_get_lmf_addr, *load_trampoline;
gpointer *constants;
+ *ji = NULL;
+
/* Now we'll create in 'buf' the ARM trampoline code. This
is the trampoline code common to all methods */
*/
#define LR_OFFSET (sizeof (gpointer) * 13)
ARM_MOV_REG_REG (buf, ARMREG_V1, ARMREG_SP);
- ARM_LDR_IMM (buf, ARMREG_V2, ARMREG_LR, 0);
+ if (aot) {
+ /*
+ * The trampoline contains a pc-relative offset to the got slot where the
+ * value is stored. The offset can be found at [lr + 4].
+ */
+ g_assert (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT);
+ ARM_LDR_IMM (buf, ARMREG_V2, ARMREG_LR, 4);
+ ARM_LDR_REG_REG (buf, ARMREG_V2, ARMREG_V2, ARMREG_LR);
+ } else {
+ if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
+ ARM_LDR_IMM (buf, ARMREG_V2, ARMREG_LR, 0);
+ else
+ ARM_MOV_REG_REG (buf, ARMREG_V2, MONO_ARCH_VTABLE_REG);
+ }
ARM_LDR_IMM (buf, ARMREG_V3, ARMREG_SP, LR_OFFSET);
/* ok, now we can continue with the MonoLMF setup, mostly untouched
* from emit_prolog in mini-arm.c
- * This is a sinthetized call to mono_get_lmf_addr ()
+	 * This is a synthesized call to mono_get_lmf_addr ()
*/
- load_get_lmf_addr = buf;
- buf += 4;
+ if (aot) {
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
+ ARM_LDR_IMM (buf, ARMREG_R0, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_R0, ARMREG_PC, ARMREG_R0);
+ } else {
+ load_get_lmf_addr = buf;
+ buf += 4;
+ }
ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_R0);
/* *(lmf_addr) = r1 */
ARM_STR_IMM (buf, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* save method info (it's in v2) */
- if ((tramp_type == MONO_TRAMPOLINE_GENERIC) || (tramp_type == MONO_TRAMPOLINE_JUMP))
+ if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
ARM_STR_IMM (buf, ARMREG_V2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
ARM_STR_IMM (buf, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
/* save the IP (caller ip) */
*/
ARM_MOV_REG_REG (buf, ARMREG_R2, ARMREG_V2);
- load_trampoline = buf;
- buf += 4;
+ if (aot) {
+ char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_IP, ARMREG_PC, ARMREG_IP);
+ } else {
+ load_trampoline = buf;
+ buf += 4;
+ }
ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
*/
ARM_STR_IMM (buf, ARMREG_R0, ARMREG_V1, (ARMREG_R12 * 4));
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ /*
+ * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
+ */
+ if (aot) {
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_IP, ARMREG_PC, ARMREG_IP);
+ } else {
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = mono_thread_force_interruption_checkpoint;
+ buf += 4;
+ }
+ ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
+
/*
* Now we restore the MonoLMF (see emit_epilogue in mini-arm.c)
* and the rest of the registers, so the method called will see
*/
ARM_ADD_REG_IMM8 (buf, ARMREG_SP, ARMREG_SP, sizeof (MonoLMF) - sizeof (guint) * 14);
ARM_POP_NWB (buf, 0x5fff);
+ if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
+ ARM_MOV_REG_REG (buf, ARMREG_R0, ARMREG_IP);
/* do we need to set sp? */
ARM_ADD_REG_IMM8 (buf, ARMREG_SP, ARMREG_SP, (14 * 4));
- if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
+ if ((tramp_type == MONO_TRAMPOLINE_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
else
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
constants = (gpointer*)buf;
constants [0] = mono_get_lmf_addr;
- constants [1] = mono_get_trampoline_func (tramp_type);
+ constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);
- /* backpatch by emitting the missing instructions skipped above */
- ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (buf - load_get_lmf_addr - 8));
- ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (buf + 4 - load_trampoline - 8));
+ if (!aot) {
+ /* backpatch by emitting the missing instructions skipped above */
+ ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (buf - load_get_lmf_addr - 8));
+ ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (buf + 4 - load_trampoline - 8));
+ }
buf += 8;
/* Sanity check */
g_assert ((buf - code) <= GEN_TRAMP_SIZE);
+ *code_size = buf - code;
+
+ if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
+ guint32 code_len;
+
+ /* Initialize the nullified class init trampoline used in the AOT case */
+ nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
+ }
+
+ return code;
+}
+
+/*
+ * mono_arch_get_nullified_class_init_trampoline:
+ * @code_len: out parameter receiving the size of the generated code
+ *
+ * Create a trampoline which immediately returns to the caller
+ * ('mov pc, lr').  Class init call sites are redirected here once the
+ * class has been initialized (see mono_arch_nullify_class_init_trampoline).
+ */
+gpointer
+mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
+{
+	guint8 *buf, *code;
+
+	code = buf = mono_global_codeman_reserve (16);
+
+	ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
+
+	mono_arch_flush_icache (code, buf - code);
+
+	*code_len = buf - code;
+
	return code;
}
return code;
}
-/*
- * This method is only called when running in the Mono Debugger.
- */
+#define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
+
+/*
+ * mono_arch_create_rgctx_lazy_fetch_trampoline:
+ * @slot: encoded rgctx slot (MRGCTX flag + index)
+ *
+ * Create a trampoline which fetches an rgctx slot: walk the chain of
+ * rgctx arrays to the required depth, return the slot value in R0 if it
+ * is already filled in, otherwise fall through to the generic
+ * RGCTX_LAZY_FETCH trampoline.  The vtable/mrgctx pointer arrives in R0.
+ */
gpointer
-mono_debugger_create_notification_function (void)
+mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
-	guint8 *ptr, *buf;
+	guint8 *tramp;
+	guint8 *code, *buf;
+	int tramp_size;
+	guint32 code_len;
+	guint8 **rgctx_null_jumps;
+	int depth, index;
+	int i, njumps;
+	gboolean mrgctx;
+
+	/* Decode the slot: MRGCTX slots skip the MonoMethodRuntimeGenericContext
+	 * header, then the index is reduced to (depth, index-within-array). */
+	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
+	index = MONO_RGCTX_SLOT_INDEX (slot);
+	if (mrgctx)
+		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
+	for (depth = 0; ; ++depth) {
+		int size = mono_class_rgctx_get_array_size (depth, mrgctx);
+
+		if (index < size - 1)
+			break;
+		index -= size - 1;
+	}
-	ptr = buf = mono_global_codeman_reserve (8);
-	//FIXME: ARM_SWI (buf, 0x9F0001);
-	ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
-	mono_arch_flush_icache (ptr, buf - ptr);
+	/* 16 bytes per depth level for the null check + load sequence */
+	tramp_size = 64 + 16 * depth;
+
+	code = buf = mono_global_codeman_reserve (tramp_size);
+
+	/* one possible null-branch per level, plus the rgctx and slot checks */
+	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
+	njumps = 0;
+
+	/* The vtable/mrgctx is in R0 */
+	g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);
+
+	if (mrgctx) {
+		/* get mrgctx ptr */
+		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
+	} else {
+		/* load rgctx ptr from vtable */
+		g_assert (arm_is_imm12 (G_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
+		ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
+		/* is the rgctx ptr null? */
+		ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+		/* if yes, jump to actual trampoline */
+		rgctx_null_jumps [njumps ++] = code;
+		ARM_B_COND (code, ARMCOND_EQ, 0);
+	}
+
+	for (i = 0; i < depth; ++i) {
+		/* load ptr to next array */
+		if (mrgctx && i == 0) {
+			g_assert (arm_is_imm12 (sizeof (MonoMethodRuntimeGenericContext)));
+			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, sizeof (MonoMethodRuntimeGenericContext));
+		} else {
+			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, 0);
+		}
+		/* is the ptr null? */
+		ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+		/* if yes, jump to actual trampoline */
+		rgctx_null_jumps [njumps ++] = code;
+		ARM_B_COND (code, ARMCOND_EQ, 0);
+	}
+
+	/* fetch slot */
+	code = mono_arm_emit_load_imm (code, ARMREG_R2, sizeof (gpointer) * (index + 1));
+	ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_R1, ARMREG_R2);
+	/* is the slot null? */
+	ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+	/* if yes, jump to actual trampoline */
+	rgctx_null_jumps [njumps ++] = code;
+	ARM_B_COND (code, ARMCOND_EQ, 0);
+	/* otherwise return, result is in R1 */
+	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
+	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
-	return ptr;
+	/* All the "not filled in yet" branches land on the slowpath below */
+	g_assert (njumps <= depth + 2);
+	for (i = 0; i < njumps; ++i)
+		arm_patch (rgctx_null_jumps [i], code);
+
+	g_free (rgctx_null_jumps);
+
+	/* Slowpath */
+
+	/* The vtable/mrgctx is still in R0 */
+
+	tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);
+
+	/* Jump to the actual trampoline */
+	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
+	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_R1);
+	/* NOTE(review): a pointer is stored through a guint32*; this assumes
+	 * 32-bit pointers (fine on ARM32) — confirm if ever ported. */
+	*(guint32*)code = tramp;
+	code += 4;
+
+	mono_arch_flush_icache (buf, code - buf);
+
+	g_assert (code - buf <= tramp_size);
+
+	return buf;
}
+#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
+
+/*
+ * mono_arch_create_generic_class_init_trampoline:
+ *
+ * Create a trampoline which checks the 'initialized' bitfield of the
+ * vtable in MONO_ARCH_VTABLE_REG: if the bit is already set it returns
+ * immediately, otherwise it jumps to the generic GENERIC_CLASS_INIT
+ * trampoline.  Clobbers IP and R1.
+ */
+gpointer
+mono_arch_create_generic_class_init_trampoline (void)
+{
+	guint8 *tramp;
+	guint8 *code, *buf;
+	static int byte_offset = -1;
+	static guint8 bitmask;
+	guint8 *jump;
+	int tramp_size;
+	guint32 code_len, imm8;
+	gint rot_amount;
+
+	tramp_size = 64;
+
+	code = buf = mono_global_codeman_reserve (tramp_size);
+
+	/* Locate the byte and bit of MonoVTable.initialized (computed once) */
+	if (byte_offset < 0)
+		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+	g_assert (arm_is_imm8 (byte_offset));
+	ARM_LDRSB_IMM (code, ARMREG_IP, MONO_ARCH_VTABLE_REG, byte_offset);
+	imm8 = mono_arm_is_rotated_imm8 (bitmask, &rot_amount);
+	/* NOTE(review): imm8 is unsigned, so this assert is vacuous; if
+	 * mono_arm_is_rotated_imm8 signals failure with -1 (presumably), the
+	 * check should be on a signed value — confirm. */
+	g_assert (imm8 >= 0);
+	ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
+	ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
+	jump = code;
+	ARM_B_COND (code, ARMCOND_EQ, 0);
+
+	/* Initialized case */
+	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
+
+	/* Uninitialized case */
+	arm_patch (jump, code);
+
+	tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
+
+	/* Jump to the actual trampoline */
+	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
+	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_R1);
+	*(guint32*)code = tramp;
+	code += 4;
+
+	mono_arch_flush_icache (buf, code - buf);
+
+	g_assert (code - buf <= tramp_size);
+
+	return buf;
+}