#include "mini.h"
#include "mini-arm.h"
+static guint8* nullified_class_init_trampoline;
+
+/*
+ * Return the instruction to jump from code to target, 0 if not
+ * reachable with a single instruction
+ */
+static guint32
+branch_for_target_reachable (guint8 *branch, guint8 *target)
+{
+ gint diff = target - branch - 8;
+ g_assert ((diff & 3) == 0);
+ if (diff >= 0) {
+ if (diff <= 33554431)
+ return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | (diff >> 2);
+ } else {
+ /* diff between 0 and -33554432 */
+ if (diff >= -33554432)
+ return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | ((diff >> 2) & ~0xff000000);
+ }
+ return 0;
+}
+
/*
- * get_unbox_trampoline:
+ * mono_arch_get_unbox_trampoline:
+ * @gsctx: the generic sharing context
* @m: method pointer
* @addr: pointer to native code for @m
*
* this argument. This method returns a pointer to a trampoline which does
* unboxing before calling the method
*/
-static gpointer
-get_unbox_trampoline (MonoMethod *m, gpointer addr)
+gpointer
+mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
int this_pos = 0;
MonoDomain *domain = mono_domain_get ();
- if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
+ if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
this_pos = 1;
mono_domain_lock (domain);
return start;
}
-/* Stack size for trampoline function
- */
-#define STACK (sizeof (MonoLMF))
-
-/* Method-specific trampoline code fragment size */
-#define METHOD_TRAMPOLINE_SIZE 64
-
-/* Jump-specific trampoline code fragment size */
-#define JUMP_TRAMPOLINE_SIZE 64
-
-/**
- * arm_magic_trampoline:
- * @code: pointer into caller code
- * @method: the method to translate
- * @sp: stack pointer
- *
- * This method is called by the function 'arch_create_jit_trampoline', which in
- * turn is called by the trampoline functions for virtual methods.
- * After having called the JIT compiler to compile the method, it inspects the
- * caller code to find the address of the method-specific part of the
- * trampoline vtable slot for this method, updates it with a fragment that calls
- * the newly compiled code and returns this address of the compiled code to
- * 'arch_create_jit_trampoline'
- */
-static gpointer
-arm_magic_trampoline (MonoMethod *method, guint32 *code, gchar **sp)
+void
+mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
{
- char *o = NULL;
- gpointer addr;
- MonoJitInfo *ji, *target_ji;
- int reg, offset = 0;
-
- addr = mono_compile_method (method);
- /*g_print ("method code at %p for %s:%s\n", addr, method->klass->name, method->name);*/
- g_assert(addr);
+ guint32 *code = (guint32*)code_ptr;
- if (!code) {
- return addr;
- }
-
- /* We can't trampoline across domains */
- ji = mono_jit_info_table_find (mono_domain_get (), code);
- target_ji = mono_jit_info_table_find (mono_domain_get (), addr);
- if (!mono_method_same_domain (ji, target_ji))
- return addr;
-
- /* Locate the address of the method-specific trampoline. The call using
- the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
- looks something like this:
-
- ldr rA, rX, #offset
- mov lr, pc
- mov pc, rA
- or better:
- mov lr, pc
- ldr pc, rX, #offset
-
- The call sequence could be also:
- ldr ip, pc, 0
- b skip
- function pointer literal
- skip:
- mov lr, pc
- mov pc, ip
- Note that on ARM5+ we can use one instruction instead of the last two.
- Therefore, we need to locate the 'ldr rA' instruction to know which
- register was used to hold the method addrs.
- */
-
/* This is the 'bl' or the 'mov pc' instruction */
--code;
*/
if ((((*code) >> 25) & 7) == 5) {
/*g_print ("direct patching\n");*/
- arm_patch ((char*)code, addr);
- mono_arch_flush_icache ((char*)code, 4);
- return addr;
+ arm_patch ((guint8*)code, addr);
+ mono_arch_flush_icache ((guint8*)code, 4);
+ return;
}
- /* ldr pc, rX, #offset */
-#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
-#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
- if ((*code & LDR_MASK) == LDR_PC_VAL) {
- reg = (*code >> 16 ) & 0xf;
- offset = *code & 0xfff;
- /*g_print ("found vcall at r%d + %d\n", reg, offset);*/
- o = sp [reg];
+ if ((((*code) >> 20) & 0xFF) == 0x12) {
+ /*g_print ("patching bx\n");*/
+ arm_patch ((guint8*)code, addr);
+ mono_arch_flush_icache ((guint8*)(code - 2), 4);
+ return;
}
- /* this is not done for non-virtual calls, because in that case
- we won't have an object, but the actual pointer to the
- valuetype as the this argument
- */
- if (method->klass->valuetype && !mono_aot_is_got_entry (code, o))
- addr = get_unbox_trampoline (method, addr);
+ g_assert_not_reached ();
+}
- if (o) {
- o += offset;
- if (mono_aot_is_got_entry (code, o) || mono_domain_owns_vtable_slot (mono_domain_get (), o))
- *((gpointer *)o) = addr;
- } else {
- /*g_print ("no callsite patching\n");
- mono_disassemble_code (code -3, 16, "callsite");*/
- }
+void
+mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
+{
+ /* Patch the jump table entry used by the plt entry */
+ guint32 offset = ((guint32*)code)[3];
+ guint8 *jump_entry = code + offset + 12;
- return addr;
+ *(guint8**)jump_entry = addr;
}
-static void
-arm_class_init_trampoline (void *vtable, guint32 *code, char *sp)
+void
+mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
- mono_runtime_class_init (vtable);
+ mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
+}
-#if 0
- /* This is the 'bl' instruction */
- --code;
-
- if ((((*code) >> 25) & 7) == 5) {
- ARM_NOP (code); /* nop */
- mono_arch_flush_icache (code, 4);
- return;
- } else {
- g_assert_not_reached ();
- }
-#endif
+void
+mono_arch_nullify_plt_entry (guint8 *code)
+{
+ if (mono_aot_only && !nullified_class_init_trampoline)
+ nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");
+
+ mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
+/* Stack size for trampoline function
+ */
+#define STACK (sizeof (MonoLMF))
+
+/* Method-specific trampoline code fragment size */
+#define METHOD_TRAMPOLINE_SIZE 64
+
+/* Jump-specific trampoline code fragment size */
+#define JUMP_TRAMPOLINE_SIZE 64
+
+#define GEN_TRAMP_SIZE 192
+
/*
* Stack frame description when the generic trampoline is called.
* caller frame
*/
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
+{
+ MonoJumpInfo *ji;
+ guint32 code_size;
+
+ return mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, FALSE);
+}
+
+guchar*
+mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *buf, *code = NULL;
- int i, offset;
guint8 *load_get_lmf_addr, *load_trampoline;
gpointer *constants;
+ *ji = NULL;
+
/* Now we'll create in 'buf' the ARM trampoline code. This
is the trampoline code common to all methods */
- code = buf = g_malloc (148);
+ code = buf = mono_global_codeman_reserve (GEN_TRAMP_SIZE);
/*
- * At this point r0 has the method and sp points to the saved
- * regs on the stack (all but PC and SP).
+ * At this point lr points to the specific arg and sp points to the saved
+ * regs on the stack (all but PC and SP). The original LR value has been
+ * saved at sp + LR_OFFSET by the push in the specific trampoline
*/
+#define LR_OFFSET (sizeof (gpointer) * 13)
ARM_MOV_REG_REG (buf, ARMREG_V1, ARMREG_SP);
- ARM_MOV_REG_REG (buf, ARMREG_V2, ARMREG_R0);
- ARM_MOV_REG_REG (buf, ARMREG_V3, ARMREG_LR);
+ if (aot) {
+ /*
+ * The trampoline contains a PC-relative offset to the GOT slot where the
+ * value is stored. The offset can be found at [lr + 4].
+ */
+ g_assert (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT);
+ ARM_LDR_IMM (buf, ARMREG_V2, ARMREG_LR, 4);
+ ARM_LDR_REG_REG (buf, ARMREG_V2, ARMREG_V2, ARMREG_LR);
+ } else {
+ if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
+ ARM_LDR_IMM (buf, ARMREG_V2, ARMREG_LR, 0);
+ else
+ ARM_MOV_REG_REG (buf, ARMREG_V2, MONO_ARCH_VTABLE_REG);
+ }
+ ARM_LDR_IMM (buf, ARMREG_V3, ARMREG_SP, LR_OFFSET);
/* ok, now we can continue with the MonoLMF setup, mostly untouched
* from emit_prolog in mini-arm.c
- * This is a sinthetized call to mono_get_lmf_addr ()
+ * This is a synthesized call to mono_get_lmf_addr ()
*/
- load_get_lmf_addr = buf;
- buf += 4;
+ if (aot) {
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
+ ARM_LDR_IMM (buf, ARMREG_R0, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_R0, ARMREG_PC, ARMREG_R0);
+ } else {
+ load_get_lmf_addr = buf;
+ buf += 4;
+ }
ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_R0);
/* *(lmf_addr) = r1 */
ARM_STR_IMM (buf, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* save method info (it's in v2) */
- ARM_STR_IMM (buf, ARMREG_V2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
+ if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
+ ARM_STR_IMM (buf, ARMREG_V2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
ARM_STR_IMM (buf, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
/* save the IP (caller ip) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
ARM_STR_IMM (buf, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
/*
- * Now we're ready to call arm_magic_trampoline ().
+ * Now we're ready to call xxx_trampoline ().
*/
- /* Arg 1: MonoMethod *method. It was put in v2 */
- ARM_MOV_REG_REG (buf, ARMREG_R0, ARMREG_V2);
+ /* Arg 1: the saved registers. It was put in v1 */
+ ARM_MOV_REG_REG (buf, ARMREG_R0, ARMREG_V1);
/* Arg 2: code (next address to the instruction that called us) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
ARM_MOV_REG_REG (buf, ARMREG_R1, ARMREG_V3);
}
- /* Arg 3: stack pointer so that the magic trampoline can access the
- * registers we saved above
+ /* Arg 3: the specific argument, stored in v2
*/
- ARM_MOV_REG_REG (buf, ARMREG_R2, ARMREG_V1);
-
- load_trampoline = buf;
- buf += 4;
+ ARM_MOV_REG_REG (buf, ARMREG_R2, ARMREG_V2);
+
+ if (aot) {
+ char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_IP, ARMREG_PC, ARMREG_IP);
+ } else {
+ load_trampoline = buf;
+ buf += 4;
+ }
ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
*/
ARM_STR_IMM (buf, ARMREG_R0, ARMREG_V1, (ARMREG_R12 * 4));
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ /*
+ * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
+ */
+ if (aot) {
+ *ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = NULL;
+ buf += 4;
+ ARM_LDR_REG_REG (buf, ARMREG_IP, ARMREG_PC, ARMREG_IP);
+ } else {
+ ARM_LDR_IMM (buf, ARMREG_IP, ARMREG_PC, 0);
+ ARM_B (buf, 0);
+ *(gpointer*)buf = mono_thread_force_interruption_checkpoint;
+ buf += 4;
+ }
+ ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
+
/*
* Now we restore the MonoLMF (see emit_epilogue in mini-arm.c)
* and the rest of the registers, so the method called will see
*/
ARM_ADD_REG_IMM8 (buf, ARMREG_SP, ARMREG_SP, sizeof (MonoLMF) - sizeof (guint) * 14);
ARM_POP_NWB (buf, 0x5fff);
+ if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
+ ARM_MOV_REG_REG (buf, ARMREG_R0, ARMREG_IP);
/* do we need to set sp? */
ARM_ADD_REG_IMM8 (buf, ARMREG_SP, ARMREG_SP, (14 * 4));
- ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
+ if ((tramp_type == MONO_TRAMPOLINE_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
+ else
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_IP);
constants = (gpointer*)buf;
constants [0] = mono_get_lmf_addr;
- if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
- constants [1] = arm_class_init_trampoline;
- } else {
- constants [1] = arm_magic_trampoline;
- }
+ constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);
- /* backpatch by emitting the missing instructions skipped above */
- ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (buf - load_get_lmf_addr - 8));
- ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (buf + 4 - load_trampoline - 8));
+ if (!aot) {
+ /* backpatch by emitting the missing instructions skipped above */
+ ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (buf - load_get_lmf_addr - 8));
+ ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (buf + 4 - load_trampoline - 8));
+ }
buf += 8;
mono_arch_flush_icache (code, buf - code);
/* Sanity check */
- g_assert ((buf - code) <= 512);
+ g_assert ((buf - code) <= GEN_TRAMP_SIZE);
+
+ *code_size = buf - code;
+
+ if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
+ guint32 code_len;
+
+ /* Initialize the nullified class init trampoline used in the AOT case */
+ nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
+ }
return code;
}
-static MonoJitInfo*
-create_specific_tramp (MonoMethod *method, guint8* tramp, MonoDomain *domain) {
- guint8 *code, *buf;
- MonoJitInfo *ji;
+gpointer
+mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
+{
+ guint8 *buf, *code;
+
+ code = buf = mono_global_codeman_reserve (16);
+
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
+
+ mono_arch_flush_icache (code, buf - code);
+
+ *code_len = buf - code;
+
+ return code;
+}
+
+#define SPEC_TRAMP_SIZE 24
+
+gpointer
+mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
+{
+ guint8 *code, *buf, *tramp;
gpointer *constants;
+ guint32 short_branch, size = SPEC_TRAMP_SIZE;
+
+ tramp = mono_get_trampoline_code (tramp_type);
mono_domain_lock (domain);
- code = buf = mono_code_manager_reserve (domain->code_mp, 24);
+ code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 4);
+ if ((short_branch = branch_for_target_reachable (code + 8, tramp))) {
+ size = 12;
+ mono_code_manager_commit (domain->code_mp, code, SPEC_TRAMP_SIZE, size);
+ }
mono_domain_unlock (domain);
/* we could reduce this to 12 bytes if tramp is within reach:
*/
/* We save all the registers, except PC and SP */
ARM_PUSH (buf, 0x5fff);
- ARM_LDR_IMM (buf, ARMREG_R0, ARMREG_PC, 4); /* method is the only arg */
- ARM_LDR_IMM (buf, ARMREG_R1, ARMREG_PC, 4); /* temp reg */
- ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_R1);
-
- constants = (gpointer*)buf;
- constants [0] = method;
- constants [1] = tramp;
- buf += 8;
+ if (short_branch) {
+ constants = (gpointer*)buf;
+ constants [0] = GUINT_TO_POINTER (short_branch | (1 << 24));
+ constants [1] = arg1;
+ buf += 8;
+ } else {
+ ARM_LDR_IMM (buf, ARMREG_R1, ARMREG_PC, 8); /* temp reg */
+ ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_R1);
+
+ constants = (gpointer*)buf;
+ constants [0] = arg1;
+ constants [1] = tramp;
+ buf += 8;
+ }
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (code, buf - code);
- g_assert ((buf - code) <= 24);
-
- ji = g_new0 (MonoJitInfo, 1);
- ji->method = method;
- ji->code_start = code;
- ji->code_size = buf - code;
+ g_assert ((buf - code) <= size);
- mono_jit_stats.method_trampolines++;
+ if (code_len)
+ *code_len = buf - code;
- return ji;
+ return code;
}
-MonoJitInfo*
-mono_arch_create_jump_trampoline (MonoMethod *method)
-{
- guint8 *tramp;
- MonoDomain* domain = mono_domain_get ();
-
- tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_JUMP);
- return create_specific_tramp (method, tramp, domain);
-}
+#define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
-/**
- * arch_create_jit_trampoline:
- * @method: pointer to the method info
- *
- * Creates a trampoline function for virtual methods. If the created
- * code is called it first starts JIT compilation of method,
- * and then calls the newly created method. It also replaces the
- * corresponding vtable entry (see arm_magic_trampoline).
- *
- * A trampoline consists of two parts: a main fragment, shared by all method
- * trampolines, and some code specific to each method, which hard-codes a
- * reference to that method and then calls the main fragment.
- *
- * The main fragment contains a call to 'arm_magic_trampoline', which performs
- * call to the JIT compiler and substitutes the method-specific fragment with
- * some code that directly calls the JIT-compiled method.
- *
- * Returns: a pointer to the newly created code
- */
gpointer
-mono_arch_create_jit_trampoline (MonoMethod *method)
+mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
guint8 *tramp;
- MonoJitInfo *ji;
- MonoDomain* domain = mono_domain_get ();
- gpointer code_start;
+ guint8 *code, *buf;
+ int tramp_size;
+ guint32 code_len;
+ guint8 **rgctx_null_jumps;
+ int depth, index;
+ int i, njumps;
+ gboolean mrgctx;
+
+ mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
+ index = MONO_RGCTX_SLOT_INDEX (slot);
+ if (mrgctx)
+ index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
+ for (depth = 0; ; ++depth) {
+ int size = mono_class_rgctx_get_array_size (depth, mrgctx);
+
+ if (index < size - 1)
+ break;
+ index -= size - 1;
+ }
- tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC);
- /* FIXME: should pass the domain down to this function */
- ji = create_specific_tramp (method, tramp, domain);
- code_start = ji->code_start;
- g_free (ji);
+ tramp_size = 64 + 16 * depth;
- return code_start;
-}
+ code = buf = mono_global_codeman_reserve (tramp_size);
-/**
- * mono_arch_create_class_init_trampoline:
- * @vtable: the type to initialize
- *
- * Creates a trampoline function to run a type initializer.
- * If the trampoline is called, it calls mono_runtime_class_init with the
- * given vtable, then patches the caller code so it does not get called any
- * more.
- *
- * Returns: a pointer to the newly created code
- */
-gpointer
-mono_arch_create_class_init_trampoline (MonoVTable *vtable)
-{
- guint8 *code, *buf, *tramp;
- gpointer *constants;
+ rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
+ njumps = 0;
- tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_CLASS_INIT);
+ /* The vtable/mrgctx is in R0 */
+ g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);
- mono_domain_lock (vtable->domain);
- code = buf = mono_code_manager_reserve (vtable->domain->code_mp, METHOD_TRAMPOLINE_SIZE);
- mono_domain_unlock (vtable->domain);
+ if (mrgctx) {
+ /* get mrgctx ptr */
+ ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
+ } else {
+ /* load rgctx ptr from vtable */
+ g_assert (arm_is_imm12 (G_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
+ /* is the rgctx ptr null? */
+ ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+ /* if yes, jump to actual trampoline */
+ rgctx_null_jumps [njumps ++] = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+ }
- ARM_MOV_REG_REG (buf, ARMREG_IP, ARMREG_SP);
- ARM_PUSH (buf, ((1 << ARMREG_IP) | (1 << ARMREG_LR)));
- ARM_MOV_REG_REG (buf, ARMREG_R1, ARMREG_LR);
- ARM_LDR_IMM (buf, ARMREG_R0, ARMREG_PC, 12); /* load vtable */
- ARM_LDR_IMM (buf, ARMREG_R3, ARMREG_PC, 12); /* load the func address */
- /* make the call */
- ARM_MOV_REG_REG (buf, ARMREG_LR, ARMREG_PC);
- ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_R3);
+ for (i = 0; i < depth; ++i) {
+ /* load ptr to next array */
+ if (mrgctx && i == 0) {
+ g_assert (arm_is_imm12 (sizeof (MonoMethodRuntimeGenericContext)));
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, sizeof (MonoMethodRuntimeGenericContext));
+ } else {
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, 0);
+ }
+ /* is the ptr null? */
+ ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+ /* if yes, jump to actual trampoline */
+ rgctx_null_jumps [njumps ++] = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+ }
- /* restore and return */
- ARM_POP_NWB (buf, ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
- constants = (gpointer*)buf;
- constants [0] = vtable;
- constants [1] = arm_class_init_trampoline;
- buf += 8;
+ /* fetch slot */
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, sizeof (gpointer) * (index + 1));
+ ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_R1, ARMREG_R2);
+ /* is the slot null? */
+ ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+ /* if yes, jump to actual trampoline */
+ rgctx_null_jumps [njumps ++] = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+ /* otherwise return, result is in R1 */
+ ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
- /* Flush instruction cache, since we've generated code */
- mono_arch_flush_icache (code, buf - code);
-
- /* Sanity check */
- g_assert ((buf - code) <= METHOD_TRAMPOLINE_SIZE);
+ g_assert (njumps <= depth + 2);
+ for (i = 0; i < njumps; ++i)
+ arm_patch (rgctx_null_jumps [i], code);
- mono_jit_stats.method_trampolines++;
+ g_free (rgctx_null_jumps);
- return code;
+ /* Slowpath */
+
+ /* The vtable/mrgctx is still in R0 */
+
+ tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);
+
+ /* Jump to the actual trampoline */
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_R1);
+ *(guint32*)code = tramp;
+ code += 4;
+
+ mono_arch_flush_icache (buf, code - buf);
+
+ g_assert (code - buf <= tramp_size);
+
+ return buf;
}
-/*
- * This method is only called when running in the Mono Debugger.
- */
+#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
+
gpointer
-mono_debugger_create_notification_function (MonoCodeManager *codeman)
+mono_arch_create_generic_class_init_trampoline (void)
{
- guint8 *ptr, *buf;
+ guint8 *tramp;
+ guint8 *code, *buf;
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ guint8 *jump;
+ int tramp_size;
+ guint32 code_len, imm8;
+ gint rot_amount;
- ptr = buf = mono_code_manager_reserve (codeman, 8);
- //FIXME: ARM_SWI (buf, 0x9F0001);
- ARM_MOV_REG_REG (buf, ARMREG_PC, ARMREG_LR);
- mono_arch_flush_icache (ptr, buf - ptr);
+ tramp_size = 64;
- return ptr;
-}
+ code = buf = mono_global_codeman_reserve (tramp_size);
+
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+ g_assert (arm_is_imm8 (byte_offset));
+ ARM_LDRSB_IMM (code, ARMREG_IP, MONO_ARCH_VTABLE_REG, byte_offset);
+ imm8 = mono_arm_is_rotated_imm8 (bitmask, &rot_amount);
+ g_assert (imm8 >= 0);
+ ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
+ ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
+ jump = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+
+ /* Initialized case */
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
+
+ /* Uninitialized case */
+ arm_patch (jump, code);
+
+ tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
+
+ /* Jump to the actual trampoline */
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
+ ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_R1);
+ *(guint32*)code = tramp;
+ code += 4;
+
+ mono_arch_flush_icache (buf, code - buf);
+
+ g_assert (code - buf <= tramp_size);
+
+ return buf;
+}