#include <config.h>
#include <glib.h>
+#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/arch/arm/arm-codegen.h>
+#include <mono/arch/arm/arm-vfp-codegen.h>
#include "mini.h"
#include "mini-arm.h"
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
-static guint8* nullified_class_init_trampoline;
+#ifdef USE_JUMP_TABLES
+
+/*
+ * Decode the 16-bit immediate of an ARM MOVW/MOVT instruction:
+ * imm16 = imm4:imm12, with imm4 in insn bits [19:16] and imm12 in bits [11:0].
+ */
+static guint16
+decode_imm16 (guint32 insn)
+{
+ return (((insn >> 16) & 0xf) << 12) | (insn & 0xfff);
+}
+
+#define INSN_MASK 0xff00000
+#define MOVW_MASK ((3 << 24) | (0 << 20))
+#define MOVT_MASK ((3 << 24) | (4 << 20))
+
+/*
+ * Recover the jumptable entry address materialized by the MOVW/MOVT pair
+ * at CODE: the low half comes from the MOVW immediate, the high half from
+ * the MOVT immediate. Anything other than a MOVW/MOVT pair at CODE is a
+ * caller bug and asserts.
+ */
+gpointer*
+mono_arch_jumptable_entry_from_code (guint8 *code)
+{
+ guint32 insn1 = ((guint32*)code) [0];
+ guint32 insn2 = ((guint32*)code) [1];
+
+ if (((insn1 & INSN_MASK) == MOVW_MASK) &&
+ ((insn2 & INSN_MASK) == MOVT_MASK) ) {
+ guint32 imm_lo = decode_imm16 (insn1);
+ guint32 imm_hi = decode_imm16 (insn2);
+ return (gpointer*) GUINT_TO_POINTER (imm_lo | (imm_hi << 16));
+ } else {
+ g_assert_not_reached ();
+ return NULL;
+ }
+}
+
+#undef INSN_MASK
+#undef MOVW_MASK
+#undef MOVT_MASK
+/*
+ * Patch the call site ending at CODE_PTR to call ADDR by rewriting the
+ * jumptable slot the call loads its target from. Only data is written,
+ * so no icache flush is needed. method_start is unused here.
+ */
+void
+mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
+{
+ gpointer *jte;
+ /*
+ * code_ptr is 4 instructions after MOVW/MOVT used to address
+ * jumptable entry.
+ */
+ jte = mono_jumptable_get_entry (code_ptr - 16);
+ g_assert ( jte != NULL);
+ *jte = addr;
+}
+#else
/* Without jump tables there is no supported way to patch a call site on
 * this path, so reaching it is a bug (asserts). */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
{
g_assert_not_reached ();
}
+#endif
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
- mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
-}
-
-void
-mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
-{
- if (mono_aot_only && !nullified_class_init_trampoline)
- nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");
-
- mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
+ /* Redirect the class-init call site to the shared no-op trampoline
+ * provided by mini (replaces the old per-arch cached pointer). */
+ mono_arch_patch_callsite (NULL, code, mini_get_nullified_class_init_trampoline ());
}
#ifndef DISABLE_JIT
#define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
+#ifndef USE_JUMP_TABLES
/*
* Return the instruction to jump from code to target, 0 if not
* reachable with a single instruction
}
return 0;
}
+#endif
static inline guint8*
emit_bx (guint8* code, int reg)
return code;
}
-/* Stack size for trampoline function
+/* Stack size for trampoline function
*/
#define STACK ALIGN_TO (sizeof (MonoLMF), 8)
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
+ char *tramp_name;
guint8 *buf, *code = NULL;
- guint8 *load_get_lmf_addr, *load_trampoline;
+#ifdef USE_JUMP_TABLES
+ gpointer *load_get_lmf_addr = NULL, *load_trampoline = NULL;
+#else
+ guint8 *load_get_lmf_addr = NULL, *load_trampoline = NULL;
gpointer *constants;
- int cfa_offset, lmf_offset, regsave_size, lr_offset;
+#endif
+
+ int cfa_offset, regsave_size, lr_offset;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
int buf_len;
+#ifdef USE_JUMP_TABLES
+ g_assert (!aot);
+#endif
+
/* Now we'll create in 'buf' the ARM trampoline code. This
is the trampoline code common to all methods */
- buf_len = 212;
+ buf_len = 272;
+
+ /* Add space for saving/restoring VFP regs. */
+ if (mono_arm_is_hard_float ())
+ buf_len += 8 * 2;
+
code = buf = mono_global_codeman_reserve (buf_len);
/*
* saved as sp + LR_OFFSET by the push in the specific trampoline
*/
- /* The offset of lmf inside the stack frame */
- lmf_offset = STACK - sizeof (MonoLMF);
/* The size of the area already allocated by the push in the specific trampoline */
regsave_size = 14 * sizeof (mgreg_t);
/* The offset where lr was saved inside the regsave area */
ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
}
} else {
- if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
+ if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
+ }
else
ARM_MOV_REG_REG (code, ARMREG_V2, MONO_ARCH_VTABLE_REG);
}
code += 4;
ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
} else {
+#ifdef USE_JUMP_TABLES
+ load_get_lmf_addr = mono_jumptable_add_entry ();
+ code = mono_arm_load_jumptable_entry (code, load_get_lmf_addr, ARMREG_R0);
+#else
load_get_lmf_addr = code;
code += 4;
+#endif
}
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
code = emit_bx (code, ARMREG_R0);
* The pointer to the struct is put in r1.
* the iregs array is already allocated on the stack by push.
*/
- ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - regsave_size);
+ ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R2);
cfa_offset += STACK - regsave_size;
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
/* V1 == lmf */
- ARM_ADD_REG_IMM8 (code, ARMREG_V1, ARMREG_SP, STACK - sizeof (MonoLMF));
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - sizeof (MonoLMF));
+ ARM_ADD_REG_REG (code, ARMREG_V1, ARMREG_SP, ARMREG_R2);
/*
* The stack now looks like:
*/
/* r0 is the result from mono_get_lmf_addr () */
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* new_lmf->previous_lmf = *lmf_addr */
- ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *(lmf_addr) = r1 */
- ARM_STR_IMM (code, ARMREG_V1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_STR_IMM (code, ARMREG_V1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* save method info (it's in v2) */
if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
- ARM_STR_IMM (code, ARMREG_V2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, method));
+ ARM_STR_IMM (code, ARMREG_V2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, method));
else {
ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, method));
+ ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, method));
}
/* save caller SP */
- ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, cfa_offset);
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, sp));
+ code = mono_arm_emit_load_imm (code, ARMREG_R2, cfa_offset);
+ ARM_ADD_REG_REG (code, ARMREG_R2, ARMREG_SP, ARMREG_R2);
+ ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, sp));
/* save caller FP */
- ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (G_STRUCT_OFFSET (MonoLMF, iregs) + ARMREG_FP*4));
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, fp));
+ ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (MONO_STRUCT_OFFSET (MonoLMF, iregs) + ARMREG_FP*4));
+ ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fp));
/* save the IP (caller ip) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
} else {
- ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (G_STRUCT_OFFSET (MonoLMF, iregs) + 13*4));
+ ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (MONO_STRUCT_OFFSET (MonoLMF, iregs) + 13*4));
+ }
+ ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, ip));
+
+ /* Save VFP registers. */
+ if (mono_arm_is_hard_float ()) {
+ /*
+ * Strictly speaking, we don't have to save d0-d7 in the LMF, but
+ * it's easier than attempting to store them on the stack since
+ * this trampoline code is pretty messy.
+ */
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fregs));
+ ARM_FSTMD (code, ARM_VFP_D0, 8, ARMREG_R0);
}
- ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, ip));
/*
* Now we're ready to call xxx_trampoline ().
*/
/* Arg 1: the saved registers */
- ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, iregs));
+ ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, iregs), 0);
/* Arg 2: code (next address to the instruction that called us) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
code += 4;
ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
} else {
+#ifdef USE_JUMP_TABLES
+ load_trampoline = mono_jumptable_add_entry ();
+ code = mono_arm_load_jumptable_entry (code, load_trampoline, ARMREG_IP);
+#else
load_trampoline = code;
code += 4;
+#endif
}
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
* clobbered). This way we can just restore all the regs in one inst
* and branch to IP.
*/
- ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, iregs) + (ARMREG_R12 * sizeof (mgreg_t)));
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, iregs) + (ARMREG_R12 * sizeof (mgreg_t)));
/* Check for thread interruption */
/* This is not perf critical code so no need to check the interrupt flag */
code += 4;
ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
} else {
+#ifdef USE_JUMP_TABLES
+ gpointer *jte = mono_jumptable_add_entry ();
+ code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
+ jte [0] = mono_thread_force_interruption_checkpoint;
+#else
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = mono_thread_force_interruption_checkpoint;
code += 4;
+#endif
}
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
code = emit_bx (code, ARMREG_IP);
* the same state as before we executed.
*/
/* ip = previous_lmf */
- ARM_LDR_IMM (code, ARMREG_IP, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
- ARM_LDR_IMM (code, ARMREG_LR, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
+ ARM_LDR_IMM (code, ARMREG_LR, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* *(lmf_addr) = previous_lmf */
- ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
+ ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
+
+ /* Restore VFP registers. */
+ if (mono_arm_is_hard_float ()) {
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fregs));
+ ARM_FLDMD (code, ARM_VFP_D0, 8, ARMREG_R0);
+ }
/* Non-standard function epilogue. Instead of doing a proper
* return, we just jump to the compiled code.
else
code = emit_bx (code, ARMREG_IP);
+#ifdef USE_JUMP_TABLES
+ load_get_lmf_addr [0] = mono_get_lmf_addr;
+ load_trampoline [0] = (gpointer)mono_get_trampoline_func (tramp_type);
+#else
constants = (gpointer*)code;
constants [0] = mono_get_lmf_addr;
constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);
}
code += 8;
+#endif
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
/* Sanity check */
g_assert ((code - buf) <= buf_len);
- if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
- /* Initialize the nullified class init trampoline used in the AOT case */
- nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (NULL);
-
- if (info)
- *info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);
+ if (info) {
+ tramp_name = mono_get_generic_trampoline_name (tramp_type);
+ *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
+ g_free (tramp_name);
+ }
return buf;
}
mono_arch_flush_icache (buf, code - buf);
if (info)
- *info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);
+ *info = mono_tramp_info_create ("nullified_class_init_trampoline", buf, code - buf, NULL, NULL);
return buf;
}
{
guint8 *code, *buf, *tramp;
gpointer *constants;
- guint32 short_branch, size = SPEC_TRAMP_SIZE;
+#ifndef USE_JUMP_TABLES
+ guint32 short_branch;
+#endif
+ guint32 size = SPEC_TRAMP_SIZE;
tramp = mono_get_trampoline_code (tramp_type);
mono_domain_lock (domain);
+#ifdef USE_JUMP_TABLES
+ code = buf = mono_domain_code_reserve_align (domain, size, 4);
+#else
code = buf = mono_domain_code_reserve_align (domain, size, 4);
if ((short_branch = branch_for_target_reachable (code + 4, tramp))) {
size = 12;
mono_domain_code_commit (domain, code, SPEC_TRAMP_SIZE, size);
}
+#endif
mono_domain_unlock (domain);
+#ifdef USE_JUMP_TABLES
+ /* For jumptables case we always generate the same code for trampolines,
+ * namely
+ * push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
+ * movw lr, lo(jte)
+ * movt lr, hi(jte)
+ * ldr r1, [lr + 4]
+ * bx r1
+ */
+ ARM_PUSH (code, 0x5fff);
+ constants = mono_jumptable_add_entries (2);
+ code = mono_arm_load_jumptable_entry_addr (code, constants, ARMREG_LR);
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, 4);
+ code = emit_bx (code, ARMREG_R1);
+ constants [0] = arg1;
+ constants [1] = tramp;
+#else
/* we could reduce this to 12 bytes if tramp is within reach:
* ARM_PUSH ()
* ARM_BL ()
constants [1] = tramp;
code += 8;
}
+#endif
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
{
guint8 *code, *start;
MonoDomain *domain = mono_domain_get ();
+#ifdef USE_JUMP_TABLES
+ gpointer *jte;
+ guint32 size = 20;
+#else
+ guint32 size = 16;
+#endif
- start = code = mono_domain_code_reserve (domain, 16);
+ start = code = mono_domain_code_reserve (domain, size);
+#ifdef USE_JUMP_TABLES
+ jte = mono_jumptable_add_entry ();
+ code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (MonoObject));
+ code = emit_bx (code, ARMREG_IP);
+ jte [0] = addr;
+#else
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 4);
ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (MonoObject));
code = emit_bx (code, ARMREG_IP);
*(guint32*)code = (guint32)addr;
code += 4;
+#endif
mono_arch_flush_icache (start, code - start);
- g_assert ((code - start) <= 16);
+ g_assert ((code - start) <= size);
/*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
g_print ("unbox code is at %p for method at %p\n", start, addr);*/
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
guint8 *code, *start;
- int buf_len;
-
+#ifdef USE_JUMP_TABLES
+ int buf_len = 20;
+ gpointer *jte;
+#else
+ int buf_len = 16;
+#endif
MonoDomain *domain = mono_domain_get ();
- buf_len = 16;
-
start = code = mono_domain_code_reserve (domain, buf_len);
+#ifdef USE_JUMP_TABLES
+ jte = mono_jumptable_add_entries (2);
+ code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
+ ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_IP, 0);
+ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 4);
+ ARM_BX (code, ARMREG_IP);
+ jte [0] = mrgctx;
+ jte [1] = addr;
+#else
ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_PC, 0);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_PC, 0);
*(guint32*)code = (guint32)mrgctx;
code += 4;
*(guint32*)code = (guint32)addr;
code += 4;
+#endif
g_assert ((code - start) <= buf_len);
gboolean mrgctx;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+#ifdef USE_JUMP_TABLES
+ gpointer *jte;
+#endif
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
} else {
/* load rgctx ptr from vtable */
- g_assert (arm_is_imm12 (G_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
- ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
+ g_assert (arm_is_imm12 (MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
/* is the rgctx ptr null? */
ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
/* if yes, jump to actual trampoline */
tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);
/* Jump to the actual trampoline */
+#ifdef USE_JUMP_TABLES
+ jte = mono_jumptable_add_entry ();
+ jte [0] = tramp;
+ code = mono_arm_load_jumptable_entry (code, jte, ARMREG_R1);
+ code = emit_bx (code, ARMREG_R1);
+#else
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
code = emit_bx (code, ARMREG_R1);
*(gpointer*)code = tramp;
code += 4;
+#endif
}
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
- if (info)
- *info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);
+ if (info) {
+ char *name = mono_get_rgctx_fetch_trampoline_name (slot);
+ *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
+ g_free (name);
+ }
return buf;
}
code += 4;
ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
} else {
+#ifdef USE_JUMP_TABLES
+ gpointer *jte = mono_jumptable_add_entry ();
+#endif
tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
/* Jump to the actual trampoline */
+#ifdef USE_JUMP_TABLES
+ code = mono_arm_load_jumptable_entry (code, jte, ARMREG_R1);
+ jte [0] = tramp;
+ code = emit_bx (code, ARMREG_R1);
+#else
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
code = emit_bx (code, ARMREG_R1);
*(gpointer*)code = tramp;
code += 4;
+#endif
}
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
if (info)
- *info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
+ *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
return buf;
}
g_assert_not_reached ();
return NULL;
}
+
+/* Stub: not reachable in this build configuration; asserts if called.
+ * NOTE(review): presumably the DISABLE_JIT branch — confirm which
+ * preprocessor branch this hunk lands in. */
+gpointer
+mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
#endif /* DISABLE_JIT */
return start;
}
+#else
+
+/* Stub for configurations without gsharedvt trampoline support; must
+ * never be called (asserts). */
+gpointer
+mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
#endif
-#if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
+#if defined(ENABLE_GSHAREDVT)
#include "../../../mono-extensions/mono/mini/tramp-arm-gsharedvt.c"
#else
gpointer
-mono_arm_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer *caller_regs, gpointer *callee_regs, gpointer mrgctx_reg)
+mono_arm_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
g_assert_not_reached ();
return NULL;