2 * tramp-arm.c: JIT trampoline code for ARM
5 * Paolo Molaro (lupus@ximian.com)
7 * (C) 2001 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/marshal.h>
15 #include <mono/metadata/tabledefs.h>
16 #include <mono/arch/arm/arm-codegen.h>
/* Address of the shared no-op trampoline ('bx lr') that class-init call
 * sites are redirected to once the class has been initialized.  Set in
 * mono_arch_create_trampoline_code_full () (JIT case) or looked up from
 * the AOT image in mono_arch_nullify_plt_entry () (AOT case). */
21 static guint8* nullified_class_init_trampoline;
24 * Return the instruction to jump from code to target, 0 if not
25 * reachable with a single instruction
28 branch_for_target_reachable (guint8 *branch, guint8 *target)
/* Encodes an unconditional ARM B instruction that jumps from 'branch'
 * to 'target'.  Returns 0 when the displacement does not fit in the
 * 24-bit branch immediate (i.e. not reachable with one instruction). */
/* ARM reads PC as the current instruction + 8, hence the -8 bias */
30 gint diff = target - branch - 8;
/* branch targets must be word aligned */
31 g_assert ((diff & 3) == 0);
/* Forward (non-negative) displacement: encode diff/4 directly.
 * NOTE(review): the guarding 'if (diff >= 0 && diff <= 33554431)'-style
 * condition for this return appears to have been lost in extraction —
 * confirm against the full source. */
34 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | (diff >> 2);
36 /* diff between 0 and -33554432 */
37 if (diff >= -33554432)
/* Backward displacement: mask to the 24-bit signed immediate field */
38 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | ((diff >> 2) & ~0xff000000);
44 emit_bx (guint8* code, int reg)
/* Emits an indirect branch to the address in 'reg' and returns the
 * advanced code pointer.  When Thumb interworking is supported a BX is
 * used (so Thumb targets work); otherwise a plain 'mov pc, reg'.
 * NOTE(review): the ARM_BX emission on the thumb-supported path appears
 * to have been dropped by extraction — confirm against the full source. */
46 if (mono_arm_thumb_supported ())
49 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
54 * mono_arch_get_unbox_trampoline:
55 * @gsctx: the generic sharing context
57 * @addr: pointer to native code for @m
59 * when value type methods are called through the vtable we need to unbox the
60 * this argument. This method returns a pointer to a trampoline which does
61 * unboxing before calling the method
64 mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
/* Returns a 16-byte trampoline that unboxes the 'this' argument (adds
 * sizeof (MonoObject) to skip the object header) and then jumps to the
 * native code at 'addr'.  Used when valuetype methods are called
 * through the vtable. */
68 MonoDomain *domain = mono_domain_get ();
/* Valuetype-returning methods take a hidden return-buffer argument
 * first, shifting 'this' to the next register.
 * NOTE(review): the this_pos assignment guarded by this check appears
 * to have been dropped by extraction — confirm against the full source. */
70 if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
73 start = code = mono_domain_code_reserve (domain, 16);
/* ldr ip, [pc, #4] — loads 'addr' from the inline literal stored below */
75 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 4);
/* skip the MonoObject header so 'this' points at the valuetype data */
76 ARM_ADD_REG_IMM8 (code, this_pos, this_pos, sizeof (MonoObject));
77 code = emit_bx (code, ARMREG_IP);
/* inline literal: the target method address read by the ldr above */
78 *(guint32*)code = (guint32)addr;
80 mono_arch_flush_icache (start, code - start);
81 g_assert ((code - start) <= 16);
82 /*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
83 g_print ("unbox code is at %p for method at %p\n", start, addr);*/
89 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
/* Returns a small trampoline that loads 'mrgctx' into
 * MONO_ARCH_RGCTX_REG and then jumps to 'addr'.  Both values are
 * stored as inline literal words immediately after the two load
 * instructions. */
94 MonoDomain *domain = mono_domain_get ();
98 start = code = mono_domain_code_reserve (domain, buf_len);
/* ldr rgctx_reg, [pc, #0] — PC reads 8 ahead, so this loads the first
 * literal word emitted below */
100 ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_PC, 0);
/* ldr pc, [pc, #0] — jumps via the second literal word */
101 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_PC, 0);
102 *(guint32*)code = (guint32)mrgctx;
104 *(guint32*)code = (guint32)addr;
107 g_assert ((code - start) <= buf_len);
109 mono_arch_flush_icache (start, code - start);
115 mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
/* Redirects the call instruction at the call site identified by
 * 'code_ptr' so it targets 'addr'.  Handles both a direct 'bl' and an
 * indirect 'mov pc'/'bx' call sequence.
 * NOTE(review): 'code' is expected to be adjusted to point at the call
 * instruction itself (a decrement appears to have been dropped by
 * extraction) — confirm against the full source. */
117 guint32 *code = (guint32*)code_ptr;
119 /* This is the 'bl' or the 'mov pc' instruction */
123 * Note that methods are called also with the bl opcode.
/* bits 27-25 == 101b identify a B/BL instruction */
125 if ((((*code) >> 25) & 7) == 5) {
126 /*g_print ("direct patching\n");*/
127 arm_patch ((guint8*)code, addr);
128 mono_arch_flush_icache ((guint8*)code, 4);
/* 0x12 in bits 27-20 matches the BX / 'mov pc' encoding */
132 if ((((*code) >> 20) & 0xFF) == 0x12) {
133 /*g_print ("patching bx\n");*/
134 arm_patch ((guint8*)code, addr);
/* the load feeding the bx sits two words before the bx itself */
135 mono_arch_flush_icache ((guint8*)(code - 2), 4);
/* unrecognized call sequence — should never happen */
139 g_assert_not_reached ();
143 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
/* Patches the jump-table slot used by the PLT entry at 'code' so that
 * future calls through it go straight to 'addr'.  Only the data slot
 * is written, so no icache flush is required. */
147 /* Patch the jump table entry used by the plt entry */
/* 0xe59fc000 == 'ldr ip, [pc, #0]' — the first instruction of an ARM
 * PLT entry; anything else is unexpected */
148 if (*(guint32*)code == 0xe59fc000) {
149 /* ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); */
/* the third word of the entry holds the jump-table slot offset */
150 guint32 offset = ((guint32*)code)[2];
/* +12 accounts for the two instructions plus the PC-read-ahead bias */
152 jump_entry = code + offset + 12;
154 g_assert_not_reached ();
/* store the new target into the jump-table slot */
157 *(guint8**)jump_entry = addr;
161 mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
/* Once a class is initialized its class-init call site becomes a no-op:
 * redirect it to the shared 'bx lr' trampoline. */
163 mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
167 mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
/* Same as mono_arch_nullify_class_init_trampoline () but for class-init
 * calls that go through a PLT entry. */
/* In AOT-only mode the nullified trampoline was never created by
 * mono_arch_create_trampoline_code_full (), so look it up lazily in
 * the AOT image. */
169 if (mono_aot_only && !nullified_class_init_trampoline)
170 nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");
172 mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
175 /* Stack size for trampoline function
177 #define STACK (sizeof (MonoLMF))
179 /* Method-specific trampoline code fragment size */
180 #define METHOD_TRAMPOLINE_SIZE 64
182 /* Jump-specific trampoline code fragment size */
183 #define JUMP_TRAMPOLINE_SIZE 64
185 #define GEN_TRAMP_SIZE 196
188 * Stack frame description when the generic trampoline is called.
190 * ------------------- old sp
192 * ------------------- sp
195 mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
/* Non-AOT entry point: delegates to the _full variant, registers the
 * generated code with the xdebug machinery using the collected unwind
 * ops, then frees the unwind op list. */
200 GSList *unwind_ops, *l;
202 code = mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, &unwind_ops, FALSE);
204 mono_save_trampoline_xdebug_info ("<generic_trampoline>", code, code_size, unwind_ops);
/* NOTE(review): the loop body freeing each unwind op appears to have
 * been dropped by extraction — confirm against the full source. */
206 for (l = unwind_ops; l; l = l->next)
208 g_slist_free (unwind_ops);
214 mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
/* Generates the generic trampoline shared by all methods for the given
 * trampoline type.  On entry to the generated code, LR points at the
 * specific argument and SP at the registers pushed by the specific
 * trampoline.  The generated code builds a MonoLMF on the stack, calls
 * the C trampoline function, then restores state and jumps to the
 * returned code address (or returns, for class-init/rgctx types).
 * Out params: *code_size, *ji (AOT patch info), *out_unwind_ops. */
216 guint8 *buf, *code = NULL;
217 guint8 *load_get_lmf_addr, *load_trampoline;
219 GSList *unwind_ops = NULL;
224 /* Now we'll create in 'buf' the ARM trampoline code. This
225 is the trampoline code common to all methods */
227 code = buf = mono_global_codeman_reserve (GEN_TRAMP_SIZE);
230 * At this point lr points to the specific arg and sp points to the saved
231 * regs on the stack (all but PC and SP). The original LR value has been
232 * saved as sp + LR_OFFSET by the push in the specific trampoline
/* 13 registers (r0-r12) precede the saved LR on the stack */
234 #define LR_OFFSET (sizeof (gpointer) * 13)
236 // FIXME: Finish the unwind info, the current info allows us to unwind
237 // when the trampoline is not in the epilog
239 // CFA = SP + (num registers pushed) * 4
240 cfa_offset = 14 * sizeof (gpointer);
241 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
242 // PC saved at sp+LR_OFFSET
243 mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, -4);
/* v1 = pointer to the saved register block (used later as arg 1) */
245 ARM_MOV_REG_REG (code, ARMREG_V1, ARMREG_SP);
246 if (aot && tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
248 * The trampoline contains a pc-relative offset to the got slot
249 * preceeding the got slot where the value is stored. The offset can be
/* AOT case: load the specific argument indirectly through the GOT */
252 ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
253 ARM_ADD_REG_IMM (code, ARMREG_V2, ARMREG_V2, 4, 0);
254 ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
/* JIT case: the specific argument word sits right at [lr] */
256 if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
257 ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
/* generic-class-init passes its argument (the vtable) in a register */
259 ARM_MOV_REG_REG (code, ARMREG_V2, MONO_ARCH_VTABLE_REG);
/* v3 = the caller's original LR, i.e. the return address of the call
 * that hit the trampoline */
261 ARM_LDR_IMM (code, ARMREG_V3, ARMREG_SP, LR_OFFSET);
263 /* ok, now we can continue with the MonoLMF setup, mostly untouched
264 * from emit_prolog in mini-arm.c
265 * This is a synthetized call to mono_get_lmf_addr ()
/* AOT: record a patch site and load the icall address from the GOT */
268 *ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
269 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
271 *(gpointer*)code = NULL;
273 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
/* JIT: placeholder load, backpatched below once the literal pool
 * location is known */
275 load_get_lmf_addr = code;
/* 'mov lr, pc' sets the return address to the instruction after the
 * following bx (PC reads 8 ahead) */
278 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
279 code = emit_bx (code, ARMREG_R0);
281 /* we build the MonoLMF structure on the stack - see mini-arm.h
282 * The pointer to the struct is put in r1.
283 * the iregs array is already allocated on the stack by push.
/* allocate the rest of the LMF (14 regs were already pushed) */
285 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoLMF) - sizeof (guint) * 14);
286 cfa_offset += sizeof (MonoLMF) - sizeof (guint) * 14;
287 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
288 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_SP, STACK - sizeof (MonoLMF));
289 /* r0 is the result from mono_get_lmf_addr () */
290 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
291 /* new_lmf->previous_lmf = *lmf_addr */
292 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
293 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
294 /* *(lmf_addr) = r1 */
295 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
296 /* save method info (it's in v2) */
297 if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
298 ARM_STR_IMM (code, ARMREG_V2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
/* other trampoline types carry no MonoMethod — store NULL */
300 ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
301 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
302 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
304 /* save the IP (caller ip) */
/* jump trampolines have no meaningful caller IP */
305 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
306 ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
308 /* assumes STACK == sizeof (MonoLMF) */
309 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_SP, (G_STRUCT_OFFSET (MonoLMF, iregs) + 13*4));
311 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
314 * Now we're ready to call xxx_trampoline ().
316 /* Arg 1: the saved registers. It was put in v1 */
317 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V1);
319 /* Arg 2: code (next address to the instruction that called us) */
320 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
321 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0);
323 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_V3);
326 /* Arg 3: the specific argument, stored in v2
328 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_V2);
/* AOT: resolve the per-type C trampoline function through the GOT */
331 char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
332 *ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
333 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
335 *(gpointer*)code = NULL;
337 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
/* JIT: placeholder load, backpatched below (constants [1]) */
339 load_trampoline = code;
343 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
344 code = emit_bx (code, ARMREG_IP);
346 /* OK, code address is now on r0. Move it to the place on the stack
347 * where IP was saved (it is now no more useful to us and it can be
348 * clobbered). This way we can just restore all the regs in one inst
351 ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, (ARMREG_R12 * 4));
353 /* Check for thread interruption */
354 /* This is not perf critical code so no need to check the interrupt flag */
356 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
359 *ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
360 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
362 *(gpointer*)code = NULL;
364 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
/* JIT: the checkpoint address is embedded inline right after the ldr */
366 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
368 *(gpointer*)code = mono_thread_force_interruption_checkpoint;
371 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
372 code = emit_bx (code, ARMREG_IP);
375 * Now we restore the MonoLMF (see emit_epilogue in mini-arm.c)
376 * and the rest of the registers, so the method called will see
377 * the same state as before we executed.
378 * The pointer to MonoLMF is in r2.
380 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_SP);
381 /* ip = previous_lmf */
382 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
384 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
385 /* *(lmf_addr) = previous_lmf */
386 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
388 /* Non-standard function epilogue. Instead of doing a proper
389 * return, we just jump to the compiled code.
391 /* Restore the registers and jump to the code:
392 * Note that IP has been conveniently set to the method addr.
/* pop the LMF except the 14 pushed registers */
394 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoLMF) - sizeof (guint) * 14);
/* restore r0-r12 and lr (mask 0x5fff), no writeback */
395 ARM_POP_NWB (code, 0x5fff);
/* rgctx fetch returns its result to the caller in r0 (saved in ip slot) */
396 if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
397 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_IP);
398 /* do we need to set sp? */
399 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, (14 * 4));
/* class-init / rgctx types return to the caller; the others jump to
 * the freshly compiled code whose address is now in ip */
400 if ((tramp_type == MONO_TRAMPOLINE_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
401 code = emit_bx (code, ARMREG_LR);
403 code = emit_bx (code, ARMREG_IP);
/* JIT literal pool consumed by the backpatched loads below */
405 constants = (gpointer*)code;
406 constants [0] = mono_get_lmf_addr;
407 constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);
410 /* backpatch by emitting the missing instructions skipped above */
411 ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (code - load_get_lmf_addr - 8));
412 ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (code + 4 - load_trampoline - 8));
417 /* Flush instruction cache, since we've generated code */
418 mono_arch_flush_icache (buf, code - buf);
421 g_assert ((code - buf) <= GEN_TRAMP_SIZE);
423 *code_size = code - buf;
425 if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
428 /* Initialize the nullified class init trampoline used in the AOT case */
429 nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
432 *out_unwind_ops = unwind_ops;
438 mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
/* Creates the trivial trampoline used to replace class-init call sites
 * once the class is initialized: it simply returns to the caller
 * ('bx lr').  *code_len receives the generated code size. */
442 code = buf = mono_global_codeman_reserve (16);
444 code = emit_bx (code, ARMREG_LR);
446 mono_arch_flush_icache (buf, code - buf);
448 *code_len = code - buf;
453 #define SPEC_TRAMP_SIZE 24
456 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
/* Creates the small per-call-site trampoline: it pushes all registers
 * (except PC/SP) and transfers to the shared generic trampoline for
 * 'tramp_type', passing 'arg1' as the specific argument (stored inline
 * and reached through LR by the generic code).  Uses a short direct
 * branch when the generic trampoline is reachable, otherwise an
 * indirect jump through an inline literal. */
458 guint8 *code, *buf, *tramp;
460 guint32 short_branch, size = SPEC_TRAMP_SIZE;
462 tramp = mono_get_trampoline_code (tramp_type);
/* code reservation/commit happens under the domain lock */
464 mono_domain_lock (domain);
465 code = buf = mono_domain_code_reserve_align (domain, size, 4);
/* +8: the branch will be the third instruction (after push and the
 * mov lr, pc) */
466 if ((short_branch = branch_for_target_reachable (code + 8, tramp))) {
/* NOTE(review): the reduced 'size' for the short-branch case appears
 * to have been dropped by extraction — confirm against the full source. */
468 mono_domain_code_commit (domain, code, SPEC_TRAMP_SIZE, size);
470 mono_domain_unlock (domain);
472 /* we could reduce this to 12 bytes if tramp is within reach:
476 * The called code can access method using the lr register
477 * A 20 byte sequence could be:
479 * ARM_MOV_REG_REG (lr, pc)
480 * ARM_LDR_IMM (pc, pc, 0)
484 /* We save all the registers, except PC and SP */
485 ARM_PUSH (code, 0x5fff);
/* short-branch case: store the BL encoding (branch | link bit) and the
 * specific argument inline */
487 constants = (gpointer*)code;
488 constants [0] = GUINT_TO_POINTER (short_branch | (1 << 24));
489 constants [1] = arg1;
/* long case: load the generic trampoline address from the literal pool
 * and branch to it, leaving LR pointing at the inline constants */
492 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 8); /* temp reg */
493 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
494 code = emit_bx (code, ARMREG_R1);
496 constants = (gpointer*)code;
497 constants [0] = arg1;
498 constants [1] = tramp;
502 /* Flush instruction cache, since we've generated code */
503 mono_arch_flush_icache (buf, code - buf);
505 g_assert ((code - buf) <= size);
508 *code_len = code - buf;
513 #define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
516 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
/* Non-AOT wrapper: delegates to the _full variant with aot == FALSE. */
521 return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
525 mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
/* Generates the fast-path trampoline for lazily fetching rgctx slot
 * 'slot'.  The generated code walks the rgctx array chain (depth levels)
 * and returns the slot value in R0 if it is already filled; on any NULL
 * along the way it falls through to the slow-path specific trampoline
 * which computes and caches the value. */
531 guint8 **rgctx_null_jumps;
/* decode the packed slot: mrgctx flag + index */
538 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
539 index = MONO_RGCTX_SLOT_INDEX (slot);
/* for method rgctx, the first entries overlay the MRGCTX header */
541 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find which array in the chain holds 'index' (and the residual index) */
542 for (depth = 0; ; ++depth) {
543 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
545 if (index < size - 1)
/* 16 bytes of code per chain level on top of a fixed base */
550 tramp_size = 64 + 16 * depth;
552 code = buf = mono_global_codeman_reserve (tramp_size);
/* one possible null-check branch per level, plus vtable + slot checks */
554 rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
557 /* The vtable/mrgctx is in R0 */
558 g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);
/* mrgctx case: R1 starts at the mrgctx itself */
562 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
564 /* load rgctx ptr from vtable */
565 g_assert (arm_is_imm12 (G_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
566 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
567 /* is the rgctx ptr null? */
568 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
569 /* if yes, jump to actual trampoline */
570 rgctx_null_jumps [njumps ++] = code;
571 ARM_B_COND (code, ARMCOND_EQ, 0);
/* walk down the chain of arrays */
574 for (i = 0; i < depth; ++i) {
575 /* load ptr to next array */
576 if (mrgctx && i == 0) {
/* the first 'next' pointer lives right after the MRGCTX header */
577 g_assert (arm_is_imm12 (MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT));
578 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
580 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, 0);
582 /* is the ptr null? */
583 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
584 /* if yes, jump to actual trampoline */
585 rgctx_null_jumps [njumps ++] = code;
586 ARM_B_COND (code, ARMCOND_EQ, 0);
/* fetch the slot itself (+1 skips the 'next' pointer at slot 0) */
590 code = mono_arm_emit_load_imm (code, ARMREG_R2, sizeof (gpointer) * (index + 1));
591 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_R1, ARMREG_R2);
592 /* is the slot null? */
593 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
594 /* if yes, jump to actual trampoline */
595 rgctx_null_jumps [njumps ++] = code;
596 ARM_B_COND (code, ARMCOND_EQ, 0);
597 /* otherwise return, result is in R1 */
598 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
599 code = emit_bx (code, ARMREG_LR);
/* slow path: all the null-check branches land here */
601 g_assert (njumps <= depth + 2);
602 for (i = 0; i < njumps; ++i)
603 arm_patch (rgctx_null_jumps [i], code);
605 g_free (rgctx_null_jumps);
609 /* The vtable/mrgctx is still in R0 */
/* AOT: jump to the per-slot specific trampoline through the GOT */
612 *ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
613 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
615 *(gpointer*)code = NULL;
617 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
/* JIT: create the slow-path trampoline now and jump to it via an
 * inline literal */
619 tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);
621 /* Jump to the actual trampoline */
622 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
623 code = emit_bx (code, ARMREG_R1);
624 *(gpointer*)code = tramp;
628 mono_arch_flush_icache (buf, code - buf);
630 g_assert (code - buf <= tramp_size);
632 *code_size = code - buf;
637 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
640 mono_arch_create_generic_class_init_trampoline (void)
/* Non-AOT wrapper: delegates to the _full variant with aot == FALSE. */
645 return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
649 mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
/* Generates the generic-class-init trampoline.  Fast path: test the
 * 'initialized' bit in the MonoVTable (passed in
 * MONO_ARCH_VTABLE_REG) and return immediately when set.  Slow path:
 * fall through to the specific trampoline that runs the class
 * initializer. */
/* bitfield location is looked up once and cached across calls */
653 static int byte_offset = -1;
654 static guint8 bitmask;
657 guint32 code_len, imm8;
664 code = buf = mono_global_codeman_reserve (tramp_size);
667 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
/* load the byte containing the 'initialized' flag */
669 g_assert (arm_is_imm8 (byte_offset));
670 ARM_LDRSB_IMM (code, ARMREG_IP, MONO_ARCH_VTABLE_REG, byte_offset);
/* the bitmask must be expressible as a rotated 8-bit ARM immediate */
671 imm8 = mono_arm_is_rotated_imm8 (bitmask, &rot_amount);
672 g_assert (imm8 >= 0);
673 ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
674 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
/* flag clear -> class not initialized -> branch to slow path below */
676 ARM_B_COND (code, ARMCOND_EQ, 0);
678 /* Initialized case */
679 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
681 /* Uninitialized case */
682 arm_patch (jump, code);
/* AOT: jump to the specific trampoline through the GOT */
685 *ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
686 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
688 *(gpointer*)code = NULL;
690 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
/* JIT: create the slow-path trampoline and jump via an inline literal */
692 tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
694 /* Jump to the actual trampoline */
695 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
696 code = emit_bx (code, ARMREG_R1);
697 *(gpointer*)code = tramp;
701 mono_arch_flush_icache (buf, code - buf);
703 g_assert (code - buf <= tramp_size);
705 *code_size = code - buf;