2 * tramp-arm.c: JIT trampoline code for ARM
5 * Paolo Molaro (lupus@ximian.com)
7 * (C) 2001 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/marshal.h>
15 #include <mono/metadata/tabledefs.h>
16 #include <mono/arch/arm/arm-codegen.h>
21 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
23 static guint8* nullified_class_init_trampoline;
/*
 * mono_arch_patch_callsite:
 * Redirect the call instruction at CODE_PTR so it transfers control to ADDR.
 * Handles a direct branch ('b'/'bl': opcode bits 27-25 == 101) and an
 * indirect 'bx'/'mov pc' form (bits 27-20 == 0x12); asserts on anything else.
 * NOTE(review): the return type, opening brace and some interior lines are
 * not visible in this chunk.
 */
26 mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
28 guint32 *code = (guint32*)code_ptr;
30 /* This is the 'bl' or the 'mov pc' instruction */
34 * Note that methods are called also with the bl opcode.
36 if ((((*code) >> 25) & 7) == 5) {
37 /*g_print ("direct patching\n");*/
/* Rewrite the branch displacement in place, then flush the icache so the
 * CPU picks up the patched instruction. */
38 arm_patch ((guint8*)code, addr);
39 mono_arch_flush_icache ((guint8*)code, 4);
/* Indirect call: opcode field 0x12 ('bx'/'mov pc'). The flush below starts
 * at (code - 2), presumably because the target constant precedes the
 * instruction — TODO confirm against arm_patch (). */
43 if ((((*code) >> 20) & 0xFF) == 0x12) {
44 /*g_print ("patching bx\n");*/
45 arm_patch ((guint8*)code, addr);
46 mono_arch_flush_icache ((guint8*)(code - 2), 4);
/* Unknown call-site encoding: nothing we can safely patch. */
50 g_assert_not_reached ();
/*
 * mono_arch_patch_plt_entry:
 * Patch a PLT entry so future calls through it land on ADDR. Locates the
 * jump-table slot referenced by the entry and stores ADDR into it; supports
 * both the ARM and the Thumb PLT entry layouts. The slot is data, so no
 * icache flush is performed here.
 */
54 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
58 /* Patch the jump table entry used by the plt entry */
/* ARM entry: first word is 'ldr ip, [pc, #0]' (0xe59fc000); the third word
 * holds the pc-relative offset of the jump-table slot. */
59 if (*(guint32*)code == 0xe59fc000) {
60 /* ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); */
61 guint32 offset = ((guint32*)code)[2];
/* +12 accounts for the ARM pc read-ahead relative to the entry start. */
63 jump_entry = code + offset + 12;
64 } else if (*(guint16*)(code - 4) == 0xf8df) {
66 * Thumb PLT entry, begins with ldr.w ip, [pc, #8], code points to entry + 4, see
67 * mono_arm_get_thumb_plt_entry ().
72 offset = *(guint32*)(code + 12);
73 jump_entry = code + offset + 8;
/* Neither recognized PLT layout — cannot patch safely. */
75 g_assert_not_reached ();
/* Store the new target into the jump-table slot. */
78 *(guint8**)jump_entry = addr;
/*
 * mono_arch_nullify_class_init_trampoline:
 * Replace the class-init call at CODE with a call to the no-op
 * nullified_class_init_trampoline, so subsequent executions skip the
 * already-completed class initialization.
 */
82 mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
84 mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline);
/*
 * mono_arch_nullify_plt_entry:
 * Point the PLT entry at CODE to the no-op nullified class-init trampoline.
 * In AOT-only mode the trampoline is fetched lazily from the AOT image on
 * first use.
 */
88 mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
90 if (mono_aot_only && !nullified_class_init_trampoline)
91 nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");
93 mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
98 #define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
101 * Return the instruction to jump from code to target, 0 if not
102 * reachable with a single instruction
/* NOTE(review): the return type line is not visible in this chunk —
 * presumably guint32 (an encoded ARM branch instruction). */
105 branch_for_target_reachable (guint8 *branch, guint8 *target)
/* The displacement is relative to PC, which reads 8 bytes ahead on ARM;
 * it must be word-aligned. */
107 gint diff = target - branch - 8;
108 g_assert ((diff & 3) == 0);
/* Forward limit of the 24-bit signed word offset: 0x7fffff * 4 - 1. */
110 if (diff <= 33554431)
111 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | (diff >> 2);
113 /* diff between 0 and -33554432 */
114 if (diff >= -33554432)
/* Mask the shifted displacement down to 24 bits for the encoding. */
115 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | ((diff >> 2) & ~0xff000000);
/*
 * emit_bx:
 * Emit an indirect jump to REG at CODE and return the advanced code pointer.
 * When Thumb interworking is supported a BX is emitted (that arm is not
 * visible in this chunk); otherwise fall back to 'mov pc, reg' for
 * pre-interworking cores.
 */
120 static inline guint8*
121 emit_bx (guint8* code, int reg)
123 if (mono_arm_thumb_supported ())
126 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
130 /* Stack size for trampoline function
132 #define STACK ALIGN_TO (sizeof (MonoLMF), 8)
134 /* Method-specific trampoline code fragment size */
135 #define METHOD_TRAMPOLINE_SIZE 64
137 /* Jump-specific trampoline code fragment size */
138 #define JUMP_TRAMPOLINE_SIZE 64
/*
 * mono_arch_create_generic_trampoline:
 * Generate the trampoline code shared by all methods for TRAMP_TYPE. The
 * emitted code saves the machine state into a MonoLMF on the stack, calls
 * the C trampoline function for TRAMP_TYPE, restores state, and finally
 * either returns to the caller or jumps to the address the C function
 * returned, depending on the trampoline type. Fills *INFO with the
 * name/unwind/patch metadata. NOTE(review): several structural lines of
 * this function (braces, some declarations, else arms) are not visible in
 * this chunk.
 */
141 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
143 guint8 *buf, *code = NULL;
144 guint8 *load_get_lmf_addr, *load_trampoline;
146 int cfa_offset, lmf_offset, regsave_size, lr_offset;
147 GSList *unwind_ops = NULL;
148 MonoJumpInfo *ji = NULL;
151 /* Now we'll create in 'buf' the ARM trampoline code. This
152 is the trampoline code common to all methods */
155 code = buf = mono_global_codeman_reserve (buf_len);
158 * At this point lr points to the specific arg and sp points to the saved
159 * regs on the stack (all but PC and SP). The original LR value has been
160 * saved as sp + LR_OFFSET by the push in the specific trampoline
163 /* The offset of lmf inside the stack frame */
164 lmf_offset = STACK - sizeof (MonoLMF);
165 /* The size of the area already allocated by the push in the specific trampoline */
166 regsave_size = 14 * sizeof (mgreg_t);
167 /* The offset where lr was saved inside the regsave area */
168 lr_offset = 13 * sizeof (mgreg_t);
170 // FIXME: Finish the unwind info, the current info allows us to unwind
171 // when the trampoline is not in the epilog
173 // CFA = SP + (num registers pushed) * 4
174 cfa_offset = 14 * sizeof (mgreg_t);
175 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
176 // PC saved at sp+LR_OFFSET
177 mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, -4);
/* AOT: the specific argument must be loaded indirectly via a GOT slot
 * rather than being embedded as an inline constant. */
179 if (aot && tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
181 * The trampoline contains a pc-relative offset to the got slot
182 * preceding the got slot where the value is stored. The offset can be
185 ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
186 ARM_ADD_REG_IMM (code, ARMREG_V2, ARMREG_V2, 4, 0);
187 ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
189 if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
/* Non-AOT: the specific argument word sits right at *lr. */
190 ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
/* Generic class init: the vtable arrives in MONO_ARCH_VTABLE_REG. */
192 ARM_MOV_REG_REG (code, ARMREG_V2, MONO_ARCH_VTABLE_REG);
/* v3 = the caller's return address, saved in the regsave area. */
194 ARM_LDR_IMM (code, ARMREG_V3, ARMREG_SP, lr_offset);
196 /* ok, now we can continue with the MonoLMF setup, mostly untouched
197 * from emit_prolog in mini-arm.c
198 * This is a synthesized call to mono_get_lmf_addr ()
201 ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
202 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Placeholder word, resolved by the AOT compiler via the patch info above. */
204 *(gpointer*)code = NULL;
206 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
/* Remember this spot: the non-AOT load is back-patched further down once
 * the constant-pool address is known. */
208 load_get_lmf_addr = code;
/* Synthesized call: lr = pc (reads 8 ahead, landing just past the bx),
 * then jump through r0. */
211 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
212 code = emit_bx (code, ARMREG_R0);
214 /* we build the MonoLMF structure on the stack - see mini-arm.h
215 * The pointer to the struct is put in r1.
216 * the iregs array is already allocated on the stack by push.
218 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
219 cfa_offset += STACK - regsave_size;
220 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
222 ARM_ADD_REG_IMM8 (code, ARMREG_V1, ARMREG_SP, STACK - sizeof (MonoLMF));
225 * The stack now looks like:
227 * v1 -> <rest of LMF>
231 /* r0 is the result from mono_get_lmf_addr () */
232 ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
233 /* new_lmf->previous_lmf = *lmf_addr */
234 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
235 ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
236 /* *(lmf_addr) = r1 */
237 ARM_STR_IMM (code, ARMREG_V1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
238 /* save method info (it's in v2) */
239 if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
240 ARM_STR_IMM (code, ARMREG_V2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, method));
/* Other trampoline types carry no MonoMethod: store NULL instead. */
242 ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
243 ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, method));
/* lmf->sp = SP as it was before the trampoline pushed anything. */
246 ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, cfa_offset);
247 ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, sp));
/* lmf->fp = saved FP, fetched from the iregs save area. */
249 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (G_STRUCT_OFFSET (MonoLMF, iregs) + ARMREG_FP*4));
250 ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, fp));
251 /* save the IP (caller ip) */
252 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
253 ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
255 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (G_STRUCT_OFFSET (MonoLMF, iregs) + 13*4));
257 ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, ip));
260 * Now we're ready to call xxx_trampoline ().
262 /* Arg 1: the saved registers */
263 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, iregs));
265 /* Arg 2: code (next address to the instruction that called us) */
266 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
267 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0);
269 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_V3);
272 /* Arg 3: the specific argument, stored in v2
274 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_V2);
277 char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
278 ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
279 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder word, resolved via the patch info above (AOT path). */
281 *(gpointer*)code = NULL;
283 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
/* Non-AOT: remember this load for back-patching below. */
285 load_trampoline = code;
/* Synthesized call to the trampoline function through ip. */
289 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
290 code = emit_bx (code, ARMREG_IP);
292 /* OK, code address is now on r0. Move it to the place on the stack
293 * where IP was saved (it is now no more useful to us and it can be
294 * clobbered). This way we can just restore all the regs in one inst
297 ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, iregs) + (ARMREG_R12 * sizeof (mgreg_t)));
299 /* Check for thread interruption */
300 /* This is not perf critical code so no need to check the interrupt flag */
302 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
305 ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
306 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
308 *(gpointer*)code = NULL;
310 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
/* Non-AOT: embed the checkpoint function's address inline. */
312 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
314 *(gpointer*)code = mono_thread_force_interruption_checkpoint;
317 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
318 code = emit_bx (code, ARMREG_IP);
321 * Now we restore the MonoLMF (see emit_epilogue in mini-arm.c)
322 * and the rest of the registers, so the method called will see
323 * the same state as before we executed.
325 /* ip = previous_lmf */
326 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
328 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_V1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
329 /* *(lmf_addr) = previous_lmf */
330 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
332 /* Non-standard function epilogue. Instead of doing a proper
333 * return, we just jump to the compiled code.
335 /* Restore the registers and jump to the code:
336 * Note that IP has been conveniently set to the method addr.
338 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
/* 0x5fff = every register except PC and SP, mirroring the push done by the
 * specific trampoline. */
339 ARM_POP_NWB (code, 0x5fff);
340 if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
/* Lazy-fetch trampolines return their result in r0 (copied from ip). */
341 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_IP);
342 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, regsave_size);
343 if ((tramp_type == MONO_TRAMPOLINE_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) || (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH))
/* These trampoline types return to the caller rather than jumping to
 * freshly-compiled code. */
344 code = emit_bx (code, ARMREG_LR);
346 code = emit_bx (code, ARMREG_IP);
/* Inline constant pool consumed by the pc-relative loads emitted above
 * (non-AOT path). */
348 constants = (gpointer*)code;
349 constants [0] = mono_get_lmf_addr;
350 constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);
353 /* backpatch by emitting the missing instructions skipped above */
354 ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (code - load_get_lmf_addr - 8));
355 ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (code + 4 - load_trampoline - 8));
360 /* Flush instruction cache, since we've generated code */
361 mono_arch_flush_icache (buf, code - buf);
364 g_assert ((code - buf) <= buf_len);
366 if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
367 /* Initialize the nullified class init trampoline used in the AOT case */
368 nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (NULL);
371 *info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);
/*
 * mono_arch_get_nullified_class_init_trampoline:
 * Generate a trivial trampoline that immediately returns to the caller
 * (a single indirect jump through lr); used to replace class-init calls
 * once the class has been initialized.
 */
377 mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
381 code = buf = mono_global_codeman_reserve (16);
383 code = emit_bx (code, ARMREG_LR);
385 mono_arch_flush_icache (buf, code - buf);
388 *info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);
393 #define SPEC_TRAMP_SIZE 24
/*
 * mono_arch_create_specific_trampoline:
 * Create a small per-argument trampoline in DOMAIN's code area that pushes
 * the registers and transfers to the shared generic trampoline for
 * TRAMP_TYPE, making ARG1 reachable through lr. A short direct branch is
 * used when the generic trampoline is reachable with one instruction,
 * otherwise its absolute address is embedded in a constant pool.
 * *CODE_LEN receives the size of the generated code. NOTE(review): several
 * structural lines are not visible in this chunk.
 */
396 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
398 guint8 *code, *buf, *tramp;
400 guint32 short_branch, size = SPEC_TRAMP_SIZE;
402 tramp = mono_get_trampoline_code (tramp_type);
404 mono_domain_lock (domain);
405 code = buf = mono_domain_code_reserve_align (domain, size, 4);
/* Probe whether a single branch from (code + 4) can reach the generic
 * trampoline. */
406 if ((short_branch = branch_for_target_reachable (code + 4, tramp))) {
408 mono_domain_code_commit (domain, code, SPEC_TRAMP_SIZE, size);
410 mono_domain_unlock (domain);
412 /* we could reduce this to 12 bytes if tramp is within reach:
416 * The called code can access method using the lr register
417 * A 20 byte sequence could be:
419 * ARM_MOV_REG_REG (lr, pc)
420 * ARM_LDR_IMM (pc, pc, 0)
424 /* We save all the registers, except PC and SP */
425 ARM_PUSH (code, 0x5fff);
/* Short-branch form: store the encoded branch with the link bit (1 << 24)
 * set, i.e. a 'bl', plus ARG1 in the constant pool. */
427 constants = (gpointer*)code;
428 constants [0] = GUINT_TO_POINTER (short_branch | (1 << 24));
429 constants [1] = arg1;
/* Long form: load the trampoline address from the pool and call through r1. */
432 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 8); /* temp reg */
433 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
434 code = emit_bx (code, ARMREG_R1);
/* Constant pool: the specific argument, then the trampoline address. */
436 constants = (gpointer*)code;
437 constants [0] = arg1;
438 constants [1] = tramp;
442 /* Flush instruction cache, since we've generated code */
443 mono_arch_flush_icache (buf, code - buf);
445 g_assert ((code - buf) <= size);
448 *code_len = code - buf;
454 * mono_arch_get_unbox_trampoline:
456 * @addr: pointer to native code for @m
458 * when value type methods are called through the vtable we need to unbox the
459 * this argument. This method returns a pointer to a trampoline which does
460 * unboxing before calling the method
463 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
465 guint8 *code, *start;
466 MonoDomain *domain = mono_domain_get ();
468 start = code = mono_domain_code_reserve (domain, 16);
/* Advance r0 ('this') past the MonoObject header so it points at the
 * value-type payload, then jump to the real method through ip. */
470 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 4);
471 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (MonoObject));
472 code = emit_bx (code, ARMREG_IP);
/* Inline constant: the method's native entry point, loaded by the ldr above. */
473 *(guint32*)code = (guint32)addr;
475 mono_arch_flush_icache (start, code - start);
476 g_assert ((code - start) <= 16);
477 /*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
478 g_print ("unbox code is at %p for method at %p\n", start, addr);*/
/*
 * mono_arch_get_static_rgctx_trampoline:
 * Return a trampoline that loads MRGCTX into MONO_ARCH_RGCTX_REG and then
 * jumps to ADDR.
 */
484 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
486 guint8 *code, *start;
489 MonoDomain *domain = mono_domain_get ();
493 start = code = mono_domain_code_reserve (domain, buf_len);
/* Two pc-relative loads immediately followed by their two constant-pool
 * words (mrgctx, then the target address). */
495 ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_PC, 0);
496 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_PC, 0);
497 *(guint32*)code = (guint32)mrgctx;
499 *(guint32*)code = (guint32)addr;
502 g_assert ((code - start) <= buf_len);
504 mono_arch_flush_icache (start, code - start);
/*
 * mono_arch_create_rgctx_lazy_fetch_trampoline:
 * Create a trampoline that looks up SLOT in the rgctx/mrgctx passed in R0
 * and returns the value in R0 if it is already populated. If any pointer
 * on the way (vtable rgctx, intermediate arrays, or the slot itself) is
 * NULL, it falls through to the generic lazy-fetch trampoline to compute
 * the value. NOTE(review): some declarations and structural lines are not
 * visible in this chunk.
 */
510 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
516 guint8 **rgctx_null_jumps;
520 MonoJumpInfo *ji = NULL;
521 GSList *unwind_ops = NULL;
523 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
524 index = MONO_RGCTX_SLOT_INDEX (slot);
/* MRGCTX slots live after the fixed-size header. */
526 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Compute how many array-indirection levels are needed to reach INDEX. */
527 for (depth = 0; ; ++depth) {
528 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
530 if (index < size - 1)
535 tramp_size = 64 + 16 * depth;
537 code = buf = mono_global_codeman_reserve (tramp_size);
539 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, 0);
/* One null-check branch per indirection level, plus the rgctx-ptr and
 * final-slot checks. */
541 rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
544 /* The vtable/mrgctx is in R0 */
545 g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);
549 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
551 /* load rgctx ptr from vtable */
552 g_assert (arm_is_imm12 (G_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
553 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
554 /* is the rgctx ptr null? */
555 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
556 /* if yes, jump to actual trampoline */
557 rgctx_null_jumps [njumps ++] = code;
558 ARM_B_COND (code, ARMCOND_EQ, 0);
561 for (i = 0; i < depth; ++i) {
562 /* load ptr to next array */
563 if (mrgctx && i == 0) {
564 g_assert (arm_is_imm12 (MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT));
565 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
567 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, 0);
569 /* is the ptr null? */
570 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
571 /* if yes, jump to actual trampoline */
572 rgctx_null_jumps [njumps ++] = code;
573 ARM_B_COND (code, ARMCOND_EQ, 0);
/* Fetch the slot itself; (index + 1) presumably skips the 'next' pointer
 * stored at the start of the final array — TODO confirm against
 * mono_class_rgctx_get_array_size (). */
577 code = mono_arm_emit_load_imm (code, ARMREG_R2, sizeof (gpointer) * (index + 1));
578 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_R1, ARMREG_R2);
579 /* is the slot null? */
580 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
581 /* if yes, jump to actual trampoline */
582 rgctx_null_jumps [njumps ++] = code;
583 ARM_B_COND (code, ARMCOND_EQ, 0);
584 /* otherwise return, result is in R1 */
585 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
586 code = emit_bx (code, ARMREG_LR);
/* Slow path: patch every null-check branch to land here. */
588 g_assert (njumps <= depth + 2);
589 for (i = 0; i < njumps; ++i)
590 arm_patch (rgctx_null_jumps [i], code);
592 g_free (rgctx_null_jumps);
596 /* The vtable/mrgctx is still in R0 */
599 ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
600 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Placeholder word, resolved via the patch info above (AOT path). */
602 *(gpointer*)code = NULL;
604 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
606 tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);
608 /* Jump to the actual trampoline */
609 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
610 code = emit_bx (code, ARMREG_R1);
611 *(gpointer*)code = tramp;
615 mono_arch_flush_icache (buf, code - buf);
617 g_assert (code - buf <= tramp_size);
620 *info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);
625 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
/*
 * mono_arch_create_generic_class_init_trampoline:
 * Create the trampoline invoked when a generic class may need
 * initialization. The generated code tests the 'initialized' bit in the
 * vtable (passed in MONO_ARCH_VTABLE_REG) and returns immediately when it
 * is set; otherwise it transfers to the specific class-init trampoline.
 * NOTE(review): several structural lines are not visible in this chunk.
 */
628 mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
632 static int byte_offset = -1;
633 static guint8 bitmask;
636 guint32 code_len, imm8;
638 GSList *unwind_ops = NULL;
639 MonoJumpInfo *ji = NULL;
643 code = buf = mono_global_codeman_reserve (tramp_size);
/* Locate the 'initialized' bitfield inside MonoVTable (cached in the
 * statics above). */
646 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
648 g_assert (arm_is_imm8 (byte_offset));
649 ARM_LDRSB_IMM (code, ARMREG_IP, MONO_ARCH_VTABLE_REG, byte_offset);
650 imm8 = mono_arm_is_rotated_imm8 (bitmask, &rot_amount);
/* NOTE(review): imm8 is declared guint32 above, so this assert is always
 * true; if mono_arm_is_rotated_imm8 () signals failure with a negative
 * value, the check never fires — verify the intended contract. */
651 g_assert (imm8 >= 0);
652 ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
653 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
/* Bit clear -> class not initialized; branch displacement patched below. */
655 ARM_B_COND (code, ARMCOND_EQ, 0);
657 /* Initialized case */
658 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
660 /* Uninitialized case */
661 arm_patch (jump, code);
664 ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
665 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Placeholder word, resolved via the patch info above (AOT path). */
667 *(gpointer*)code = NULL;
669 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
671 tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
673 /* Jump to the actual trampoline */
674 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
675 code = emit_bx (code, ARMREG_R1);
676 *(gpointer*)code = tramp;
680 mono_arch_flush_icache (buf, code - buf);
682 g_assert (code - buf <= tramp_size);
685 *info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
/* DISABLE_JIT stub: trampoline creation is unsupported in this build. */
693 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
695 g_assert_not_reached ();
/* DISABLE_JIT stub: trampoline creation is unsupported in this build. */
700 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
702 g_assert_not_reached ();
/* DISABLE_JIT stub: unbox trampolines are unsupported in this build. */
707 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
709 g_assert_not_reached ();
/* DISABLE_JIT stub: static rgctx trampolines are unsupported in this build. */
714 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
716 g_assert_not_reached ();
/* DISABLE_JIT stub: rgctx lazy-fetch trampolines are unsupported in this build. */
721 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
723 g_assert_not_reached ();
/* DISABLE_JIT stub: generic class-init trampolines are unsupported in this build. */
728 mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
730 g_assert_not_reached ();
734 #endif /* DISABLE_JIT */
/*
 * mono_arch_get_call_target:
 * Given CODE pointing just past a call instruction, decode the preceding
 * 'bl' and return its branch target. NOTE(review): the non-'bl' fallback,
 * the return statement, and any sign-extension of the 24-bit displacement
 * are not visible in this chunk.
 */
737 mono_arch_get_call_target (guint8 *code)
739 guint32 ins = ((guint32*)(gpointer)code) [-1];
741 /* Should be a 'bl' */
742 if ((((ins >> 25) & 0x7) == 0x5) && (((ins >> 24) & 0x1) == 0x1)) {
/* 24-bit word displacement, pc-relative with the usual 8-byte read-ahead;
 * 'code - 4' is the address of the bl itself. */
743 gint32 disp = ((gint32)ins) & 0xffffff;
744 guint8 *target = code - 4 + 8 + (disp * 4);
/*
 * mono_arch_get_plt_info_offset:
 * Return the PLT info offset, stored as the 4th word of the PLT entry.
 */
753 mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
755 /* The offset is stored as the 4th word of the plt entry */
756 return ((guint32*)plt_entry) [3];
760 * Return the address of the PLT entry called by the thumb code CODE.
763 mono_arm_get_thumb_plt_entry (guint8 *code)
765 int s, j1, j2, imm10, imm11, i1, i2, imm32;
770 /* code should be right after a BL */
/* Clear the Thumb bit, then word-align to get the instruction's address. */
771 code = (guint8*)((mgreg_t)code & ~1);
772 base = (guint8*)((mgreg_t)code & ~3);
/* Decode the two halfwords of the Thumb-2 BL (T1 encoding). */
774 t1 = ((guint16*)bl) [0];
775 t2 = ((guint16*)bl) [1];
/* 0x1e in the top 5 bits identifies the first BL halfword. */
777 g_assert ((t1 >> 11) == 0x1e);
779 s = (t1 >> 10) & 0x1;
780 imm10 = (t1 >> 0) & 0x3ff;
781 j1 = (t2 >> 13) & 0x1;
782 j2 = (t2 >> 11) & 0x1;
/* I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S) per the Thumb-2 BL encoding. */
785 i1 = (s ^ j1) ? 0 : 1;
786 i2 = (s ^ j2) ? 0 : 1;
/* NOTE(review): the sign bit (S) contribution to imm32 is not visible in
 * this chunk — presumably applied on a missing line; verify. */
788 imm32 = (imm11 << 1) | (imm10 << 12) | (i2 << 22) | (i1 << 23);
792 target = code + imm32;
794 /* target now points to the thumb plt entry */
795 /* ldr.w r12, [pc, #8] */
796 g_assert (((guint16*)target) [0] == 0xf8df);
797 g_assert (((guint16*)target) [1] == 0xc008);
800 * The PLT info offset is at offset 16, but mono_arch_get_plt_entry_offset () returns
801 * the 3rd word, so compensate by returning a different value.