2 * mini-ppc.c: PowerPC backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Andreas Faerber <andreas.faerber@web.de>
9 * (C) 2003 Ximian, Inc.
10 * (C) 2007-2008 Andreas Faerber
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-proclib.h>
18 #include <mono/utils/mono-mmap.h>
21 #ifdef TARGET_POWERPC64
22 #include "cpu-ppc64.h"
29 #include <sys/sysctl.h>
35 #define FORCE_INDIR_CALL 1
46 /* cpu_hw_caps contains the flags defined below */
47 static int cpu_hw_caps = 0;
/* Data-cache line size and flush increment, discovered in mono_arch_cpu_init
 * and consumed by mono_arch_flush_icache. */
48 static int cachelinesize = 0;
49 static int cachelineinc = 0;
/* Capability bits stored in cpu_hw_caps (detected from AT_HWCAP / AT_PLATFORM). */
51 PPC_ICACHE_SNOOP = 1 << 0,
52 PPC_MULTIPLE_LS_UNITS = 1 << 1,
53 PPC_SMP_CAPABLE = 1 << 2,
56 PPC_MOVE_FPR_GPR = 1 << 5,
/* Byte size of the code emitted for a breakpoint site. */
60 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
62 /* This mutex protects architecture specific caches */
63 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
64 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
65 static CRITICAL_SECTION mini_arch_mutex;
67 int mono_exc_esp_offset = 0;
/* Runtime-detected TLS access strategy; dispatched on by emit_tls_access. */
68 static int tls_mode = TLS_MODE_DETECT;
69 static int lmf_pthread_key = -1;
70 static int monodomain_key = -1;
73 * The code generated for sequence points reads from this location, which is
74 * made read-only when single stepping is enabled.
76 static gpointer ss_trigger_page;
78 /* Enabled breakpoints read from this trigger page */
79 static gpointer bp_trigger_page;
/* Translate a pthread key into the two offsets needed to reach its TLS slot
 * through the linuxthreads thread descriptor. The constant 284 is a
 * libc-layout-specific base offset — assumes the linuxthreads glibc layout;
 * TODO confirm against the targeted libc version. */
82 offsets_from_pthread_key (guint32 key, int *offset2)
86 *offset2 = idx2 * sizeof (gpointer);
87 return 284 + idx1 * sizeof (gpointer);
/* Emit code loading the TLS slot for `key` into `dreg`; the linuxthreads
 * thread descriptor is reached via ppc_r2. Clobbers only dreg. */
90 #define emit_linuxthreads_tls(code,dreg,key) do {\
92 off1 = offsets_from_pthread_key ((key), &off2); \
93 ppc_ldptr ((code), (dreg), off1, ppc_r2); \
94 ppc_ldptr ((code), (dreg), off2, (dreg)); \
/* Darwin/G5 TLS access: the thread pointer lives in SPR 104 (SPRG0-style
 * register); slot base 0x48 is the Darwin pthread TLS area offset. */
97 #define emit_darwing5_tls(code,dreg,key) do {\
98 int off1 = 0x48 + key * sizeof (gpointer); \
99 ppc_mfspr ((code), (dreg), 104); \
100 ppc_ldptr ((code), (dreg), off1, (dreg)); \
103 /* FIXME: ensure the sc call preserves all but r3 */
/* Darwin/G4 TLS access: uses the 0x7FF2 fast system call to fetch the
 * thread pointer into r3, saving/restoring r3 through r11 when needed. */
104 #define emit_darwing4_tls(code,dreg,key) do {\
105 int off1 = 0x48 + key * sizeof (gpointer); \
106 if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r11, ppc_r3); \
107 ppc_li ((code), ppc_r0, 0x7FF2); \
109 ppc_lwz ((code), (dreg), off1, ppc_r3); \
110 if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r11); \
113 #ifdef PPC_THREAD_PTR_REG
/* NPTL TLS access through the dedicated thread-pointer register.
 * NOTE(review): "(off2 + 1) > 1" below yields a boolean — it looks like a
 * typo for ">> 1"; verify against upstream before changing. */
114 #define emit_nptl_tls(code,dreg,key) do { \
116 int off2 = key >> 15; \
117 if ((off2 == 0) || (off2 == -1)) { \
118 ppc_ldptr ((code), (dreg), off1, PPC_THREAD_PTR_REG); \
120 int off3 = (off2 + 1) > 1; \
121 ppc_addis ((code), ppc_r11, PPC_THREAD_PTR_REG, off3); \
122 ppc_ldptr ((code), (dreg), off1, ppc_r11); \
/* Without a thread-pointer register NPTL TLS access is unsupported. */
126 #define emit_nptl_tls(code,dreg,key) do { \
127 g_assert_not_reached (); \
/* Dispatch on the TLS strategy detected at startup (tls_mode). */
131 #define emit_tls_access(code,dreg,key) do { \
132 switch (tls_mode) { \
133 case TLS_MODE_LTHREADS: emit_linuxthreads_tls(code,dreg,key); break; \
134 case TLS_MODE_NPTL: emit_nptl_tls(code,dreg,key); break; \
135 case TLS_MODE_DARWIN_G5: emit_darwing5_tls(code,dreg,key); break; \
136 case TLS_MODE_DARWIN_G4: emit_darwing4_tls(code,dreg,key); break; \
137 default: g_assert_not_reached (); \
/* Append an OP_R8CONST instruction to the current basic block, loading the
 * double stored at `addr` into dreg `dr`. */
141 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
143 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
144 inst->type = STACK_R8; \
146 inst->inst_p0 = (void*)(addr); \
147 mono_bblock_add_inst (cfg->cbb, inst); \
/* Return the printable name for integer register `reg` (r1 is shown as "sp"). */
151 mono_arch_regname (int reg) {
152 static const char rnames[][4] = {
153 "r0", "sp", "r2", "r3", "r4",
154 "r5", "r6", "r7", "r8", "r9",
155 "r10", "r11", "r12", "r13", "r14",
156 "r15", "r16", "r17", "r18", "r19",
157 "r20", "r21", "r22", "r23", "r24",
158 "r25", "r26", "r27", "r28", "r29",
/* Only the 32 GPRs have names; anything else falls through. */
161 if (reg >= 0 && reg < 32)
/* Return the printable name for floating-point register `reg` (f0..f31). */
167 mono_arch_fregname (int reg) {
168 static const char rnames[][4] = {
169 "f0", "f1", "f2", "f3", "f4",
170 "f5", "f6", "f7", "f8", "f9",
171 "f10", "f11", "f12", "f13", "f14",
172 "f15", "f16", "f17", "f18", "f19",
173 "f20", "f21", "f22", "f23", "f24",
174 "f25", "f26", "f27", "f28", "f29",
/* Only the 32 FPRs have names; anything else falls through. */
177 if (reg >= 0 && reg < 32)
182 /* this function overwrites r0, r11, r12 */
/* Emit an inline memcpy of `size` bytes from sreg+soffset to dreg+doffset.
 * Large copies use a CTR-driven word loop; the tail is copied with
 * progressively narrower load/store pairs. Returns the updated code pointer. */
184 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
186 /* unrolled, use the counter in big */
187 if (size > sizeof (gpointer) * 5) {
188 long shifted = size / SIZEOF_VOID_P;
189 guint8 *copy_loop_start, *copy_loop_jump;
/* CTR holds the word count; r11/r12 are pre-decremented base pointers
 * so the update-form loads/stores can advance them each iteration. */
191 ppc_load (code, ppc_r0, shifted);
192 ppc_mtctr (code, ppc_r0);
193 g_assert (sreg == ppc_r11);
194 ppc_addi (code, ppc_r12, dreg, (doffset - sizeof (gpointer)));
195 ppc_addi (code, ppc_r11, sreg, (soffset - sizeof (gpointer)));
196 copy_loop_start = code;
197 ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
198 ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
199 copy_loop_jump = code;
/* bdnz back to the loop head; patch the branch target now it is known. */
200 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
201 ppc_patch (copy_loop_jump, copy_loop_start);
202 size -= shifted * sizeof (gpointer);
203 doffset = soffset = 0;
206 #ifdef __mono_ppc64__
207 /* the hardware has multiple load/store units and the move is long
208 enough to use more than one register, then use load/load/store/store
209 to execute 2 instructions per cycle. */
210 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
212 ppc_ldptr (code, ppc_r0, soffset, sreg);
213 ppc_ldptr (code, ppc_r12, soffset+8, sreg);
214 ppc_stptr (code, ppc_r0, doffset, dreg);
215 ppc_stptr (code, ppc_r12, doffset+8, dreg);
/* Fallback: one doubleword at a time through r0. */
222 ppc_ldr (code, ppc_r0, soffset, sreg);
223 ppc_str (code, ppc_r0, doffset, dreg);
/* 32-bit path: paired word copies when two load/store units are usable. */
229 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
231 ppc_lwz (code, ppc_r0, soffset, sreg);
232 ppc_lwz (code, ppc_r12, soffset+4, sreg);
233 ppc_stw (code, ppc_r0, doffset, dreg);
234 ppc_stw (code, ppc_r12, doffset+4, dreg);
242 ppc_lwz (code, ppc_r0, soffset, sreg);
243 ppc_stw (code, ppc_r0, doffset, dreg);
/* Remaining halfword, then byte, tail. */
249 ppc_lhz (code, ppc_r0, soffset, sreg);
250 ppc_sth (code, ppc_r0, doffset, dreg);
256 ppc_lbz (code, ppc_r0, soffset, sreg);
257 ppc_stb (code, ppc_r0, doffset, dreg);
266 * mono_arch_get_argument_info:
267 * @csig: a method signature
268 * @param_count: the number of parameters to consider
269 * @arg_info: an array to store the result infos
271 * Gathers information on parameters such as size, alignment and
272 * padding. arg_info should be large enough to hold param_count + 1 entries.
274 * Returns the size of the activation frame.
277 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
279 #ifdef __mono_ppc64__
283 int k, frame_size = 0;
284 int size, align, pad;
/* A struct return is passed as a hidden pointer argument, so it
 * contributes one pointer slot to the frame. */
287 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
288 frame_size += sizeof (gpointer);
292 arg_info [0].offset = offset;
295 frame_size += sizeof (gpointer);
299 arg_info [0].size = frame_size;
/* Accumulate per-parameter size/alignment padding into the frame. */
301 for (k = 0; k < param_count; k++) {
304 size = mono_type_native_stack_size (csig->params [k], (guint32*)&align);
306 size = mini_type_stack_size (NULL, csig->params [k], &align);
308 /* ignore alignment for now */
311 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
312 arg_info [k].pad = pad;
314 arg_info [k + 1].pad = 0;
315 arg_info [k + 1].size = size;
317 arg_info [k + 1].offset = offset;
/* Round the final frame size up to the ABI frame alignment. */
321 align = MONO_ARCH_FRAME_ALIGNMENT;
322 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
323 arg_info [k].pad = pad;
329 #ifdef __mono_ppc64__
/* Return TRUE if the five instructions at `seq` form the ppc64 full-address
 * load sequence lis/ori/sldi/oris/ori (checked by primary opcode only). */
331 is_load_sequence (guint32 *seq)
333 return ppc_opcode (seq [0]) == 15 && /* lis */
334 ppc_opcode (seq [1]) == 24 && /* ori */
335 ppc_opcode (seq [2]) == 30 && /* sldi */
336 ppc_opcode (seq [3]) == 25 && /* oris */
337 ppc_opcode (seq [4]) == 24; /* ori */
/* Extract the destination register / 16-bit displacement from a D-form load. */
340 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
341 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
344 /* code must point to the blrl */
/* Return TRUE if the instructions preceding `code` form one of the direct
 * (non-vtable) call sequences this backend emits, so callers can tell a
 * patched direct call from an indirect vtable call. */
346 mono_ppc_is_direct_call_sequence (guint32 *code)
348 #ifdef __mono_ppc64__
/* Must be blrl, blr or a bcctr variant. */
349 g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420);
351 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
352 if (ppc_opcode (code [-1]) == 31) { /* mtlr */
353 if (ppc_opcode (code [-2]) == 58 && ppc_opcode (code [-3]) == 58) { /* ld/ld */
354 if (!is_load_sequence (&code [-8]))
356 /* one of the loads must be "ld r2,8(rX)" */
357 return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == 8) ||
358 (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == 8);
360 if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */
361 return is_load_sequence (&code [-8]);
363 return is_load_sequence (&code [-6]);
/* 32-bit: only blrl is accepted here. */
367 g_assert(*code == 0x4e800021);
369 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
370 return ppc_opcode (code [-1]) == 31 &&
371 ppc_opcode (code [-2]) == 24 &&
372 ppc_opcode (code [-3]) == 15;
/* Given the address just past a 'blrl' call instruction and the saved
 * register state, recover the vtable object and the displacement of the
 * slot that was loaded for the call. Returns NULL-equivalent for direct
 * (non-vtable) call sequences. */
377 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
381 guint32* code = (guint32*)code_ptr;
382 mgreg_t *r = (mgreg_t*)regs;
386 /* This is the 'blrl' instruction */
389 /* Sanity check: instruction must be 'blrl' */
390 if (*code != 0x4e800021)
/* Direct calls have no vtable slot to report. */
393 if (mono_ppc_is_direct_call_sequence (code))
396 /* FIXME: more sanity checks here */
397 /* OK, we're now at the 'blrl' instruction. Now walk backwards
398 till we get to a 'mtlr rA' */
400 if((*code & 0x7c0803a6) == 0x7c0803a6) {
402 /* Here we are: we reached the 'mtlr rA'.
403 Extract the register from the instruction */
404 reg = (*code & 0x03e00000) >> 21;
406 /* ok, this is a lwz reg, offset (vtreg)
407 * it is emitted with:
408 * ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d))
410 soff = (*code & 0xffff);
/* Base register of the load is the vtable register; r1 (sp) would
 * indicate a spilled value, which is not expected here. */
412 reg = (*code >> 16) & 0x1f;
413 g_assert (reg != ppc_r1);
414 /*g_print ("patching reg is %d\n", reg);*/
415 o = (gpointer)(gsize)r [reg];
419 *displacement = offset;
423 #define MAX_ARCH_DELEGATE_PARAMS 7
/* Generate the trampoline that implements Delegate.Invoke. With has_target
 * the delegate's target object replaces 'this' (r3); otherwise the first
 * param_count arguments are slid down one register. Returns the code start;
 * *code_len (if non-NULL) receives the emitted size. */
426 get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len, gboolean aot)
428 guint8 *code, *start;
431 int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE;
433 start = code = mono_global_codeman_reserve (size);
435 code = mono_ppc_create_pre_code_ftnptr (code);
437 /* Replace the this argument with the target */
438 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
439 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
440 /* it's a function descriptor */
441 /* Can't use ldptr as it doesn't work with r0 */
442 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
/* Tail-jump to method_ptr via CTR, with the target object in r3. */
444 ppc_mtctr (code, ppc_r0);
445 ppc_ldptr (code, ppc_r3, G_STRUCT_OFFSET (MonoDelegate, target), ppc_r3);
446 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
448 g_assert ((code - start) <= size);
450 mono_arch_flush_icache (start, size);
/* Targetless case: reserve 4 bytes per argument-slide instruction. */
454 size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE;
455 start = code = mono_global_codeman_reserve (size);
457 code = mono_ppc_create_pre_code_ftnptr (code);
459 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
460 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
461 /* it's a function descriptor */
462 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
464 ppc_mtctr (code, ppc_r0);
465 /* slide down the arguments */
466 for (i = 0; i < param_count; ++i) {
467 ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
469 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
471 g_assert ((code - start) <= size);
473 mono_arch_flush_icache (start, size);
477 *code_len = code - start;
/* Build the full set of delegate-invoke trampolines (has-target plus one
 * per supported parameter count) for AOT compilation, returned as a list
 * of named MonoTrampInfo entries. */
483 mono_arch_get_delegate_invoke_impls (void)
490 code = get_delegate_invoke_impl (TRUE, 0, &code_len, TRUE);
491 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
493 for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
494 code = get_delegate_invoke_impl (FALSE, i, &code_len, TRUE);
495 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
/* Return (and cache) the delegate-invoke trampoline matching `sig`.
 * Unsupported cases (struct returns, too many or non-regsize parameters)
 * fall through to the generic path. */
502 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
504 guint8 *code, *start;
506 /* FIXME: Support more cases */
507 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* Single cached copy for the has-target variant. */
511 static guint8* cached = NULL;
517 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
519 start = get_delegate_invoke_impl (TRUE, 0, NULL, FALSE);
/* Publish the code pointer only after it is fully written. */
521 mono_memory_barrier ();
/* Targetless variants are cached per parameter count. */
525 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
528 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
530 for (i = 0; i < sig->param_count; ++i)
531 if (!mono_is_regsize_var (sig->params [i]))
535 code = cache [sig->param_count];
540 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
541 start = mono_aot_get_named_code (name);
544 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL, FALSE);
547 mono_memory_barrier ();
549 cache [sig->param_count] = start;
/* Recover the 'this' argument from the saved register state at a call site.
 * When the signature returns a struct, r3 holds the hidden return buffer
 * and 'this' is shifted to r4. */
555 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
557 mgreg_t *r = (mgreg_t*)regs;
559 /* FIXME: handle returning a struct */
560 if (MONO_TYPE_ISSTRUCT (sig->ret))
561 return (gpointer)(gsize)r [ppc_r4];
562 return (gpointer)(gsize)r [ppc_r3];
570 #ifdef USE_ENVIRON_HACK
/* Locate the ELF auxiliary vector by walking past the end of __environ.
 * Fallback for kernels where /proc/self/auxv is unavailable; *count is set
 * to the number of entries found. */
572 linux_find_auxv (int *count)
576 char **result = __environ;
577 /* Scan over the env vector looking for the ending NULL */
578 for (; *result != NULL; ++result) {
580 /* Bump the pointer one more step, which should be the auxv. */
582 vec = (AuxVec *)result;
583 if (vec->type != 22 /*AT_IGNOREPPC*/) {
/* Count entries until the AT_NULL terminator. */
587 while (vec->type != 0 /*AT_NULL*/) {
592 return (AuxVec *)result;
596 #define MAX_AUX_ENTRIES 128
598 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
599 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
601 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
603 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
604 #define ISA_64 0x40000000
606 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
607 #define ISA_MOVE_FPR_GPR 0x00000200
609 * Initialize the cpu to execute managed code.
/* Detects the cache line size and CPU capability flags (cpu_hw_caps) using
 * the platform-appropriate mechanism: sysctl, /proc/self/auxv, or the
 * environ-walking fallback. */
612 mono_arch_cpu_init (void)
/* Darwin/BSD: query the cache line size via sysctl(HW_CACHELINE). */
618 mib [1] = HW_CACHELINE;
619 len = sizeof (cachelinesize);
620 if (sysctl (mib, 2, &cachelinesize, (size_t*)&len, NULL, 0) == -1) {
624 cachelineinc = cachelinesize;
626 #elif defined(__linux__)
627 AuxVec vec [MAX_AUX_ENTRIES];
628 int i, vec_entries = 0;
629 /* sadly this will work only with 2.6 kernels... */
630 FILE* f = fopen ("/proc/self/auxv", "rb");
632 vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f);
634 #ifdef USE_ENVIRON_HACK
636 AuxVec *evec = linux_find_auxv (&vec_entries);
638 memcpy (&vec, evec, sizeof (AuxVec) * MIN (vec_entries, MAX_AUX_ENTRIES));
/* Decode the auxv entries we care about. */
641 for (i = 0; i < vec_entries; i++) {
642 int type = vec [i].type;
643 if (type == 19) { /* AT_DCACHEBSIZE */
644 cachelinesize = vec [i].value;
646 } else if (type == 16) { /* AT_HWCAP */
647 if (vec [i].value & 0x00002000 /*PPC_FEATURE_ICACHE_SNOOP*/)
648 cpu_hw_caps |= PPC_ICACHE_SNOOP;
649 if (vec [i].value & ISA_2X)
650 cpu_hw_caps |= PPC_ISA_2X;
651 if (vec [i].value & ISA_64)
652 cpu_hw_caps |= PPC_ISA_64;
653 if (vec [i].value & ISA_MOVE_FPR_GPR)
654 cpu_hw_caps |= PPC_MOVE_FPR_GPR;
656 } else if (type == 15) { /* AT_PLATFORM */
/* Chips with multiple load/store units benefit from paired copies. */
657 const char *arch = (char*)vec [i].value;
658 if (strcmp (arch, "ppc970") == 0 ||
659 (strncmp (arch, "power", 5) == 0 && arch [5] >= '4' && arch [5] <= '7'))
660 cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS;
661 /*printf ("cpu: %s\n", (char*)vec [i].value);*/
665 #elif defined(G_COMPILER_CODEWARRIOR)
668 #elif defined(MONO_CROSS_COMPILE)
670 //#error Need a way to get cache line size
675 cachelineinc = cachelinesize;
677 if (mono_cpu_count () > 1)
678 cpu_hw_caps |= PPC_SMP_CAPABLE;
682 * Initialize architecture specific code.
/* Sets up the arch mutex, allocates the single-step and breakpoint trigger
 * pages (the bp page is made inaccessible so reads fault), and registers
 * the arch-specific JIT icalls. */
685 mono_arch_init (void)
687 InitializeCriticalSection (&mini_arch_mutex);
689 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
690 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
691 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
693 mono_aot_register_jit_icall ("mono_ppc_throw_exception", mono_ppc_throw_exception);
697 * Cleanup architecture specific code.
/* Tears down the state created in mono_arch_init. */
700 mono_arch_cleanup (void)
702 DeleteCriticalSection (&mini_arch_mutex);
706 * This function returns the optimizations supported on this cpu.
/* NOTE(review): the misspelled name "optimizazions" is part of the public
 * mono_arch_* interface shared across backends — do not rename locally. */
709 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
713 /* no ppc-specific optimizations yet */
718 #ifdef __mono_ppc64__
/* Case labels that exist only on one word size, so the switch below can
 * treat I8/U8 as register-sized on ppc64 only. */
719 #define CASE_PPC32(c)
720 #define CASE_PPC64(c) case c:
722 #define CASE_PPC32(c) case c:
723 #define CASE_PPC64(c)
/* Return TRUE if values of type `t` fit in a single integer register. */
727 is_regsize_var (MonoType *t) {
730 t = mini_type_get_underlying_type (NULL, t);
734 CASE_PPC64 (MONO_TYPE_I8)
735 CASE_PPC64 (MONO_TYPE_U8)
739 case MONO_TYPE_FNPTR:
741 case MONO_TYPE_OBJECT:
742 case MONO_TYPE_STRING:
743 case MONO_TYPE_CLASS:
744 case MONO_TYPE_SZARRAY:
745 case MONO_TYPE_ARRAY:
/* Generic instances count only when they are reference types. */
747 case MONO_TYPE_GENERICINST:
748 if (!mono_type_generic_inst_is_valuetype (t))
751 case MONO_TYPE_VALUETYPE:
/* Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size, returned
 * sorted for the allocator. */
759 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
764 for (i = 0; i < cfg->num_varinfo; i++) {
765 MonoInst *ins = cfg->varinfo [i];
766 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused variables (empty or inverted live range). */
769 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
772 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
775 /* we can only allocate 32 bit values */
776 if (is_regsize_var (ins->inst_vtype)) {
777 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
778 g_assert (i == vmv->idx);
779 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
785 #endif /* ifndef DISABLE_JIT */
/* Return the list of callee-saved integer registers available to the
 * global register allocator for this method. */
788 mono_arch_get_global_int_regs (MonoCompile *cfg)
/* The frame register (r31 when used) cannot double as an allocatable reg. */
792 if (cfg->frame_reg != ppc_sp)
794 /* ppc_r13 is used by the system on PPC EABI */
795 for (i = 14; i < top; ++i) {
797 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
798 * since the trampolines can clobber r11.
800 if (!(cfg->compile_aot && i == 29))
801 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
808 * mono_arch_regalloc_cost:
810 * Return the cost, in number of memory references, of the action of
811 * allocating the variable VMV into a register during global register
/* (body not visible in this chunk) */
815 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/* Make `size` bytes of freshly written code at `code` visible to the
 * instruction fetch unit: flush/store the data cache lines, then invalidate
 * the corresponding icache lines, choosing the cheapest sequence the
 * detected CPU supports. */
822 mono_arch_flush_icache (guint8 *code, gint size)
824 #ifdef MONO_CROSS_COMPILE
827 guint8 *endp, *start;
/* Round the start down to a cache-line boundary. */
831 start = (guint8*)((gsize)start & ~(cachelinesize - 1));
832 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
833 #if defined(G_COMPILER_CODEWARRIOR)
834 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
835 for (p = start; p < endp; p += cachelineinc) {
839 for (p = start; p < endp; p += cachelineinc) {
845 for (p = start; p < endp; p += cachelineinc) {
856 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
857 * The sync is required to ensure that the store queue is completely empty.
858 * While the icbi performs no cache operations, icbi/isync is required to
859 * kill local prefetch.
861 if (cpu_hw_caps & PPC_ICACHE_SNOOP) {
863 asm ("icbi 0,%0;" : : "r"(code) : "memory");
867 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
868 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
869 for (p = start; p < endp; p += cachelineinc) {
870 asm ("dcbf 0,%0;" : : "r"(p) : "memory");
873 for (p = start; p < endp; p += cachelineinc) {
874 asm ("dcbst 0,%0;" : : "r"(p) : "memory");
879 for (p = start; p < endp; p += cachelineinc) {
880 /* for ISA2.0+ implementations we should not need any extra sync between the
881 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
882 * So I am not sure which chip had this problem but it's not an issue on
883 * any of the ISA V2 chips.
885 if (cpu_hw_caps & PPC_ISA_2X)
886 asm ("icbi 0,%0;" : : "r"(p) : "memory");
888 asm ("icbi 0,%0; sync;" : : "r"(p) : "memory");
890 if (!(cpu_hw_caps & PPC_ISA_2X))
/* No register windows on PowerPC — intentionally a no-op. */
898 mono_arch_flush_register_windows (void)
/* Argument-passing policy knobs: whether register args also reserve stack
 * space, and whether FP args also consume GPR slots (ABI dependent). */
903 #define ALWAYS_ON_STACK(s) s
904 #define FP_ALSO_IN_REG(s) s
906 #ifdef __mono_ppc64__
907 #define ALWAYS_ON_STACK(s) s
908 #define FP_ALSO_IN_REG(s) s
910 #define ALWAYS_ON_STACK(s)
911 #define FP_ALSO_IN_REG(s)
913 #define ALIGN_DOUBLES
/* Per-argument placement info computed by calculate_sizes. */
926 guint32 vtsize; /* in param area */
928 guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal */
929 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
930 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
931 guint8 bytes : 4; /* size in bytes - only valid for
932 RegTypeStructByVal if the struct fits
933 in one word, otherwise it's 0*/
/* Assign the next integer argument either to a GPR (advancing *gr) or to a
 * caller-frame stack slot (advancing *stack_size), filling in `ainfo`.
 * `simple` distinguishes single-register values from 64-bit pairs on 32-bit. */
948 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
950 #ifdef __mono_ppc64__
/* Registers r3..r3+PPC_NUM_REG_ARGS-1 carry arguments; past that, spill. */
955 if (*gr >= 3 + PPC_NUM_REG_ARGS) {
956 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
957 ainfo->reg = ppc_sp; /* in the caller */
958 ainfo->regtype = RegTypeBase;
959 *stack_size += sizeof (gpointer);
961 ALWAYS_ON_STACK (*stack_size += sizeof (gpointer));
/* Non-simple (64-bit on 32-bit ABI) values need an even register pair. */
965 if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) {
967 //*stack_size += (*stack_size % 8);
969 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
970 ainfo->reg = ppc_sp; /* in the caller */
971 ainfo->regtype = RegTypeBase;
978 ALWAYS_ON_STACK (*stack_size += 8);
986 #if defined(__APPLE__) || defined(__mono_ppc64__)
/* Return TRUE if `klass` has exactly one instance field and it is an R4 or
 * R8 — such structs are passed in a floating-point register on these ABIs. */
988 has_only_a_r48_field (MonoClass *klass)
992 gboolean have_field = FALSE;
994 while ((f = mono_class_get_fields (klass, &iter))) {
995 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
998 if (!f->type->byref && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8))
/* Compute the call-info structure for `sig`: where each argument and the
 * return value live (register, FP register, by-value registers, or caller
 * stack), plus the total 16-byte-aligned stack usage. The caller owns the
 * returned g_malloc0'd CallInfo. */
1009 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
1012 int n = sig->hasthis + sig->param_count;
1013 MonoType *simpletype;
1014 guint32 stack_size = 0;
1015 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
/* First free FP and integer argument registers. */
1017 fr = PPC_FIRST_FPARG_REG;
1018 gr = PPC_FIRST_ARG_REG;
1020 /* FIXME: handle returning a struct */
1021 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1022 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1023 cinfo->struct_ret = PPC_FIRST_ARG_REG;
/* Implicit 'this' argument, when present. */
1028 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1031 DEBUG(printf("params: %d\n", sig->param_count));
1032 for (i = 0; i < sig->param_count; ++i) {
1033 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1034 /* Prevent implicit arguments and sig_cookie from
1035 being passed in registers */
1036 gr = PPC_LAST_ARG_REG + 1;
1037 /* FIXME: don't we have to set fr, too? */
1038 /* Emit the signature cookie just before the implicit arguments */
1039 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1041 DEBUG(printf("param %d: ", i));
1042 if (sig->params [i]->byref) {
1043 DEBUG(printf("byref\n"));
1044 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1048 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
1049 switch (simpletype->type) {
1050 case MONO_TYPE_BOOLEAN:
1053 cinfo->args [n].size = 1;
1054 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1057 case MONO_TYPE_CHAR:
1060 cinfo->args [n].size = 2;
1061 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1066 cinfo->args [n].size = 4;
1067 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* All reference/pointer-like types are a single pointer slot. */
1073 case MONO_TYPE_FNPTR:
1074 case MONO_TYPE_CLASS:
1075 case MONO_TYPE_OBJECT:
1076 case MONO_TYPE_STRING:
1077 case MONO_TYPE_SZARRAY:
1078 case MONO_TYPE_ARRAY:
1079 cinfo->args [n].size = sizeof (gpointer);
1080 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1083 case MONO_TYPE_GENERICINST:
1084 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1085 cinfo->args [n].size = sizeof (gpointer);
1086 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1091 case MONO_TYPE_VALUETYPE:
1092 case MONO_TYPE_TYPEDBYREF: {
/* Value types: size depends on native vs managed layout. */
1096 klass = mono_class_from_mono_type (sig->params [i]);
1097 if (simpletype->type == MONO_TYPE_TYPEDBYREF)
1098 size = sizeof (MonoTypedRef);
1099 else if (is_pinvoke)
1100 size = mono_class_native_size (klass, NULL);
1102 size = mono_class_value_size (klass, NULL);
1104 #if defined(__APPLE__) || defined(__mono_ppc64__)
/* Single-float/double structs travel in an FP register on these ABIs. */
1105 if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) {
1106 cinfo->args [n].size = size;
1108 /* It was 7, now it is 8 in LinuxPPC */
1109 if (fr <= PPC_LAST_FPARG_REG) {
1110 cinfo->args [n].regtype = RegTypeFP;
1111 cinfo->args [n].reg = fr;
1113 FP_ALSO_IN_REG (gr ++);
1115 FP_ALSO_IN_REG (gr ++);
1116 ALWAYS_ON_STACK (stack_size += size);
1118 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1119 cinfo->args [n].regtype = RegTypeBase;
1120 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1127 DEBUG(printf ("load %d bytes struct\n",
1128 mono_class_native_size (sig->params [i]->data.klass, NULL)));
1130 #if PPC_PASS_STRUCTS_BY_VALUE
/* Split the struct between the remaining arg registers and the stack. */
1132 int align_size = size;
1134 int rest = PPC_LAST_ARG_REG - gr + 1;
1137 align_size += (sizeof (gpointer) - 1);
1138 align_size &= ~(sizeof (gpointer) - 1);
1139 nregs = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1140 n_in_regs = MIN (rest, nregs);
1144 /* FIXME: check this */
1145 if (size >= 3 && size % 4 != 0)
1148 cinfo->args [n].regtype = RegTypeStructByVal;
1149 cinfo->args [n].vtregs = n_in_regs;
1150 cinfo->args [n].size = n_in_regs;
1151 cinfo->args [n].vtsize = nregs - n_in_regs;
1152 cinfo->args [n].reg = gr;
1154 #ifdef __mono_ppc64__
/* bytes is only meaningful for small pinvoke structs in one register. */
1155 if (nregs == 1 && is_pinvoke)
1156 cinfo->args [n].bytes = size;
1159 cinfo->args [n].bytes = 0;
1161 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1162 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1163 stack_size += nregs * sizeof (gpointer);
/* Otherwise pass the struct by reference (address in a GPR). */
1166 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1167 cinfo->args [n].regtype = RegTypeStructByAddr;
1168 cinfo->args [n].vtsize = size;
/* 64-bit integers. */
1175 cinfo->args [n].size = 8;
1176 add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8);
/* R4: passed in an FP register when available. */
1180 cinfo->args [n].size = 4;
1182 /* It was 7, now it is 8 in LinuxPPC */
1183 if (fr <= PPC_LAST_FPARG_REG) {
1184 cinfo->args [n].regtype = RegTypeFP;
1185 cinfo->args [n].reg = fr;
1187 FP_ALSO_IN_REG (gr ++);
1188 ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER);
1190 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4);
1191 cinfo->args [n].regtype = RegTypeBase;
1192 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1193 stack_size += SIZEOF_REGISTER;
/* R8: passed in an FP register when available. */
1198 cinfo->args [n].size = 8;
1199 /* It was 7, now it is 8 in LinuxPPC */
1200 if (fr <= PPC_LAST_FPARG_REG) {
1201 cinfo->args [n].regtype = RegTypeFP;
1202 cinfo->args [n].reg = fr;
1204 FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER);
1205 ALWAYS_ON_STACK (stack_size += 8);
1207 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1208 cinfo->args [n].regtype = RegTypeBase;
1209 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1215 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* Vararg sentinel after the last explicit parameter. */
1219 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1220 /* Prevent implicit arguments and sig_cookie from
1221 being passed in registers */
1222 gr = PPC_LAST_ARG_REG + 1;
1223 /* Emit the signature cookie just before the implicit arguments */
1224 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Return value placement. */
1228 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
1229 switch (simpletype->type) {
1230 case MONO_TYPE_BOOLEAN:
1235 case MONO_TYPE_CHAR:
1241 case MONO_TYPE_FNPTR:
1242 case MONO_TYPE_CLASS:
1243 case MONO_TYPE_OBJECT:
1244 case MONO_TYPE_SZARRAY:
1245 case MONO_TYPE_ARRAY:
1246 case MONO_TYPE_STRING:
1247 cinfo->ret.reg = ppc_r3;
1251 cinfo->ret.reg = ppc_r3;
/* Floating-point results come back in f1. */
1255 cinfo->ret.reg = ppc_f1;
1256 cinfo->ret.regtype = RegTypeFP;
1258 case MONO_TYPE_GENERICINST:
1259 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1260 cinfo->ret.reg = ppc_r3;
1264 case MONO_TYPE_VALUETYPE:
1266 case MONO_TYPE_TYPEDBYREF:
1267 case MONO_TYPE_VOID:
1270 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1274 /* align stack size to 16 */
1275 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1276 stack_size = (stack_size + 15) & ~15;
1278 cinfo->stack_usage = stack_size;
/* For ABIs that pass structs by address, pre-allocate one indirect local
 * per valuetype parameter so tail calls have somewhere to stage the
 * struct copies. No-op unless the method contains tail calls. */
1283 allocate_tailcall_valuetype_addrs (MonoCompile *cfg)
1285 #if !PPC_PASS_STRUCTS_BY_VALUE
1286 MonoMethodSignature *sig = mono_method_signature (cfg->method);
1287 int num_structs = 0;
1290 if (!(cfg->flags & MONO_CFG_HAS_TAIL))
/* Count valuetype parameters. */
1293 for (i = 0; i < sig->param_count; ++i) {
1294 MonoType *type = mono_type_get_underlying_type (sig->params [i]);
1295 if (type->type == MONO_TYPE_VALUETYPE)
1300 cfg->tailcall_valuetype_addrs =
1301 mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * num_structs);
1302 for (i = 0; i < num_structs; ++i) {
1303 cfg->tailcall_valuetype_addrs [i] =
1304 mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1305 cfg->tailcall_valuetype_addrs [i]->flags |= MONO_INST_INDIRECT;
1312 * Set var information according to the calling convention. ppc version.
1313 * The locals var stuff should most likely be split in another method.
1316 mono_arch_allocate_vars (MonoCompile *m)
1318 MonoMethodSignature *sig;
1319 MonoMethodHeader *header;
1321 int i, offset, size, align, curinst;
1322 int frame_reg = ppc_sp;
1324 guint32 locals_stack_size, locals_stack_align;
1326 allocate_tailcall_valuetype_addrs (m);
1328 m->flags |= MONO_CFG_HAS_SPILLUP;
1330 /* allow room for the vararg method args: void* and long/double */
1331 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1332 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1333 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1334 * call convs needs to be handled this way.
1336 if (m->flags & MONO_CFG_HAS_VARARGS)
1337 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1338 /* gtk-sharp and other broken code will dllimport vararg functions even with
1339 * non-varargs signatures. Since there is little hope people will get this right
1340 * we assume they won't.
1342 if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
1343 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1348 * We use the frame register also for any method that has
1349 * exception clauses. This way, when the handlers are called,
1350 * the code will reference local variables using the frame reg instead of
1351 * the stack pointer: if we had to restore the stack pointer, we'd
1352 * corrupt the method frames that are already on the stack (since
1353 * filters get called before stack unwinding happens) when the filter
1354 * code would call any method (this also applies to finally etc.).
1356 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1357 frame_reg = ppc_r31;
1358 m->frame_reg = frame_reg;
1359 if (frame_reg != ppc_sp) {
1360 m->used_int_regs |= 1 << frame_reg;
1363 sig = mono_method_signature (m->method);
1367 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1368 m->ret->opcode = OP_REGVAR;
1369 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1371 /* FIXME: handle long values? */
1372 switch (mini_type_get_underlying_type (m->generic_sharing_context, sig->ret)->type) {
1373 case MONO_TYPE_VOID:
1377 m->ret->opcode = OP_REGVAR;
1378 m->ret->inst_c0 = m->ret->dreg = ppc_f1;
1381 m->ret->opcode = OP_REGVAR;
1382 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1386 /* local vars are at a positive offset from the stack pointer */
1388 * also note that if the function uses alloca, we use ppc_r31
1389 * to point at the local variables.
1391 offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */
1392 /* align the offset to 16 bytes: not sure this is needed here */
1394 //offset &= ~(16 - 1);
1396 /* add parameter area size for called functions */
1397 offset += m->param_area;
1399 offset &= ~(16 - 1);
1401 /* allow room to save the return value */
1402 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1405 /* the MonoLMF structure is stored just below the stack pointer */
1408 /* this stuff should not be needed on ppc and the new jit,
1409 * because a call on ppc to the handlers doesn't change the
1410 * stack pointer and the jist doesn't manipulate the stack pointer
1411 * for operations involving valuetypes.
1413 /* reserve space to store the esp */
1414 offset += sizeof (gpointer);
1416 /* this is a global constant */
1417 mono_exc_esp_offset = offset;
1420 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1421 offset += sizeof(gpointer) - 1;
1422 offset &= ~(sizeof(gpointer) - 1);
1424 m->vret_addr->opcode = OP_REGOFFSET;
1425 m->vret_addr->inst_basereg = frame_reg;
1426 m->vret_addr->inst_offset = offset;
1428 if (G_UNLIKELY (m->verbose_level > 1)) {
1429 printf ("vret_addr =");
1430 mono_print_ins (m->vret_addr);
1433 offset += sizeof(gpointer);
1436 offsets = mono_allocate_stack_slots_full (m, FALSE, &locals_stack_size, &locals_stack_align);
1437 if (locals_stack_align) {
1438 offset += (locals_stack_align - 1);
1439 offset &= ~(locals_stack_align - 1);
1441 for (i = m->locals_start; i < m->num_varinfo; i++) {
1442 if (offsets [i] != -1) {
1443 MonoInst *inst = m->varinfo [i];
1444 inst->opcode = OP_REGOFFSET;
1445 inst->inst_basereg = frame_reg;
1446 inst->inst_offset = offset + offsets [i];
1448 g_print ("allocating local %d (%s) to %d\n",
1449 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1453 offset += locals_stack_size;
1457 inst = m->args [curinst];
1458 if (inst->opcode != OP_REGVAR) {
1459 inst->opcode = OP_REGOFFSET;
1460 inst->inst_basereg = frame_reg;
1461 offset += sizeof (gpointer) - 1;
1462 offset &= ~(sizeof (gpointer) - 1);
1463 inst->inst_offset = offset;
1464 offset += sizeof (gpointer);
1469 for (i = 0; i < sig->param_count; ++i) {
1470 inst = m->args [curinst];
1471 if (inst->opcode != OP_REGVAR) {
1472 inst->opcode = OP_REGOFFSET;
1473 inst->inst_basereg = frame_reg;
1475 size = mono_type_native_stack_size (sig->params [i], (guint32*)&align);
1476 inst->backend.is_pinvoke = 1;
1478 size = mono_type_size (sig->params [i], &align);
1480 if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (gpointer))
1481 size = align = sizeof (gpointer);
1482 offset += align - 1;
1483 offset &= ~(align - 1);
1484 inst->inst_offset = offset;
1490 /* some storage for fp conversions */
1493 m->arch.fp_conv_var_offset = offset;
1496 /* align the offset to 16 bytes */
1498 offset &= ~(16 - 1);
1501 m->stack_offset = offset;
1503 if (sig->call_convention == MONO_CALL_VARARG) {
1504 CallInfo *cinfo = calculate_sizes (m->method->signature, m->method->signature->pinvoke);
1506 m->sig_cookie = cinfo->sig_cookie.offset;
/*
 * mono_arch_create_vars:
 * When the managed signature returns a value type, create the MIR
 * variable that will hold the hidden valuetype-return address argument.
 * NOTE(review): this chunk appears sampled/garbled — the return type,
 * braces and several lines are missing, and each line carries a stray
 * leading number; compare against the complete mini-ppc.c before editing.
 */
1513 mono_arch_create_vars (MonoCompile *cfg)
1515 MonoMethodSignature *sig = mono_method_signature (cfg->method);
1517 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* vret_addr is an OP_ARG: the caller passes the address where the vtype result must be stored */
1518 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1522 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1523 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
/*
 * emit_sig_cookie:
 * For vararg calls, stores a pointer to the call signature (the "sig
 * cookie") at its assigned stack-frame offset (cinfo->sig_cookie.offset,
 * relative to ppc_r1/sp) so the callee can locate the variable arguments.
 * Disables AOT because signature tokens are not yet AOT-serializable.
 */
1527 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1529 int sig_reg = mono_alloc_ireg (cfg);
1531 /* FIXME: Add support for signature tokens to AOT */
1532 cfg->disable_aot = TRUE;
/* materialize the MonoMethodSignature* as an immediate, then spill it to the cookie slot */
1534 MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature);
1535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG,
1536 ppc_r1, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_emit_call:
 * Lowers a managed call: for each argument, consults the CallInfo computed
 * by calculate_sizes () and either moves the value into its PPC argument
 * register(s), stores it into the stack parameter area (RegTypeBase), or
 * emits an OP_OUTARG_VT pseudo-op for valuetypes to be expanded later by
 * mono_arch_emit_outarg_vt (). Also emits the vararg sig cookie and the
 * hidden struct-return address where needed, and records stack usage.
 * NOTE(review): lines are missing from this chunk (else branches, breaks,
 * closing braces); do not edit without the full source.
 */
1540 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1543 MonoMethodSignature *sig;
1547 sig = call->signature;
1548 n = sig->param_count + sig->hasthis;
1550 cinfo = calculate_sizes (sig, sig->pinvoke);
1552 for (i = 0; i < n; ++i) {
1553 ArgInfo *ainfo = cinfo->args + i;
/* the implicit 'this' argument (i < hasthis) is typed as a native int */
1556 if (i >= sig->hasthis)
1557 t = sig->params [i - sig->hasthis];
1559 t = &mono_defaults.int_class->byval_arg;
1560 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
/* emit the sig cookie just before the first vararg argument */
1562 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
1563 emit_sig_cookie (cfg, call, cinfo);
1565 in = call->args [i];
1567 if (ainfo->regtype == RegTypeGeneral) {
1568 #ifndef __mono_ppc64__
/* on 32-bit PPC a 64-bit value occupies two vregs (dreg+1/dreg+2) and two arg registers */
1569 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1570 MONO_INST_NEW (cfg, ins, OP_MOVE);
1571 ins->dreg = mono_alloc_ireg (cfg);
1572 ins->sreg1 = in->dreg + 1;
1573 MONO_ADD_INS (cfg->cbb, ins);
1574 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1576 MONO_INST_NEW (cfg, ins, OP_MOVE);
1577 ins->dreg = mono_alloc_ireg (cfg);
1578 ins->sreg1 = in->dreg + 2;
1579 MONO_ADD_INS (cfg->cbb, ins);
1580 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* plain word-sized argument: copy into a fresh vreg bound to the arg register */
1584 MONO_INST_NEW (cfg, ins, OP_MOVE);
1585 ins->dreg = mono_alloc_ireg (cfg);
1586 ins->sreg1 = in->dreg;
1587 MONO_ADD_INS (cfg->cbb, ins);
1589 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1591 } else if (ainfo->regtype == RegTypeStructByAddr) {
/* valuetype passed by reference: OP_OUTARG_VT carries a private ArgInfo copy */
1592 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1593 ins->opcode = OP_OUTARG_VT;
1594 ins->sreg1 = in->dreg;
1595 ins->klass = in->klass;
1596 ins->inst_p0 = call;
1597 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1598 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1599 MONO_ADD_INS (cfg->cbb, ins);
1600 } else if (ainfo->regtype == RegTypeStructByVal) {
1601 /* this is further handled in mono_arch_emit_outarg_vt () */
1602 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1603 ins->opcode = OP_OUTARG_VT;
1604 ins->sreg1 = in->dreg;
1605 ins->klass = in->klass;
1606 ins->inst_p0 = call;
1607 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1608 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1609 MONO_ADD_INS (cfg->cbb, ins);
1610 } else if (ainfo->regtype == RegTypeBase) {
/* argument goes into the stack parameter area at offset(ppc_r1), typed by size */
1611 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1612 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1613 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1614 if (t->type == MONO_TYPE_R8)
1615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1619 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1621 } else if (ainfo->regtype == RegTypeFP) {
1622 if (t->type == MONO_TYPE_VALUETYPE) {
1623 /* this is further handled in mono_arch_emit_outarg_vt () */
1624 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1625 ins->opcode = OP_OUTARG_VT;
1626 ins->sreg1 = in->dreg;
1627 ins->klass = in->klass;
1628 ins->inst_p0 = call;
1629 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1630 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1631 MONO_ADD_INS (cfg->cbb, ins);
1633 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* scalar float argument: narrow to R4 if the slot is 4 bytes, else plain FMOVE */
1635 int dreg = mono_alloc_freg (cfg);
1637 if (ainfo->size == 4) {
1638 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg);
1640 MONO_INST_NEW (cfg, ins, OP_FMOVE);
1642 ins->sreg1 = in->dreg;
1643 MONO_ADD_INS (cfg->cbb, ins);
1646 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
1647 cfg->flags |= MONO_CFG_HAS_FPOUT;
1650 g_assert_not_reached ();
1654 /* Emit the signature cookie in the case that there is no
1655 additional argument */
1656 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1657 emit_sig_cookie (cfg, call, cinfo);
/* hidden first argument: address for the valuetype return */
1659 if (cinfo->struct_ret) {
1662 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1663 vtarg->sreg1 = call->vret_var->dreg;
1664 vtarg->dreg = mono_alloc_preg (cfg);
1665 MONO_ADD_INS (cfg->cbb, vtarg);
1667 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE);
/* grow the outgoing parameter area to the largest call made by this method */
1670 call->stack_usage = cinfo->stack_usage;
1671 cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage));
1672 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_arch_emit_outarg_vt:
 * Expands an OP_OUTARG_VT pseudo-op: copies a valuetype argument from the
 * source (src->dreg points at it) into argument registers and/or the stack
 * parameter area, according to the ArgInfo stashed in ins->inst_p1 by
 * mono_arch_emit_call ().
 * NOTE(review): chunk is missing lines (else branches, closing braces);
 * verify against the full source before editing.
 */
1678 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1680 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1681 ArgInfo *ainfo = ins->inst_p1;
1682 int ovf_size = ainfo->vtsize;
1683 int doffset = ainfo->offset;
1684 int i, soffset, dreg;
1686 if (ainfo->regtype == RegTypeStructByVal) {
1693 * Darwin pinvokes needs some special handling for 1
1694 * and 2 byte arguments
1696 g_assert (ins->klass);
1697 if (call->signature->pinvoke)
1698 size = mono_class_native_size (ins->klass, NULL);
/* 1/2-byte structs: load the value directly into one register */
1699 if (size == 2 || size == 1) {
1700 int tmpr = mono_alloc_ireg (cfg);
1702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset);
1705 dreg = mono_alloc_ireg (cfg);
1706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr);
1707 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE);
/* copy the struct word-by-word into consecutive argument registers */
1710 for (i = 0; i < ainfo->vtregs; ++i) {
1711 int antipadding = 0;
/* partial trailing word: shift right so the bytes land in the low end of the register */
1714 antipadding = sizeof (gpointer) - ainfo->bytes;
1716 dreg = mono_alloc_ireg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8);
1720 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1721 soffset += sizeof (gpointer);
/* any overflow words spill to the stack parameter area */
1724 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
1725 } else if (ainfo->regtype == RegTypeFP) {
1726 int tmpr = mono_alloc_freg (cfg);
1727 if (ainfo->size == 4)
1728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0);
1730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0);
1731 dreg = mono_alloc_freg (cfg);
1732 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1733 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
/* by-address case: make a local copy so the callee can't mutate the caller's value */
1735 MonoInst *vtcopy = mono_compile_create_var (cfg, &src->klass->byval_arg, OP_LOCAL);
1739 /* FIXME: alignment? */
1740 if (call->signature->pinvoke) {
1741 size = mono_type_native_stack_size (&src->klass->byval_arg, NULL);
1742 vtcopy->backend.is_pinvoke = 1;
1744 size = mini_type_stack_size (cfg->generic_sharing_context, &src->klass->byval_arg, NULL);
1747 g_assert (ovf_size > 0);
1749 EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
1750 mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, 0);
/* pass the address of the copy, in a register or on the stack */
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg);
1755 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
/*
 * mono_arch_emit_setret:
 * Moves the method's computed return value into the return location:
 * OP_SETLRET for 64-bit values on 32-bit PPC (register pair dreg+1/dreg+2),
 * OP_FMOVE for floats, OP_MOVE for everything else.
 */
1760 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1762 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context,
1763 mono_method_signature (method)->ret);
1766 #ifndef __mono_ppc64__
1767 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1770 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1771 ins->sreg1 = val->dreg + 1;
1772 ins->sreg2 = val->dreg + 2;
1773 MONO_ADD_INS (cfg->cbb, ins);
1777 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1778 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1785 /* FIXME: this is just a useless hint: fix the interface to include the opcode */
/* mono_arch_is_inst_imm: body not visible in this chunk — NOTE(review): verify against the full source */
1787 mono_arch_is_inst_imm (gint64 imm)
1793 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emits a call to the trace function 'func' at method entry:
 * r3 = the MonoMethod, r4 = 0 (placeholder frame pointer), then the
 * trace function address is loaded into r0 and moved to LR for the call.
 * NOTE(review): the declaration/return and call emission lines are
 * missing from this chunk.
 */
1797 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1801 ppc_load_ptr (code, ppc_r3, cfg->method);
1802 ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
1803 ppc_load_func (code, ppc_r0, func);
1804 ppc_mtlr (code, ppc_r0);
/*
 * mono_arch_instrument_epilog_full:
 * Emits a call to the trace-exit function 'func' in the epilog. The live
 * return value (r3/r4 pair, r3, or f1, depending on the return type) is
 * first spilled into the parameter area at save_offset, the trace call is
 * made, and the value is reloaded afterwards so the method still returns it.
 */
1818 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1821 int save_mode = SAVE_NONE;
1823 MonoMethod *method = cfg->method;
1824 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context,
1825 mono_method_signature (method)->ret)->type;
1826 int save_offset = PPC_STACK_PARAM_OFFSET + cfg->param_area;
/* make sure the output buffer has room for the ~16 instructions emitted below */
1830 offset = code - cfg->native_code;
1831 /* we need about 16 instructions */
1832 if (offset > (cfg->code_size - 16 * 4)) {
1833 cfg->code_size *= 2;
1834 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1835 code = cfg->native_code + offset;
/* pick what to save/restore around the trace call, based on the return type */
1839 case MONO_TYPE_VOID:
1840 /* special case string .ctor icall */
/* NOTE(review): strcmp (...) is non-zero when the name is NOT ".ctor", which
 * contradicts the comment above — looks like a missing "== 0" / "!". Verify
 * against the full source / other backends before changing. */
1841 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1842 save_mode = SAVE_ONE;
1844 save_mode = SAVE_NONE;
1846 #ifndef __mono_ppc64__
1849 save_mode = SAVE_TWO;
1854 save_mode = SAVE_FP;
1856 case MONO_TYPE_VALUETYPE:
1857 save_mode = SAVE_STRUCT;
1860 save_mode = SAVE_ONE;
/* spill the return value, and (if enabled) shuffle it into the trace call's argument registers */
1864 switch (save_mode) {
1866 ppc_stw (code, ppc_r3, save_offset, cfg->frame_reg);
1867 ppc_stw (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1868 if (enable_arguments) {
1869 ppc_mr (code, ppc_r5, ppc_r4);
1870 ppc_mr (code, ppc_r4, ppc_r3);
1874 ppc_stptr (code, ppc_r3, save_offset, cfg->frame_reg);
1875 if (enable_arguments) {
1876 ppc_mr (code, ppc_r4, ppc_r3);
1880 ppc_stfd (code, ppc_f1, save_offset, cfg->frame_reg);
1881 if (enable_arguments) {
1882 /* FIXME: what reg? */
1883 ppc_fmr (code, ppc_f3, ppc_f1);
1884 /* FIXME: use 8 byte load on PPC64 */
1885 ppc_lwz (code, ppc_r4, save_offset, cfg->frame_reg);
1886 ppc_lwz (code, ppc_r5, save_offset + 4, cfg->frame_reg);
1890 if (enable_arguments) {
1891 /* FIXME: get the actual address */
1892 ppc_mr (code, ppc_r4, ppc_r3);
/* call the trace function: r3 = method, target via r0 -> LR */
1900 ppc_load_ptr (code, ppc_r3, cfg->method);
1901 ppc_load_func (code, ppc_r0, func);
1902 ppc_mtlr (code, ppc_r0);
/* reload the saved return value so the method still returns it */
1905 switch (save_mode) {
1907 ppc_lwz (code, ppc_r3, save_offset, cfg->frame_reg);
1908 ppc_lwz (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1911 ppc_ldptr (code, ppc_r3, save_offset, cfg->frame_reg);
1914 ppc_lfd (code, ppc_f1, save_offset, cfg->frame_reg);
1924 * Conditional branches have a small offset, so if it is likely overflowed,
1925 * we do a branch to the end of the method (uncond branches have much larger
1926 * offsets) where we perform the conditional and jump back unconditionally.
1927 * It's slightly slower, since we add two uncond branches, but it's very simple
1928 * with the current patch implementation and such large methods are likely not
1929 * going to be perf critical anyway.
/* Fragment of the MonoOvfJump struct (its declaration lines are missing from this chunk) */
1934 const char *exception;
/*
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to ins->inst_true_bb,
 * routing through an overflow-jump record (MONO_PATCH_INFO_BB_OVF) when the
 * estimated displacement may not fit the bc instruction's 16-bit BD field.
 * NOTE(review): "ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))" below looks
 * like a typo — the inner call collapses the value to 0/1 so the second
 * range check is a no-op; likely intended "!ppc_is_imm16 (br_disp - 1024)".
 * Several continuation lines of this macro are missing from this chunk.
 */
1941 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
1942 if (0 && ins->inst_true_bb->native_offset) { \
1943 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
1945 int br_disp = ins->inst_true_bb->max_offset - offset; \
1946 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
1947 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1948 ovfj->data.bb = ins->inst_true_bb; \
1949 ovfj->ip_offset = 0; \
1950 ovfj->b0_cond = (b0); \
1951 ovfj->b1_cond = (b1); \
1952 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
1955 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1956 ppc_bc (code, (b0), (b1), 0); \
/* convenience wrapper: look up the BO/BI fields for a condition code */
1960 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
1962 /* emit an exception if condition is fail
1964 * We assign the extra code used to throw the implicit exceptions
1965 * to cfg->bb_exit as far as the big branch handling is concerned
/* NOTE(review): same suspicious nested ppc_is_imm16 pattern as above; also missing continuation lines */
1967 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
1969 int br_disp = cfg->bb_exit->max_offset - offset; \
1970 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
1971 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1972 ovfj->data.exception = (exc_name); \
1973 ovfj->ip_offset = code - cfg->native_code; \
1974 ovfj->b0_cond = (b0); \
1975 ovfj->b1_cond = (b1); \
1976 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1978 cfg->bb_exit->max_offset += 24; \
1980 mono_add_patch_info (cfg, code - cfg->native_code, \
1981 MONO_PATCH_INFO_EXC, exc_name); \
1982 ppc_bcl (code, (b0), (b1), 0); \
1986 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: body not visible in this chunk (likely empty on this backend — verify) */
1989 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * normalize_opcode:
 * Maps 32-bit and 64-bit variants of an opcode to a single canonical
 * opcode (via MONO_PPC_32_64_CASE) so the peephole pass below can match
 * both widths with one case label. Returns the opcode unchanged otherwise.
 */
1994 normalize_opcode (int opcode)
1997 #ifndef __mono_ilp32__
1998 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE):
1999 return OP_LOAD_MEMBASE;
2000 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX):
2001 return OP_LOAD_MEMINDEX;
2002 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG):
2003 return OP_STORE_MEMBASE_REG;
2004 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM):
2005 return OP_STORE_MEMBASE_IMM;
2006 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX):
2007 return OP_STORE_MEMINDEX;
2009 case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM):
2011 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM):
2012 return OP_SHR_UN_IMM;
/*
 * mono_arch_peephole_pass_2:
 * Local peephole optimizations over one basic block: removes redundant
 * multiplies by 1, strength-reduces power-of-two multiplies to shifts,
 * forwards stored values into subsequent loads from the same address,
 * folds load-after-load, narrows store+load pairs to sign/zero extensions,
 * and deletes no-op and cancelling register moves.
 */
2019 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2021 MonoInst *ins, *n, *last_ins = NULL;
2023 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2024 switch (normalize_opcode (ins->opcode)) {
2026 /* remove unnecessary multiplication with 1 */
2027 if (ins->inst_imm == 1) {
2028 if (ins->dreg != ins->sreg1) {
2029 ins->opcode = OP_MOVE;
2031 MONO_DELETE_INS (bb, ins);
/* multiply by a power of two -> left shift */
2035 int power2 = mono_is_power_of_two (ins->inst_imm);
2037 ins->opcode = OP_SHL_IMM;
2038 ins->inst_imm = power2;
2042 case OP_LOAD_MEMBASE:
2044 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2045 * OP_LOAD_MEMBASE offset(basereg), reg
2047 if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG &&
2048 ins->inst_basereg == last_ins->inst_destbasereg &&
2049 ins->inst_offset == last_ins->inst_offset) {
2050 if (ins->dreg == last_ins->sreg1) {
2051 MONO_DELETE_INS (bb, ins);
2054 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2055 ins->opcode = OP_MOVE;
2056 ins->sreg1 = last_ins->sreg1;
2060 * Note: reg1 must be different from the basereg in the second load
2061 * OP_LOAD_MEMBASE offset(basereg), reg1
2062 * OP_LOAD_MEMBASE offset(basereg), reg2
2064 * OP_LOAD_MEMBASE offset(basereg), reg1
2065 * OP_MOVE reg1, reg2
2067 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE &&
2068 ins->inst_basereg != last_ins->dreg &&
2069 ins->inst_basereg == last_ins->inst_basereg &&
2070 ins->inst_offset == last_ins->inst_offset) {
2072 if (ins->dreg == last_ins->dreg) {
2073 MONO_DELETE_INS (bb, ins);
2076 ins->opcode = OP_MOVE;
2077 ins->sreg1 = last_ins->dreg;
2080 //g_assert_not_reached ();
2084 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2085 * OP_LOAD_MEMBASE offset(basereg), reg
2087 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2088 * OP_ICONST reg, imm
2090 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM &&
2091 ins->inst_basereg == last_ins->inst_destbasereg &&
2092 ins->inst_offset == last_ins->inst_offset) {
2093 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2094 ins->opcode = OP_ICONST;
2095 ins->inst_c0 = last_ins->inst_imm;
/* this rewrite is deliberately disabled until it has been validated */
2096 g_assert_not_reached (); // check this rule
/* store+narrow-load of the same slot -> sign/zero extension of the stored register */
2100 case OP_LOADU1_MEMBASE:
2101 case OP_LOADI1_MEMBASE:
2102 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2103 ins->inst_basereg == last_ins->inst_destbasereg &&
2104 ins->inst_offset == last_ins->inst_offset) {
2105 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2106 ins->sreg1 = last_ins->sreg1;
2109 case OP_LOADU2_MEMBASE:
2110 case OP_LOADI2_MEMBASE:
2111 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2112 ins->inst_basereg == last_ins->inst_destbasereg &&
2113 ins->inst_offset == last_ins->inst_offset) {
2114 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2115 ins->sreg1 = last_ins->sreg1;
2118 #ifdef __mono_ppc64__
2119 case OP_LOADU4_MEMBASE:
2120 case OP_LOADI4_MEMBASE:
2121 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
2122 ins->inst_basereg == last_ins->inst_destbasereg &&
2123 ins->inst_offset == last_ins->inst_offset) {
2124 ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
2125 ins->sreg1 = last_ins->sreg1;
2130 ins->opcode = OP_MOVE;
/* OP_MOVE with identical source and destination is dead */
2134 if (ins->dreg == ins->sreg1) {
2135 MONO_DELETE_INS (bb, ins);
2139 * OP_MOVE sreg, dreg
2140 * OP_MOVE dreg, sreg
2142 if (last_ins && last_ins->opcode == OP_MOVE &&
2143 ins->sreg1 == last_ins->dreg &&
2144 ins->dreg == last_ins->sreg1) {
2145 MONO_DELETE_INS (bb, ins);
2153 bb->last_ins = last_ins;
/*
 * mono_arch_decompose_opts:
 * Decomposes opcodes the PPC backend cannot emit directly into simpler
 * instruction sequences: int->float conversions via the classic
 * 0x43300000 "magic double" bias trick (build the IEEE-754 double
 * 2^52 + value in a stack slot, then subtract the bias), finiteness
 * checks, and (PPC64) 32-bit overflow adds performed in the upper half
 * of a 64-bit register.
 */
2157 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
2159 switch (ins->opcode) {
2160 case OP_ICONV_TO_R_UN: {
/* bias constant: high word 0x43300000 is 2^52; byte order of the halves depends on endianness */
2161 #if G_BYTE_ORDER == G_BIG_ENDIAN
2162 static const guint64 adjust_val = 0x4330000000000000ULL;
2164 static const guint64 adjust_val = 0x0000000000003043ULL;
2166 int msw_reg = mono_alloc_ireg (cfg);
2167 int adj_reg = mono_alloc_freg (cfg);
2168 int tmp_reg = mono_alloc_freg (cfg);
2169 int basereg = ppc_sp;
2171 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
/* if the scratch slot offset doesn't fit an imm16, compute the address into a register */
2172 if (!ppc_is_imm16 (offset + 4)) {
2173 basereg = mono_alloc_ireg (cfg);
2174 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
/* assemble 2^52 + (unsigned)sreg1 in memory, load it as a double, subtract the bias */
2176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2177 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1);
2178 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val);
2179 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2180 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2181 ins->opcode = OP_NOP;
2184 #ifndef __mono_ppc64__
2185 case OP_ICONV_TO_R4:
2186 case OP_ICONV_TO_R8: {
2187 /* If we have a PPC_FEATURE_64 machine we can avoid
2188 this and use the fcfid instruction. Otherwise
2189 on an old 32-bit chip and we have to do this the
2191 if (!(cpu_hw_caps & PPC_ISA_64)) {
2192 /* FIXME: change precision for CEE_CONV_R4 */
/* signed variant: xor with 0x80000000 biases the low word, the constant compensates */
2193 static const guint64 adjust_val = 0x4330000080000000ULL;
2194 int msw_reg = mono_alloc_ireg (cfg);
2195 int xored = mono_alloc_ireg (cfg);
2196 int adj_reg = mono_alloc_freg (cfg);
2197 int tmp_reg = mono_alloc_freg (cfg);
2198 int basereg = ppc_sp;
2200 if (!ppc_is_imm16 (offset + 4)) {
2201 basereg = mono_alloc_ireg (cfg);
2202 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2204 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
2205 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000);
2207 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored);
2208 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val);
2209 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2210 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2211 if (ins->opcode == OP_ICONV_TO_R4)
2212 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->dreg);
2213 ins->opcode = OP_NOP;
/* spill the double and inspect its most-significant word to detect NaN/Inf */
2219 int msw_reg = mono_alloc_ireg (cfg);
2220 int basereg = ppc_sp;
2222 if (!ppc_is_imm16 (offset + 4)) {
2223 basereg = mono_alloc_ireg (cfg);
2224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2226 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1);
2227 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset);
2228 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_FINITE, -1, msw_reg);
2229 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
2230 ins->opcode = OP_NOP;
2233 #ifdef __mono_ppc64__
2235 case OP_IADD_OVF_UN:
/* do the 32-bit overflow add in the top half of 64-bit registers, then shift back down */
2237 int shifted1_reg = mono_alloc_ireg (cfg);
2238 int shifted2_reg = mono_alloc_ireg (cfg);
2239 int result_shifted_reg = mono_alloc_ireg (cfg);
2241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32);
2242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32);
2243 MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg);
2244 if (ins->opcode == OP_IADD_OVF_UN)
2245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32);
2247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32);
2248 ins->opcode = OP_NOP;
/*
 * mono_arch_decompose_long_opts (32-bit PPC):
 * Decomposes 64-bit operations into pairs of 32-bit ops on the register
 * halves (dreg+1 = low word, dreg+2 = high word): add/sub with overflow
 * via the carry chain, and negation via subfic/subfze.
 */
2255 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
2257 switch (ins->opcode) {
2259 /* ADC sets the condition code */
2260 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2261 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2264 case OP_LADD_OVF_UN:
2265 /* ADC sets the condition code */
2266 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2267 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2271 /* SBB sets the condition code */
2272 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2273 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2276 case OP_LSUB_OVF_UN:
2277 /* SBB sets the condition code */
2278 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2279 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2283 /* From gcc generated code */
2284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, ins->dreg + 1, ins->sreg1 + 1, 0);
2285 MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, ins->dreg + 2, ins->sreg1 + 2);
2294 * the branch_b0_table should maintain the order of these
/* BO/BI field tables for ppc_bc, indexed by condition code; entries are not visible in this chunk */
2308 branch_b0_table [] = {
2323 branch_b1_table [] = {
/* NEW_INS: allocate a MIR instruction and insert it after last_ins (continuation lines missing here) */
2337 #define NEW_INS(cfg,dest,op) do { \
2338 MONO_INST_NEW((cfg), (dest), (op)); \
2339 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Maps an opcode that takes an immediate or membase operand to its
 * register-register / memindex equivalent, used by the lowering pass when
 * an immediate/offset does not fit the instruction encoding and has to be
 * materialized into a register first. Falls back to mono_op_imm_to_op ().
 */
2343 map_to_reg_reg_op (int op)
2352 case OP_COMPARE_IMM:
2354 case OP_ICOMPARE_IMM:
2356 case OP_LCOMPARE_IMM:
2372 case OP_LOAD_MEMBASE:
2373 return OP_LOAD_MEMINDEX;
2374 case OP_LOADI4_MEMBASE:
2375 return OP_LOADI4_MEMINDEX;
2376 case OP_LOADU4_MEMBASE:
2377 return OP_LOADU4_MEMINDEX;
2378 case OP_LOADI8_MEMBASE:
2379 return OP_LOADI8_MEMINDEX;
2380 case OP_LOADU1_MEMBASE:
2381 return OP_LOADU1_MEMINDEX;
2382 case OP_LOADI2_MEMBASE:
2383 return OP_LOADI2_MEMINDEX;
2384 case OP_LOADU2_MEMBASE:
2385 return OP_LOADU2_MEMINDEX;
2386 case OP_LOADI1_MEMBASE:
2387 return OP_LOADI1_MEMINDEX;
2388 case OP_LOADR4_MEMBASE:
2389 return OP_LOADR4_MEMINDEX;
2390 case OP_LOADR8_MEMBASE:
2391 return OP_LOADR8_MEMINDEX;
2392 case OP_STOREI1_MEMBASE_REG:
2393 return OP_STOREI1_MEMINDEX;
2394 case OP_STOREI2_MEMBASE_REG:
2395 return OP_STOREI2_MEMINDEX;
2396 case OP_STOREI4_MEMBASE_REG:
2397 return OP_STOREI4_MEMINDEX;
2398 case OP_STOREI8_MEMBASE_REG:
2399 return OP_STOREI8_MEMINDEX;
2400 case OP_STORE_MEMBASE_REG:
2401 return OP_STORE_MEMINDEX;
2402 case OP_STORER4_MEMBASE_REG:
2403 return OP_STORER4_MEMINDEX;
2404 case OP_STORER8_MEMBASE_REG:
2405 return OP_STORER8_MEMINDEX;
/* *_MEMBASE_IMM -> *_MEMBASE_REG: the immediate gets loaded into a register first */
2406 case OP_STORE_MEMBASE_IMM:
2407 return OP_STORE_MEMBASE_REG;
2408 case OP_STOREI1_MEMBASE_IMM:
2409 return OP_STOREI1_MEMBASE_REG;
2410 case OP_STOREI2_MEMBASE_IMM:
2411 return OP_STOREI2_MEMBASE_REG;
2412 case OP_STOREI4_MEMBASE_IMM:
2413 return OP_STOREI4_MEMBASE_REG;
2414 case OP_STOREI8_MEMBASE_IMM:
2415 return OP_STOREI8_MEMBASE_REG;
2417 return mono_op_imm_to_op (op);
2420 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
/*
 * compare_opcode_is_unsigned:
 * True when the branch/cond-exc/setcc opcode following a compare consumes
 * the comparison as unsigned; the lowering pass uses this to decide between
 * the signed (imm16) and unsigned (uimm16) immediate range checks.
 */
2422 #define compare_opcode_is_unsigned(opcode) \
2423 (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
2424 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
2425 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
2426 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
2427 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
2428 ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
2429 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
2430 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
2433 * Remove from the instruction list the instructions that can't be
2434 * represented with very simple instructions with no register
2438 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2440 MonoInst *ins, *next, *temp, *last_ins = NULL;
2443 MONO_BB_FOR_EACH_INS (bb, ins) {
2445 switch (ins->opcode) {
2446 case OP_IDIV_UN_IMM:
2449 case OP_IREM_UN_IMM:
2450 NEW_INS (cfg, temp, OP_ICONST);
2451 temp->inst_c0 = ins->inst_imm;
2452 temp->dreg = mono_alloc_ireg (cfg);
2453 ins->sreg2 = temp->dreg;
2454 if (ins->opcode == OP_IDIV_IMM)
2455 ins->opcode = OP_IDIV;
2456 else if (ins->opcode == OP_IREM_IMM)
2457 ins->opcode = OP_IREM;
2458 else if (ins->opcode == OP_IDIV_UN_IMM)
2459 ins->opcode = OP_IDIV_UN;
2460 else if (ins->opcode == OP_IREM_UN_IMM)
2461 ins->opcode = OP_IREM_UN;
2463 /* handle rem separately */
2467 CASE_PPC64 (OP_LREM)
2468 CASE_PPC64 (OP_LREM_UN) {
2470 /* we change a rem dest, src1, src2 to
2471 * div temp1, src1, src2
2472 * mul temp2, temp1, src2
2473 * sub dest, src1, temp2
2475 if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) {
2476 NEW_INS (cfg, mul, OP_IMUL);
2477 NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN);
2478 ins->opcode = OP_ISUB;
2480 NEW_INS (cfg, mul, OP_LMUL);
2481 NEW_INS (cfg, temp, ins->opcode == OP_LREM? OP_LDIV: OP_LDIV_UN);
2482 ins->opcode = OP_LSUB;
2484 temp->sreg1 = ins->sreg1;
2485 temp->sreg2 = ins->sreg2;
2486 temp->dreg = mono_alloc_ireg (cfg);
2487 mul->sreg1 = temp->dreg;
2488 mul->sreg2 = ins->sreg2;
2489 mul->dreg = mono_alloc_ireg (cfg);
2490 ins->sreg2 = mul->dreg;
2494 CASE_PPC64 (OP_LADD_IMM)
2497 if (!ppc_is_imm16 (ins->inst_imm)) {
2498 NEW_INS (cfg, temp, OP_ICONST);
2499 temp->inst_c0 = ins->inst_imm;
2500 temp->dreg = mono_alloc_ireg (cfg);
2501 ins->sreg2 = temp->dreg;
2502 ins->opcode = map_to_reg_reg_op (ins->opcode);
2506 CASE_PPC64 (OP_LSUB_IMM)
2508 if (!ppc_is_imm16 (-ins->inst_imm)) {
2509 NEW_INS (cfg, temp, OP_ICONST);
2510 temp->inst_c0 = ins->inst_imm;
2511 temp->dreg = mono_alloc_ireg (cfg);
2512 ins->sreg2 = temp->dreg;
2513 ins->opcode = map_to_reg_reg_op (ins->opcode);
2525 gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff));
2526 #ifdef __mono_ppc64__
2527 if (ins->inst_imm & 0xffffffff00000000ULL)
2531 NEW_INS (cfg, temp, OP_ICONST);
2532 temp->inst_c0 = ins->inst_imm;
2533 temp->dreg = mono_alloc_ireg (cfg);
2534 ins->sreg2 = temp->dreg;
2535 ins->opcode = map_to_reg_reg_op (ins->opcode);
2544 NEW_INS (cfg, temp, OP_ICONST);
2545 temp->inst_c0 = ins->inst_imm;
2546 temp->dreg = mono_alloc_ireg (cfg);
2547 ins->sreg2 = temp->dreg;
2548 ins->opcode = map_to_reg_reg_op (ins->opcode);
2550 case OP_COMPARE_IMM:
2551 case OP_ICOMPARE_IMM:
2552 CASE_PPC64 (OP_LCOMPARE_IMM)
2554 /* Branch opts can eliminate the branch */
2555 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
2556 ins->opcode = OP_NOP;
2560 if (compare_opcode_is_unsigned (next->opcode)) {
2561 if (!ppc_is_uimm16 (ins->inst_imm)) {
2562 NEW_INS (cfg, temp, OP_ICONST);
2563 temp->inst_c0 = ins->inst_imm;
2564 temp->dreg = mono_alloc_ireg (cfg);
2565 ins->sreg2 = temp->dreg;
2566 ins->opcode = map_to_reg_reg_op (ins->opcode);
2569 if (!ppc_is_imm16 (ins->inst_imm)) {
2570 NEW_INS (cfg, temp, OP_ICONST);
2571 temp->inst_c0 = ins->inst_imm;
2572 temp->dreg = mono_alloc_ireg (cfg);
2573 ins->sreg2 = temp->dreg;
2574 ins->opcode = map_to_reg_reg_op (ins->opcode);
2580 if (ins->inst_imm == 1) {
2581 ins->opcode = OP_MOVE;
2584 if (ins->inst_imm == 0) {
2585 ins->opcode = OP_ICONST;
2589 imm = mono_is_power_of_two (ins->inst_imm);
2591 ins->opcode = OP_SHL_IMM;
2592 ins->inst_imm = imm;
2595 if (!ppc_is_imm16 (ins->inst_imm)) {
2596 NEW_INS (cfg, temp, OP_ICONST);
2597 temp->inst_c0 = ins->inst_imm;
2598 temp->dreg = mono_alloc_ireg (cfg);
2599 ins->sreg2 = temp->dreg;
2600 ins->opcode = map_to_reg_reg_op (ins->opcode);
2603 case OP_LOCALLOC_IMM:
2604 NEW_INS (cfg, temp, OP_ICONST);
2605 temp->inst_c0 = ins->inst_imm;
2606 temp->dreg = mono_alloc_ireg (cfg);
2607 ins->sreg1 = temp->dreg;
2608 ins->opcode = OP_LOCALLOC;
2610 case OP_LOAD_MEMBASE:
2611 case OP_LOADI4_MEMBASE:
2612 CASE_PPC64 (OP_LOADI8_MEMBASE)
2613 case OP_LOADU4_MEMBASE:
2614 case OP_LOADI2_MEMBASE:
2615 case OP_LOADU2_MEMBASE:
2616 case OP_LOADI1_MEMBASE:
2617 case OP_LOADU1_MEMBASE:
2618 case OP_LOADR4_MEMBASE:
2619 case OP_LOADR8_MEMBASE:
2620 case OP_STORE_MEMBASE_REG:
2621 CASE_PPC64 (OP_STOREI8_MEMBASE_REG)
2622 case OP_STOREI4_MEMBASE_REG:
2623 case OP_STOREI2_MEMBASE_REG:
2624 case OP_STOREI1_MEMBASE_REG:
2625 case OP_STORER4_MEMBASE_REG:
2626 case OP_STORER8_MEMBASE_REG:
2627 /* we can do two things: load the immed in a register
2628 * and use an indexed load, or see if the immed can be
2629 * represented as an ad_imm + a load with a smaller offset
2630 * that fits. We just do the first for now, optimize later.
2632 if (ppc_is_imm16 (ins->inst_offset))
2634 NEW_INS (cfg, temp, OP_ICONST);
2635 temp->inst_c0 = ins->inst_offset;
2636 temp->dreg = mono_alloc_ireg (cfg);
2637 ins->sreg2 = temp->dreg;
2638 ins->opcode = map_to_reg_reg_op (ins->opcode);
2640 case OP_STORE_MEMBASE_IMM:
2641 case OP_STOREI1_MEMBASE_IMM:
2642 case OP_STOREI2_MEMBASE_IMM:
2643 case OP_STOREI4_MEMBASE_IMM:
2644 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM)
2645 NEW_INS (cfg, temp, OP_ICONST);
2646 temp->inst_c0 = ins->inst_imm;
2647 temp->dreg = mono_alloc_ireg (cfg);
2648 ins->sreg1 = temp->dreg;
2649 ins->opcode = map_to_reg_reg_op (ins->opcode);
2651 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2654 if (cfg->compile_aot) {
2655 /* Keep these in the aot case */
2658 NEW_INS (cfg, temp, OP_ICONST);
2659 temp->inst_c0 = (gulong)ins->inst_p0;
2660 temp->dreg = mono_alloc_ireg (cfg);
2661 ins->inst_basereg = temp->dreg;
2662 ins->inst_offset = 0;
2663 ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
2665 /* make it handle the possibly big ins->inst_offset
2666 * later optimize to use lis + load_membase
2672 bb->last_ins = last_ins;
2673 bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 *   Emit code that converts the float in FPR 'sreg' into an integer in
 *   GPR 'dreg'.  The value is rounded towards zero into ppc_f0
 *   (fctidz on ppc64 / fctiwz on ppc32), spilled to the scratch stack
 *   slot at cfg->arch.fp_conv_var_offset and reloaded as an integer,
 *   then narrowed/sign-extended to 'size' bytes per 'is_signed'.
 *   Returns the updated code pointer (return-type line is elided above).
 */
2677 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2679 long offset = cfg->arch.fp_conv_var_offset;
2681 /* sreg is a float, dreg is an integer reg. ppc_f0 is used as a scratch */
2682 #ifdef __mono_ppc64__
2684 ppc_fctidz (code, ppc_f0, sreg);
2689 ppc_fctiwz (code, ppc_f0, sreg);
/* NOTE(review): 'sub_offset' is defined in elided lines above; presumably
 * it selects which word of the spilled 8-byte slot holds the integer
 * result (endianness-dependent) -- confirm against the full source. */
2692 if (ppc_is_imm16 (offset + sub_offset)) {
2693 ppc_stfd (code, ppc_f0, offset, cfg->frame_reg);
2695 ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg);
2697 ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg);
/* Offset does not fit in a 16-bit displacement: materialize the slot
 * address in 'dreg' first, then store/reload relative to it. */
2699 ppc_load (code, dreg, offset);
2700 ppc_add (code, dreg, dreg, cfg->frame_reg);
2701 ppc_stfd (code, ppc_f0, 0, dreg);
2703 ppc_ldr (code, dreg, sub_offset, dreg);
2705 ppc_lwz (code, dreg, sub_offset, dreg);
/* Unsigned results: mask down to 1 or 2 bytes, or (ppc64) clear the
 * upper 32 bits for a 4-byte result. */
2709 ppc_andid (code, dreg, dreg, 0xff);
2711 ppc_andid (code, dreg, dreg, 0xffff);
2712 #ifdef __mono_ppc64__
2714 ppc_clrldi (code, dreg, dreg, 32);
/* Signed results: sign-extend from 1, 2 or (ppc64 only) 4 bytes. */
2718 ppc_extsb (code, dreg, dreg);
2720 ppc_extsh (code, dreg, dreg);
2721 #ifdef __mono_ppc64__
2723 ppc_extsw (code, dreg, dreg);
/* Field of the PatchData struct (declaration starts in elided lines
 * above): the destination address the patched call should reach. */
2731 const guchar *target;

/* True when 'diff' fits in the 26-bit signed displacement of a PPC
 * I-form branch instruction, i.e. within +/- 32 MB. */
2736 #define is_call_imm(diff) ((glong)(diff) >= -33554432 && (glong)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_domain_code_foreach() callback: scan the code chunk 'data'
 *   ('csize' used bytes out of 'bsize') for a 16-byte branch thunk
 *   (lis/ori/mtctr/bcctr) reaching pdata->target.  If a matching thunk
 *   already exists, or a free (zeroed) slot is found, the call site at
 *   pdata->code is patched to branch to that thunk.  Not used on ppc64.
 */
2739 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2740 #ifdef __mono_ppc64__
2741 g_assert_not_reached ();
2743 PatchData *pdata = (PatchData*)user_data;
2744 guchar *code = data;
2745 guint32 *thunks = data;
2746 guint32 *endthunks = (guint32*)(code + bsize);
2750 int difflow, diffhigh;
2752 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2753 difflow = (char*)pdata->code - (char*)thunks;
2754 diffhigh = (char*)pdata->code - (char*)endthunks;
2755 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
/* Build the reference load sequence for pdata->target so existing
 * thunks can be recognized by comparing their first two words. */
2758 templ = (guchar*)load;
2759 ppc_load_sequence (templ, ppc_r0, pdata->target);
2761 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2762 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2763 while (thunks < endthunks) {
2764 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2765 if ((thunks [0] == load [0]) && (thunks [1] == load [1])) {
/* Existing thunk for this target: just redirect the call site. */
2766 ppc_patch (pdata->code, (guchar*)thunks);
2769 static int num_thunks = 0;
2771 if ((num_thunks % 20) == 0)
2772 g_print ("num_thunks lookup: %d\n", num_thunks);
2775 } else if ((thunks [0] == 0) && (thunks [1] == 0)) {
2776 /* found a free slot instead: emit thunk */
2777 code = (guchar*)thunks;
2778 ppc_lis (code, ppc_r0, (gulong)(pdata->target) >> 16);
2779 ppc_ori (code, ppc_r0, ppc_r0, (gulong)(pdata->target) & 0xffff);
2780 ppc_mtctr (code, ppc_r0);
2781 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
2782 mono_arch_flush_icache ((guchar*)thunks, 16);
2784 ppc_patch (pdata->code, (guchar*)thunks);
2787 static int num_thunks = 0;
2789 if ((num_thunks % 20) == 0)
2790 g_print ("num_thunks: %d\n", num_thunks);
2794 /* skip 16 bytes, the size of the thunk */
2798 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch the branch at 'code' to reach 'target' through a thunk when a
 *   direct branch cannot span the distance.  Walks the domain's code
 *   chunks with search_thunk_slot(); the second pass (see comment below)
 *   presumably relaxes matching to take the first free slot -- the code
 *   setting pdata.found between passes is elided.  Aborts if no slot is
 *   found.  'absolute' is stored into pdata.absolute for the search.
 */
2805 handle_thunk (int absolute, guchar *code, const guchar *target) {
2806 MonoDomain *domain = mono_domain_get ();
2810 pdata.target = target;
2811 pdata.absolute = absolute;
/* Hold the domain lock across the search so thunk slots are not
 * concurrently claimed. */
2814 mono_domain_lock (domain);
2815 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2818 /* this uses the first available slot */
2820 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2822 mono_domain_unlock (domain);
2824 if (pdata.found != 1)
2825 g_print ("thunk failed for %p from %p\n", target, code);
2826 g_assert (pdata.found == 1);
/*
 * patch_ins:
 *   Store the 32-bit instruction 'ins' at 'code' in big-endian byte
 *   order and flush the instruction cache for those 4 bytes so the CPU
 *   sees the new instruction.
 */
2830 patch_ins (guint8 *code, guint32 ins)
2832 *(guint32*)code = GUINT32_TO_BE (ins);
2833 mono_arch_flush_icache (code, 4);
/*
 * ppc_patch_full:
 *   Rewrite the instruction (or address-load sequence) at 'code' so
 *   control transfers to 'target'.  Handles I-form branches (primary
 *   opcode 18), B-form conditional branches (16-bit displacement;
 *   its guarding condition is in elided lines), and full
 *   lis/ori/.../mtlr/blrl load sequences.  NOTE(review): 'is_fd'
 *   presumably marks 'target' as a ppc64 function descriptor, resolved
 *   via mono_get_addr_from_ftnptr() below -- confirm against callers.
 */
2837 ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
2839 guint32 ins = GUINT32_FROM_BE (*(guint32*)code);
/* The primary opcode lives in the top 6 bits of every PPC instruction. */
2840 guint32 prim = ins >> 26;
2843 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2845 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2846 gint diff = target - code;
/* Unconditional branch (opcode 18): 26-bit signed relative
 * displacement; preserve the LK (link) bit with 'ins & 1'. */
2849 if (diff <= 33554431){
2850 ins = (18 << 26) | (diff) | (ins & 1);
2851 patch_ins (code, ins);
2855 /* diff between 0 and -33554432 */
2856 if (diff >= -33554432){
2857 ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1);
2858 patch_ins (code, ins);
/* Relative displacement out of range: try an absolute branch (set the
 * AA bit, '| 2') if the target address itself fits in 26 signed bits. */
2863 if ((glong)target >= 0){
2864 if ((glong)target <= 33554431){
2865 ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2;
2866 patch_ins (code, ins);
2870 if ((glong)target >= -33554432){
2871 ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2;
2872 patch_ins (code, ins);
/* Neither relative nor absolute encodings reach: route the call
 * through a 16-byte thunk instead. */
2877 handle_thunk (TRUE, code, target);
2880 g_assert_not_reached ();
/* Conditional branch: only a 16-bit displacement/address field is
 * available; keep the AA/LK bits ('ins & 3') and assert the value fits. */
2888 guint32 li = (gulong)target;
2889 ins = (ins & 0xffff0000) | (ins & 3);
2890 ovf = li & 0xffff0000;
2891 if (ovf != 0 && ovf != 0xffff0000)
2892 g_assert_not_reached ();
2895 // FIXME: assert the top bits of li are 0
2897 gint diff = target - code;
2898 ins = (ins & 0xffff0000) | (ins & 3);
2899 ovf = diff & 0xffff0000;
2900 if (ovf != 0 && ovf != 0xffff0000)
2901 g_assert_not_reached ();
2905 patch_ins (code, ins);
/* addis/lis (opcode 15), or blrl (0x4e800021) / blr (0x4e800020) /
 * bcctr (0x4e800420): patch the address-load sequence feeding the
 * indirect branch rather than the branch itself. */
2909 if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
2910 #ifdef __mono_ppc64__
2911 guint32 *seq = (guint32*)code;
2912 guint32 *branch_ins;
2914 /* the trampoline code will try to patch the blrl, blr, bcctr */
2915 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
/* Walk back from the branch to locate the start of the sequence;
 * opcode 58 is ld, opcode 31 covers mr (or-form). */
2917 if (ppc_opcode (seq [-3]) == 58 || ppc_opcode (seq [-3]) == 31) /* ld || mr */
2922 if (ppc_opcode (seq [5]) == 58 || ppc_opcode (seq [5]) == 31) /* ld || mr */
2923 branch_ins = seq + 8;
2925 branch_ins = seq + 6;
2928 seq = (guint32*)code;
2929 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
2930 g_assert (mono_ppc_is_direct_call_sequence (branch_ins));
2932 if (ppc_opcode (seq [5]) == 58) { /* ld */
2933 g_assert (ppc_opcode (seq [6]) == 58); /* ld */
/* (elided else branch) no ld pair present: rewrite slot 5 as
 * 'mr r0, r11'. */
2936 guint8 *buf = (guint8*)&seq [5];
2937 ppc_mr (buf, ppc_r0, ppc_r11);
/* Function-descriptor target: resolve to the actual entry address. */
2942 target = mono_get_addr_from_ftnptr ((gpointer)target);
2945 /* FIXME: make this thread safe */
2946 /* FIXME: we're assuming we're using r11 here */
2947 ppc_load_ptr_sequence (code, ppc_r11, target);
2948 mono_arch_flush_icache ((guint8*)seq, 28);
/* 32-bit path: a 4-instruction lis/ori/mtlr/blrl sequence. */
2951 /* the trampoline code will try to patch the blrl, blr, bcctr */
2952 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
2955 /* this is the lis/ori/mtlr/blrl sequence */
2956 seq = (guint32*)code;
2957 g_assert ((seq [0] >> 26) == 15);
2958 g_assert ((seq [1] >> 26) == 24);
2959 g_assert ((seq [2] >> 26) == 31);
2960 g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
2961 /* FIXME: make this thread safe */
2962 ppc_lis (code, ppc_r0, (guint32)(target) >> 16)
2963 ppc_ori (code, ppc_r0, ppc_r0, (guint32)(target) & 0xffff);
2964 mono_arch_flush_icache (code - 8, 8);
2967 g_assert_not_reached ();
2969 // g_print ("patched with 0x%08x\n", ins);
/*
 * ppc_patch:
 *   Convenience wrapper around ppc_patch_full() with is_fd == FALSE,
 *   i.e. 'target' is a plain code address, not a function descriptor.
 */
2973 ppc_patch (guchar *code, const guchar *target)
2975 ppc_patch_full (code, target, FALSE);
/*
 * mono_ppc_patch:
 *   Public (non-static) entry point for patching a branch at 'code' to
 *   reach 'target'; simply forwards to ppc_patch().
 */
2979 mono_ppc_patch (guchar *code, const guchar *target)
2981 ppc_patch (code, target);
/*
 * emit_move_return_value:
 *   After a call instruction 'ins', move the return value from the
 *   ABI return register into ins->dreg.  Only the float-call case is
 *   visible here: the result arrives in ppc_f1 and is copied with fmr
 *   unless dreg already is f1.  Other cases are in elided lines.
 */
2985 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
2987 switch (ins->opcode) {
2990 case OP_FCALL_MEMBASE:
2991 if (ins->dreg != ppc_f1)
2992 ppc_fmr (code, ins->dreg, ppc_f1);
3000 * emit_load_volatile_arguments:
3002 * Load volatile arguments from the stack to the original input registers.
3003 * Required before a tail call.
3006 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3008 MonoMethod *method = cfg->method;
3009 MonoMethodSignature *sig;
3013 int struct_index = 0;
/* Recompute the per-argument register/stack assignment; this must
 * mirror the layout produced in emit_prolog. */
3015 sig = mono_method_signature (method);
3017 /* This is the opposite of the code in emit_prolog */
3021 cinfo = calculate_sizes (sig, sig->pinvoke);
/* Valuetype return: reload the hidden return-buffer address into its
 * designated register. */
3023 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3024 ArgInfo *ainfo = &cinfo->ret;
3025 inst = cfg->vret_addr;
3026 g_assert (ppc_is_imm16 (inst->inst_offset));
3027 ppc_ldptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3029 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3030 ArgInfo *ainfo = cinfo->args + i;
3031 inst = cfg->args [pos];
3033 g_assert (inst->opcode != OP_REGVAR);
3034 g_assert (ppc_is_imm16 (inst->inst_offset));
3036 switch (ainfo->regtype) {
/* Integer-register argument: reload with a load of the right width. */
3037 case RegTypeGeneral:
3038 switch (ainfo->size) {
3040 ppc_lbz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3043 ppc_lhz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3045 #ifdef __mono_ppc64__
3047 ppc_lwz (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3051 ppc_ldptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
/* FP-register argument: single- vs double-precision reload by size. */
3057 switch (ainfo->size) {
3059 ppc_lfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3062 ppc_lfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
3065 g_assert_not_reached ();
/* (elided case label -- presumably a stack-passed argument, e.g.
 * RegTypeBase; confirm.)  Copy the value back through ppc_r0 to the
 * slot at ainfo->offset(ainfo->reg). */
3070 MonoType *type = mini_type_get_underlying_type (cfg->generic_sharing_context,
3071 &inst->klass->byval_arg);
3073 #ifndef __mono_ppc64__
3074 if (type->type == MONO_TYPE_I8)
3078 if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_I8) {
3079 ppc_ldptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
3080 ppc_stptr (code, ppc_r0, ainfo->offset, ainfo->reg);
3081 } else if (type->type == MONO_TYPE_I4) {
3082 ppc_lwz (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
3083 ppc_stw (code, ppc_r0, ainfo->offset, ainfo->reg);
3091 case RegTypeStructByVal: {
3102 * Darwin pinvokes needs some special handling
3103 * for 1 and 2 byte arguments
3105 if (method->signature->pinvoke)
3106 size = mono_class_native_size (inst->klass, NULL);
3107 if (size == 1 || size == 2) {
/* Reload each pointer-sized chunk of the struct into the run of
 * consecutive registers starting at ainfo->reg. */
3112 for (j = 0; j < ainfo->vtregs; ++j) {
3113 ppc_ldptr (code, ainfo->reg + j,
3114 inst->inst_offset + j * sizeof (gpointer),
3115 inst->inst_basereg);
3116 /* FIXME: shift to the right */
/* Struct passed by reference: reload the saved address of the copy. */
3123 case RegTypeStructByAddr: {
3124 MonoInst *addr = cfg->tailcall_valuetype_addrs [struct_index];
3126 g_assert (ppc_is_imm16 (addr->inst_offset));
3127 g_assert (!ainfo->offset);
3128 ppc_ldptr (code, ainfo->reg, addr->inst_offset, addr->inst_basereg);
3135 g_assert_not_reached ();
3146 /* This must be kept in sync with emit_load_volatile_arguments(). */
/*
 * ins_native_length:
 *   Return an upper bound (in bytes) on the native code emitted for
 *   'ins'.  The default comes from the machine-description length
 *   table; OP_JMP (tail call) additionally emits per-argument reload
 *   code (see emit_load_volatile_arguments), so its bound grows with
 *   the signature.
 */
3148 ins_native_length (MonoCompile *cfg, MonoInst *ins)
3150 int len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3151 MonoMethodSignature *sig;
/* Non-tail-call opcodes use the table length unchanged. */
3156 if (ins->opcode != OP_JMP)
3159 call = (MonoCallInst*)ins;
3160 sig = mono_method_signature (cfg->method);
3161 cinfo = calculate_sizes (sig, sig->pinvoke);
3163 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* Add a worst-case amount per argument, by the way it is passed. */
3165 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3166 ArgInfo *ainfo = cinfo->args + i;
3168 switch (ainfo->regtype) {
3169 case RegTypeGeneral:
3178 case RegTypeStructByVal:
3179 len += 4 * ainfo->size;
3182 case RegTypeStructByAddr:
3187 g_assert_not_reached ();
/*
 * emit_reserve_param_area:
 *   Grow the stack by cfg->param_area (rounded up to
 *   MONO_ARCH_FRAME_ALIGNMENT), preserving the ABI back chain: the
 *   saved word at 0(sp) is reloaded and re-stored with update at the
 *   new stack top.  Uses ppc_r11 when the size exceeds a 16-bit
 *   displacement.  Undone by emit_unreserve_param_area().
 */
3197 emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
3199 long size = cfg->param_area;
/* Round up to the frame alignment. */
3201 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3202 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3207 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3208 if (ppc_is_imm16 (-size)) {
3209 ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
3211 ppc_load (code, ppc_r11, -size);
3212 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
/*
 * emit_unreserve_param_area:
 *   Inverse of emit_reserve_param_area(): shrink the stack by the same
 *   aligned cfg->param_area amount, again carrying the back-chain word
 *   from 0(sp) to the new stack top with a store-with-update.
 */
3219 emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
3221 long size = cfg->param_area;
/* Must match the rounding in emit_reserve_param_area(). */
3223 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3224 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3229 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3230 if (ppc_is_imm16 (size)) {
3231 ppc_stptr_update (code, ppc_r0, size, ppc_sp);
3233 ppc_load (code, ppc_r11, size);
3234 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
/* Clamp an immediate shift count to the valid hardware range:
 * 0-31 on 32-bit PPC, 0-63 on 64-bit. */
3240 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3244 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3246 MonoInst *ins, *next;
3249 guint8 *code = cfg->native_code + cfg->code_len;
3250 MonoInst *last_ins = NULL;
3251 guint last_offset = 0;
3255 /* we don't align basic blocks of loops on ppc */
3257 if (cfg->verbose_level > 2)
3258 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3260 cpos = bb->max_offset;
3262 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3263 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3264 //g_assert (!mono_compile_aot);
3267 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3268 /* this is not thread save, but good enough */
3269 /* fixme: howto handle overflows? */
3270 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3273 MONO_BB_FOR_EACH_INS (bb, ins) {
3274 offset = code - cfg->native_code;
3276 max_len = ins_native_length (cfg, ins);
3278 if (offset > (cfg->code_size - max_len - 16)) {
3279 cfg->code_size *= 2;
3280 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3281 code = cfg->native_code + offset;
3283 // if (ins->cil_code)
3284 // g_print ("cil code\n");
3285 mono_debug_record_line_number (cfg, ins, offset);
3287 switch (normalize_opcode (ins->opcode)) {
3288 case OP_RELAXED_NOP:
3291 case OP_DUMMY_STORE:
3292 case OP_NOT_REACHED:
3295 case OP_SEQ_POINT: {
3298 if (cfg->compile_aot)
3302 * Read from the single stepping trigger page. This will cause a
3303 * SIGSEGV when single stepping is enabled.
3304 * We do this _before_ the breakpoint, so single stepping after
3305 * a breakpoint is hit will step to the next IL offset.
3307 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3308 ppc_load (code, ppc_r11, (gsize)ss_trigger_page);
3309 ppc_ldptr (code, ppc_r11, 0, ppc_r11);
3312 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3315 * A placeholder for a possible breakpoint inserted by
3316 * mono_arch_set_breakpoint ().
3318 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
3323 emit_tls_access (code, ins->dreg, ins->inst_offset);
3326 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3327 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3328 ppc_mr (code, ppc_r4, ppc_r0);
3331 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3332 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3333 ppc_mr (code, ppc_r4, ppc_r0);
3335 case OP_MEMORY_BARRIER:
3338 case OP_STOREI1_MEMBASE_REG:
3339 if (ppc_is_imm16 (ins->inst_offset)) {
3340 ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3342 if (ppc_is_imm32 (ins->inst_offset)) {
3343 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3344 ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r12);
3346 ppc_load (code, ppc_r0, ins->inst_offset);
3347 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3351 case OP_STOREI2_MEMBASE_REG:
3352 if (ppc_is_imm16 (ins->inst_offset)) {
3353 ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3355 if (ppc_is_imm32 (ins->inst_offset)) {
3356 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3357 ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r12);
3359 ppc_load (code, ppc_r0, ins->inst_offset);
3360 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3364 case OP_STORE_MEMBASE_REG:
3365 if (ppc_is_imm16 (ins->inst_offset)) {
3366 ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3368 if (ppc_is_imm32 (ins->inst_offset)) {
3369 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3370 ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r12);
3372 ppc_load (code, ppc_r0, ins->inst_offset);
3373 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3377 #ifdef __mono_ilp32__
3378 case OP_STOREI8_MEMBASE_REG:
3379 if (ppc_is_imm16 (ins->inst_offset)) {
3380 ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3382 ppc_load (code, ppc_r0, ins->inst_offset);
3383 ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3387 case OP_STOREI1_MEMINDEX:
3388 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3390 case OP_STOREI2_MEMINDEX:
3391 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3393 case OP_STORE_MEMINDEX:
3394 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3397 g_assert_not_reached ();
3399 case OP_LOAD_MEMBASE:
3400 if (ppc_is_imm16 (ins->inst_offset)) {
3401 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3403 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3404 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3405 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg);
3407 ppc_load (code, ppc_r0, ins->inst_offset);
3408 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3412 case OP_LOADI4_MEMBASE:
3413 #ifdef __mono_ppc64__
3414 if (ppc_is_imm16 (ins->inst_offset)) {
3415 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3417 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3418 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3419 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg);
3421 ppc_load (code, ppc_r0, ins->inst_offset);
3422 ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3427 case OP_LOADU4_MEMBASE:
3428 if (ppc_is_imm16 (ins->inst_offset)) {
3429 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3431 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3432 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3433 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg);
3435 ppc_load (code, ppc_r0, ins->inst_offset);
3436 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3440 case OP_LOADI1_MEMBASE:
3441 case OP_LOADU1_MEMBASE:
3442 if (ppc_is_imm16 (ins->inst_offset)) {
3443 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3445 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3446 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3447 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg);
3449 ppc_load (code, ppc_r0, ins->inst_offset);
3450 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3453 if (ins->opcode == OP_LOADI1_MEMBASE)
3454 ppc_extsb (code, ins->dreg, ins->dreg);
3456 case OP_LOADU2_MEMBASE:
3457 if (ppc_is_imm16 (ins->inst_offset)) {
3458 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3460 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3461 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3462 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg);
3464 ppc_load (code, ppc_r0, ins->inst_offset);
3465 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3469 case OP_LOADI2_MEMBASE:
3470 if (ppc_is_imm16 (ins->inst_offset)) {
3471 ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3473 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3474 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3475 ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg);
3477 ppc_load (code, ppc_r0, ins->inst_offset);
3478 ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3482 #ifdef __mono_ilp32__
3483 case OP_LOADI8_MEMBASE:
3484 if (ppc_is_imm16 (ins->inst_offset)) {
3485 ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3487 ppc_load (code, ppc_r0, ins->inst_offset);
3488 ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3492 case OP_LOAD_MEMINDEX:
3493 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3495 case OP_LOADI4_MEMINDEX:
3496 #ifdef __mono_ppc64__
3497 ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3500 case OP_LOADU4_MEMINDEX:
3501 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3503 case OP_LOADU2_MEMINDEX:
3504 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3506 case OP_LOADI2_MEMINDEX:
3507 ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3509 case OP_LOADU1_MEMINDEX:
3510 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3512 case OP_LOADI1_MEMINDEX:
3513 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3514 ppc_extsb (code, ins->dreg, ins->dreg);
3516 case OP_ICONV_TO_I1:
3517 CASE_PPC64 (OP_LCONV_TO_I1)
3518 ppc_extsb (code, ins->dreg, ins->sreg1);
3520 case OP_ICONV_TO_I2:
3521 CASE_PPC64 (OP_LCONV_TO_I2)
3522 ppc_extsh (code, ins->dreg, ins->sreg1);
3524 case OP_ICONV_TO_U1:
3525 CASE_PPC64 (OP_LCONV_TO_U1)
3526 ppc_clrlwi (code, ins->dreg, ins->sreg1, 24);
3528 case OP_ICONV_TO_U2:
3529 CASE_PPC64 (OP_LCONV_TO_U2)
3530 ppc_clrlwi (code, ins->dreg, ins->sreg1, 16);
3534 CASE_PPC64 (OP_LCOMPARE)
3535 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1;
3537 if (next && compare_opcode_is_unsigned (next->opcode))
3538 ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2);
3540 ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2);
3542 case OP_COMPARE_IMM:
3543 case OP_ICOMPARE_IMM:
3544 CASE_PPC64 (OP_LCOMPARE_IMM)
3545 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1;
3547 if (next && compare_opcode_is_unsigned (next->opcode)) {
3548 if (ppc_is_uimm16 (ins->inst_imm)) {
3549 ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3551 g_assert_not_reached ();
3554 if (ppc_is_imm16 (ins->inst_imm)) {
3555 ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3557 g_assert_not_reached ();
3566 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3569 CASE_PPC64 (OP_LADD)
3570 ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2);
3574 ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2);
3577 if (ppc_is_imm16 (ins->inst_imm)) {
3578 ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3580 g_assert_not_reached ();
3585 CASE_PPC64 (OP_LADD_IMM)
3586 if (ppc_is_imm16 (ins->inst_imm)) {
3587 ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm);
3589 g_assert_not_reached ();
3593 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3595 ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2);
3596 ppc_mfspr (code, ppc_r0, ppc_xer);
3597 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3598 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3600 case OP_IADD_OVF_UN:
3601 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3603 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3604 ppc_mfspr (code, ppc_r0, ppc_xer);
3605 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3606 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3609 CASE_PPC64 (OP_LSUB_OVF)
3610 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3612 ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1);
3613 ppc_mfspr (code, ppc_r0, ppc_xer);
3614 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3615 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3617 case OP_ISUB_OVF_UN:
3618 CASE_PPC64 (OP_LSUB_OVF_UN)
3619 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3621 ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
3622 ppc_mfspr (code, ppc_r0, ppc_xer);
3623 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3624 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3626 case OP_ADD_OVF_CARRY:
3627 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3629 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3630 ppc_mfspr (code, ppc_r0, ppc_xer);
3631 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3632 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3634 case OP_ADD_OVF_UN_CARRY:
3635 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3637 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3638 ppc_mfspr (code, ppc_r0, ppc_xer);
3639 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3640 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3642 case OP_SUB_OVF_CARRY:
3643 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3645 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3646 ppc_mfspr (code, ppc_r0, ppc_xer);
3647 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3648 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3650 case OP_SUB_OVF_UN_CARRY:
3651 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3653 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3654 ppc_mfspr (code, ppc_r0, ppc_xer);
3655 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3656 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3660 ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1);
3663 CASE_PPC64 (OP_LSUB)
3664 ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
3668 ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
3672 CASE_PPC64 (OP_LSUB_IMM)
3673 // we add the negated value
3674 if (ppc_is_imm16 (-ins->inst_imm))
3675 ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
3677 g_assert_not_reached ();
3681 g_assert (ppc_is_imm16 (ins->inst_imm));
3682 ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3685 ppc_subfze (code, ins->dreg, ins->sreg1);
3688 CASE_PPC64 (OP_LAND)
3689 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3690 ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2);
3694 CASE_PPC64 (OP_LAND_IMM)
3695 if (!(ins->inst_imm & 0xffff0000)) {
3696 ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm);
3697 } else if (!(ins->inst_imm & 0xffff)) {
3698 ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16));
3700 g_assert_not_reached ();
3704 CASE_PPC64 (OP_LDIV) {
3705 guint8 *divisor_is_m1;
3706 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3708 ppc_compare_reg_imm (code, 0, ins->sreg2, -1);
3709 divisor_is_m1 = code;
3710 ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
3711 ppc_lis (code, ppc_r0, 0x8000);
3712 #ifdef __mono_ppc64__
3713 if (ins->opcode == OP_LDIV)
3714 ppc_sldi (code, ppc_r0, ppc_r0, 32);
3716 ppc_compare (code, 0, ins->sreg1, ppc_r0);
3717 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "ArithmeticException");
3718 ppc_patch (divisor_is_m1, code);
3719 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3721 if (ins->opcode == OP_IDIV)
3722 ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2);
3723 #ifdef __mono_ppc64__
3725 ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2);
3727 ppc_mfspr (code, ppc_r0, ppc_xer);
3728 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3729 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3733 CASE_PPC64 (OP_LDIV_UN)
3734 if (ins->opcode == OP_IDIV_UN)
3735 ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2);
3736 #ifdef __mono_ppc64__
3738 ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2);
3740 ppc_mfspr (code, ppc_r0, ppc_xer);
3741 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3742 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3748 g_assert_not_reached ();
3751 ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2);
3755 CASE_PPC64 (OP_LOR_IMM)
3756 if (!(ins->inst_imm & 0xffff0000)) {
3757 ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3758 } else if (!(ins->inst_imm & 0xffff)) {
3759 ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16));
3761 g_assert_not_reached ();
3765 CASE_PPC64 (OP_LXOR)
3766 ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
3770 CASE_PPC64 (OP_LXOR_IMM)
3771 if (!(ins->inst_imm & 0xffff0000)) {
3772 ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3773 } else if (!(ins->inst_imm & 0xffff)) {
3774 ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16));
3776 g_assert_not_reached ();
3780 CASE_PPC64 (OP_LSHL)
3781 ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2);
3785 CASE_PPC64 (OP_LSHL_IMM)
3786 ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3789 ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2);
3792 ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3795 if (MASK_SHIFT_IMM (ins->inst_imm))
3796 ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3798 ppc_mr (code, ins->dreg, ins->sreg1);
3801 ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
3804 CASE_PPC64 (OP_LNOT)
3805 ppc_not (code, ins->dreg, ins->sreg1);
3808 CASE_PPC64 (OP_LNEG)
3809 ppc_neg (code, ins->dreg, ins->sreg1);
3812 CASE_PPC64 (OP_LMUL)
3813 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3817 CASE_PPC64 (OP_LMUL_IMM)
3818 if (ppc_is_imm16 (ins->inst_imm)) {
3819 ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm);
3821 g_assert_not_reached ();
3825 CASE_PPC64 (OP_LMUL_OVF)
3826 /* we annot use mcrxr, since it's not implemented on some processors
3827 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3829 if (ins->opcode == OP_IMUL_OVF)
3830 ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2);
3831 #ifdef __mono_ppc64__
3833 ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2);
3835 ppc_mfspr (code, ppc_r0, ppc_xer);
3836 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3837 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3839 case OP_IMUL_OVF_UN:
3840 CASE_PPC64 (OP_LMUL_OVF_UN)
3841 /* we first multiply to get the high word and compare to 0
3842 * to set the flags, then the result is discarded and then
3843 * we multiply to get the lower * bits result
3845 if (ins->opcode == OP_IMUL_OVF_UN)
3846 ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2);
3847 #ifdef __mono_ppc64__
3849 ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2);
3851 ppc_cmpi (code, 0, 0, ppc_r0, 0);
3852 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException");
3853 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3856 ppc_load (code, ins->dreg, ins->inst_c0);
3859 ppc_load (code, ins->dreg, ins->inst_l);
3862 case OP_LOAD_GOTADDR:
3863 /* The PLT implementation depends on this */
3864 g_assert (ins->dreg == ppc_r30);
3866 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
3869 // FIXME: Fix max instruction length
3870 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
3871 /* arch_emit_got_access () patches this */
3872 ppc_load32 (code, ppc_r0, 0);
3873 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3876 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3877 ppc_load_sequence (code, ins->dreg, 0);
3879 CASE_PPC32 (OP_ICONV_TO_I4)
3880 CASE_PPC32 (OP_ICONV_TO_U4)
3882 ppc_mr (code, ins->dreg, ins->sreg1);
3885 int saved = ins->sreg1;
3886 if (ins->sreg1 == ppc_r3) {
3887 ppc_mr (code, ppc_r0, ins->sreg1);
3890 if (ins->sreg2 != ppc_r3)
3891 ppc_mr (code, ppc_r3, ins->sreg2);
3892 if (saved != ppc_r4)
3893 ppc_mr (code, ppc_r4, saved);
3897 ppc_fmr (code, ins->dreg, ins->sreg1);
3899 case OP_FCONV_TO_R4:
3900 ppc_frsp (code, ins->dreg, ins->sreg1);
3906 * Keep in sync with mono_arch_emit_epilog
3908 g_assert (!cfg->method->save_lmf);
3910 * Note: we can use ppc_r11 here because it is dead anyway:
3911 * we're leaving the method.
3913 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
3914 long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
3915 if (ppc_is_imm16 (ret_offset)) {
3916 ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
3918 ppc_load (code, ppc_r11, ret_offset);
3919 ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
3921 ppc_mtlr (code, ppc_r0);
3924 code = emit_load_volatile_arguments (cfg, code);
3926 if (ppc_is_imm16 (cfg->stack_usage)) {
3927 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
3929 /* cfg->stack_usage is an int, so we can use
3930 * an addis/addi sequence here even in 64-bit. */
3931 ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
3932 ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
3934 if (!cfg->method->save_lmf) {
3935 /*for (i = 31; i >= 14; --i) {
3936 if (cfg->used_float_regs & (1 << i)) {
3937 pos += sizeof (double);
3938 ppc_lfd (code, i, -pos, cfg->frame_reg);
3942 for (i = 31; i >= 13; --i) {
3943 if (cfg->used_int_regs & (1 << i)) {
3944 pos += sizeof (gpointer);
3945 ppc_ldptr (code, i, -pos, ppc_r11);
3949 /* FIXME restore from MonoLMF: though this can't happen yet */
3951 ppc_mr (code, ppc_sp, ppc_r11);
3952 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3953 if (cfg->compile_aot) {
3954 /* arch_emit_got_access () patches this */
3955 ppc_load32 (code, ppc_r0, 0);
3956 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3957 ppc_ldptr_indexed (code, ppc_r11, ppc_r30, ppc_r0);
3958 ppc_ldptr (code, ppc_r0, 0, ppc_r11);
3960 ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
3962 ppc_mtctr (code, ppc_r0);
3963 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
3970 /* ensure ins->sreg1 is not NULL */
3971 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
3974 long cookie_offset = cfg->sig_cookie + cfg->stack_usage;
3975 if (ppc_is_imm16 (cookie_offset)) {
3976 ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset);
3978 ppc_load (code, ppc_r0, cookie_offset);
3979 ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
3981 ppc_stptr (code, ppc_r0, 0, ins->sreg1);
3990 call = (MonoCallInst*)ins;
3991 if (ins->flags & MONO_INST_HAS_METHOD)
3992 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3994 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3995 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3996 ppc_load_func (code, ppc_r0, 0);
3997 ppc_mtlr (code, ppc_r0);
4002 /* FIXME: this should be handled somewhere else in the new jit */
4003 code = emit_move_return_value (cfg, ins, code);
4009 case OP_VOIDCALL_REG:
4011 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
4012 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
4013 /* FIXME: if we know that this is a method, we
4014 can omit this load */
4015 ppc_ldptr (code, ppc_r2, 8, ins->sreg1);
4016 ppc_mtlr (code, ppc_r0);
4018 ppc_mtlr (code, ins->sreg1);
4021 /* FIXME: this should be handled somewhere else in the new jit */
4022 code = emit_move_return_value (cfg, ins, code);
4024 case OP_FCALL_MEMBASE:
4025 case OP_LCALL_MEMBASE:
4026 case OP_VCALL_MEMBASE:
4027 case OP_VCALL2_MEMBASE:
4028 case OP_VOIDCALL_MEMBASE:
4029 case OP_CALL_MEMBASE:
4030 if (cfg->compile_aot && ins->sreg1 == ppc_r11) {
4031 /* The trampolines clobber this */
4032 ppc_mr (code, ppc_r29, ins->sreg1);
4033 ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
4035 ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1);
4037 ppc_mtlr (code, ppc_r0);
4039 /* FIXME: this should be handled somewhere else in the new jit */
4040 code = emit_move_return_value (cfg, ins, code);
4043 guint8 * zero_loop_jump, * zero_loop_start;
4044 /* keep alignment */
4045 int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
4046 int area_offset = alloca_waste;
4048 ppc_addi (code, ppc_r11, ins->sreg1, alloca_waste + 31);
4049 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
4050 ppc_clear_right_imm (code, ppc_r11, ppc_r11, 4);
4051 /* use ctr to store the number of words to 0 if needed */
4052 if (ins->flags & MONO_INST_INIT) {
4053 /* we zero 4 bytes at a time:
4054 * we add 7 instead of 3 so that we set the counter to
4055 * at least 1, otherwise the bdnz instruction will make
4056 * it negative and iterate billions of times.
4058 ppc_addi (code, ppc_r0, ins->sreg1, 7);
4059 ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2);
4060 ppc_mtctr (code, ppc_r0);
4062 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
4063 ppc_neg (code, ppc_r11, ppc_r11);
4064 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
4066 /* FIXME: make this loop work in 8 byte
4067 increments on PPC64 */
4068 if (ins->flags & MONO_INST_INIT) {
4069 /* adjust the dest reg by -4 so we can use stwu */
4070 /* we actually adjust -8 because we let the loop
4073 ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
4074 ppc_li (code, ppc_r11, 0);
4075 zero_loop_start = code;
4076 ppc_stwu (code, ppc_r11, 4, ins->dreg);
4077 zero_loop_jump = code;
4078 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
4079 ppc_patch (zero_loop_jump, zero_loop_start);
4081 ppc_addi (code, ins->dreg, ppc_sp, area_offset);
4086 ppc_mr (code, ppc_r3, ins->sreg1);
4087 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4088 (gpointer)"mono_arch_throw_exception");
4089 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
4090 ppc_load_func (code, ppc_r0, 0);
4091 ppc_mtlr (code, ppc_r0);
4100 ppc_mr (code, ppc_r3, ins->sreg1);
4101 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4102 (gpointer)"mono_arch_rethrow_exception");
4103 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
4104 ppc_load_func (code, ppc_r0, 0);
4105 ppc_mtlr (code, ppc_r0);
4112 case OP_START_HANDLER: {
4113 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4114 g_assert (spvar->inst_basereg != ppc_sp);
4115 code = emit_reserve_param_area (cfg, code);
4116 ppc_mflr (code, ppc_r0);
4117 if (ppc_is_imm16 (spvar->inst_offset)) {
4118 ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4120 ppc_load (code, ppc_r11, spvar->inst_offset);
4121 ppc_stptr_indexed (code, ppc_r0, ppc_r11, spvar->inst_basereg);
4125 case OP_ENDFILTER: {
4126 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4127 g_assert (spvar->inst_basereg != ppc_sp);
4128 code = emit_unreserve_param_area (cfg, code);
4129 if (ins->sreg1 != ppc_r3)
4130 ppc_mr (code, ppc_r3, ins->sreg1);
4131 if (ppc_is_imm16 (spvar->inst_offset)) {
4132 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4134 ppc_load (code, ppc_r11, spvar->inst_offset);
4135 ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r11);
4137 ppc_mtlr (code, ppc_r0);
4141 case OP_ENDFINALLY: {
4142 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4143 g_assert (spvar->inst_basereg != ppc_sp);
4144 code = emit_unreserve_param_area (cfg, code);
4145 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4146 ppc_mtlr (code, ppc_r0);
4150 case OP_CALL_HANDLER:
4151 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4153 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4156 ins->inst_c0 = code - cfg->native_code;
4159 /*if (ins->inst_target_bb->native_offset) {
4161 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4163 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4168 ppc_mtctr (code, ins->sreg1);
4169 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
4173 CASE_PPC64 (OP_LCEQ)
4174 ppc_li (code, ins->dreg, 0);
4175 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4176 ppc_li (code, ins->dreg, 1);
4182 CASE_PPC64 (OP_LCLT)
4183 CASE_PPC64 (OP_LCLT_UN)
4184 ppc_li (code, ins->dreg, 1);
4185 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4186 ppc_li (code, ins->dreg, 0);
4192 CASE_PPC64 (OP_LCGT)
4193 CASE_PPC64 (OP_LCGT_UN)
4194 ppc_li (code, ins->dreg, 1);
4195 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4196 ppc_li (code, ins->dreg, 0);
4198 case OP_COND_EXC_EQ:
4199 case OP_COND_EXC_NE_UN:
4200 case OP_COND_EXC_LT:
4201 case OP_COND_EXC_LT_UN:
4202 case OP_COND_EXC_GT:
4203 case OP_COND_EXC_GT_UN:
4204 case OP_COND_EXC_GE:
4205 case OP_COND_EXC_GE_UN:
4206 case OP_COND_EXC_LE:
4207 case OP_COND_EXC_LE_UN:
4208 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4210 case OP_COND_EXC_IEQ:
4211 case OP_COND_EXC_INE_UN:
4212 case OP_COND_EXC_ILT:
4213 case OP_COND_EXC_ILT_UN:
4214 case OP_COND_EXC_IGT:
4215 case OP_COND_EXC_IGT_UN:
4216 case OP_COND_EXC_IGE:
4217 case OP_COND_EXC_IGE_UN:
4218 case OP_COND_EXC_ILE:
4219 case OP_COND_EXC_ILE_UN:
4220 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4232 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4235 /* floating point opcodes */
4237 g_assert (cfg->compile_aot);
4239 /* FIXME: Optimize this */
4241 ppc_mflr (code, ppc_r11);
4243 *(double*)code = *(double*)ins->inst_p0;
4245 ppc_lfd (code, ins->dreg, 8, ppc_r11);
4248 g_assert_not_reached ();
4250 case OP_STORER8_MEMBASE_REG:
4251 if (ppc_is_imm16 (ins->inst_offset)) {
4252 ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4254 if (ppc_is_imm32 (ins->inst_offset)) {
4255 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4256 ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r12);
4258 ppc_load (code, ppc_r0, ins->inst_offset);
4259 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4263 case OP_LOADR8_MEMBASE:
4264 if (ppc_is_imm16 (ins->inst_offset)) {
4265 ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4267 if (ppc_is_imm32 (ins->inst_offset)) {
4268 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4269 ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r12);
4271 ppc_load (code, ppc_r0, ins->inst_offset);
4272 ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4276 case OP_STORER4_MEMBASE_REG:
4277 ppc_frsp (code, ins->sreg1, ins->sreg1);
4278 if (ppc_is_imm16 (ins->inst_offset)) {
4279 ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4281 if (ppc_is_imm32 (ins->inst_offset)) {
4282 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4283 ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r12);
4285 ppc_load (code, ppc_r0, ins->inst_offset);
4286 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4290 case OP_LOADR4_MEMBASE:
4291 if (ppc_is_imm16 (ins->inst_offset)) {
4292 ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4294 if (ppc_is_imm32 (ins->inst_offset)) {
4295 ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4296 ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r12);
4298 ppc_load (code, ppc_r0, ins->inst_offset);
4299 ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4303 case OP_LOADR4_MEMINDEX:
4304 ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4306 case OP_LOADR8_MEMINDEX:
4307 ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4309 case OP_STORER4_MEMINDEX:
4310 ppc_frsp (code, ins->sreg1, ins->sreg1);
4311 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4313 case OP_STORER8_MEMINDEX:
4314 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4317 case CEE_CONV_R4: /* FIXME: change precision */
4319 g_assert_not_reached ();
4320 case OP_FCONV_TO_I1:
4321 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4323 case OP_FCONV_TO_U1:
4324 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4326 case OP_FCONV_TO_I2:
4327 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4329 case OP_FCONV_TO_U2:
4330 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4332 case OP_FCONV_TO_I4:
4334 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4336 case OP_FCONV_TO_U4:
4338 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4340 case OP_LCONV_TO_R_UN:
4341 g_assert_not_reached ();
4342 /* Implemented as helper calls */
4344 case OP_LCONV_TO_OVF_I4_2:
4345 case OP_LCONV_TO_OVF_I: {
4346 #ifdef __mono_ppc64__
/* 32-bit register-pair path (sreg1 = low word, sreg2 = high word):
 * the conversion is in range only when the high word is a pure sign
 * extension of the low word's sign (0 for positive, -1 for negative). */
4349 guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
4350 // Check if it's negative
4351 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
4352 negative_branch = code;
4353 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
4354 // It's positive: the high word must be == 0
4355 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
4356 msword_positive_branch = code;
4357 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
/* falling through here means the high word disagrees with the sign */
4359 ovf_ex_target = code;
4360 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
/* negative case: the high word must be all ones (-1) */
4362 ppc_patch (negative_branch, code);
4363 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
4364 msword_negative_branch = code;
4365 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4366 ppc_patch (msword_negative_branch, ovf_ex_target);
/* in range: the low word already holds the result */
4368 ppc_patch (msword_positive_branch, code);
4369 if (ins->dreg != ins->sreg1)
4370 ppc_mr (code, ins->dreg, ins->sreg1);
4375 ppc_fsqrtd (code, ins->dreg, ins->sreg1);
4378 ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2);
4381 ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2);
4384 ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2);
4387 ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2);
4390 ppc_fneg (code, ins->dreg, ins->sreg1);
4394 g_assert_not_reached ();
4397 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4400 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4401 ppc_li (code, ins->dreg, 0);
4402 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4403 ppc_li (code, ins->dreg, 1);
4406 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4407 ppc_li (code, ins->dreg, 1);
4408 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4409 ppc_li (code, ins->dreg, 0);
4412 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4413 ppc_li (code, ins->dreg, 1);
4414 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4415 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4416 ppc_li (code, ins->dreg, 0);
4419 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4420 ppc_li (code, ins->dreg, 1);
4421 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4422 ppc_li (code, ins->dreg, 0);
4425 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4426 ppc_li (code, ins->dreg, 1);
4427 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4428 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4429 ppc_li (code, ins->dreg, 0);
4432 EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
4435 EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
4438 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4439 EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ);
4442 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4443 EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ);
4446 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4447 EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ);
4450 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4451 EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ);
4454 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4455 EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ);
4458 EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ);
4461 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4462 EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ);
4465 EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ);
4468 g_assert_not_reached ();
4469 case OP_CHECK_FINITE: {
4470 ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31);
4471 ppc_addis (code, ins->sreg1, ins->sreg1, -32752);
4472 ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
4473 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
4476 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4477 #ifdef __mono_ppc64__
4478 ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL);
4480 ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL);
4485 #ifdef __mono_ppc64__
4486 case OP_ICONV_TO_I4:
4488 ppc_extsw (code, ins->dreg, ins->sreg1);
4490 case OP_ICONV_TO_U4:
4492 ppc_clrldi (code, ins->dreg, ins->sreg1, 32);
4494 case OP_ICONV_TO_R4:
4495 case OP_ICONV_TO_R8:
4496 case OP_LCONV_TO_R4:
4497 case OP_LCONV_TO_R8: {
4499 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) {
4500 ppc_extsw (code, ppc_r0, ins->sreg1);
4505 if (cpu_hw_caps & PPC_MOVE_FPR_GPR) {
4506 ppc_mffgpr (code, ins->dreg, tmp);
4508 ppc_str (code, tmp, -8, ppc_r1);
4509 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4511 ppc_fcfid (code, ins->dreg, ins->dreg);
4512 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4)
4513 ppc_frsp (code, ins->dreg, ins->dreg);
4517 ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2);
4520 ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2);
4523 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4525 ppc_mfspr (code, ppc_r0, ppc_xer);
4526 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */
4527 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4529 case OP_COND_EXC_OV:
4530 ppc_mfspr (code, ppc_r0, ppc_xer);
4531 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */
4532 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4544 EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ);
4546 case OP_FCONV_TO_I8:
4547 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4549 case OP_FCONV_TO_U8:
4550 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
4552 case OP_STOREI4_MEMBASE_REG:
4553 if (ppc_is_imm16 (ins->inst_offset)) {
4554 ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4556 ppc_load (code, ppc_r0, ins->inst_offset);
4557 ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4560 case OP_STOREI4_MEMINDEX:
4561 ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg);
4564 ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4566 case OP_ISHR_UN_IMM:
4567 if (ins->inst_imm & 0x1f)
4568 ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4570 ppc_mr (code, ins->dreg, ins->sreg1);
4572 case OP_ATOMIC_ADD_NEW_I4:
4573 case OP_ATOMIC_ADD_NEW_I8: {
/* atomic fetch-and-add built from a load-reserved/store-conditional
 * retry loop (lwarx/stwcx. for I4, ldarx/stdcx. for I8) */
4574 guint8 *loop = code, *branch;
4575 g_assert (ins->inst_offset == 0);
4576 if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
4577 ppc_lwarx (code, ppc_r0, 0, ins->inst_basereg);
4579 ppc_ldarx (code, ppc_r0, 0, ins->inst_basereg);
4580 ppc_add (code, ppc_r0, ppc_r0, ins->sreg2);
4581 if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
4582 ppc_stwcxd (code, ppc_r0, 0, ins->inst_basereg);
4584 ppc_stdcxd (code, ppc_r0, 0, ins->inst_basereg);
/* store-conditional failed (reservation lost): branch back and retry */
4586 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4587 ppc_patch (branch, loop);
/* the instruction returns the NEW (post-add) value */
4588 ppc_mr (code, ins->dreg, ppc_r0);
4592 case OP_ICONV_TO_R4:
4593 case OP_ICONV_TO_R8: {
4594 if (cpu_hw_caps & PPC_ISA_64) {
4595 ppc_srawi(code, ppc_r0, ins->sreg1, 31);
4596 ppc_stw (code, ppc_r0, -8, ppc_r1);
4597 ppc_stw (code, ins->sreg1, -4, ppc_r1);
4598 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4599 ppc_fcfid (code, ins->dreg, ins->dreg);
4600 if (ins->opcode == OP_ICONV_TO_R4)
4601 ppc_frsp (code, ins->dreg, ins->dreg);
4606 case OP_ATOMIC_CAS_I4:
4607 CASE_PPC64 (OP_ATOMIC_CAS_I8) {
/* compare-and-swap: *location is replaced by 'value' only when it
 * equals 'comparand'; the observed old value ends up in dreg. */
4608 int location = ins->sreg1;
4609 int value = ins->sreg2;
4610 int comparand = ins->sreg3;
4611 guint8 *start, *not_equal, *lost_reservation;
4614 if (ins->opcode == OP_ATOMIC_CAS_I4)
4615 ppc_lwarx (code, ppc_r0, 0, location);
4616 #ifdef __mono_ppc64__
4618 ppc_ldarx (code, ppc_r0, 0, location);
/* skip the store when the current value differs from the comparand
 * (this branch is patched to the exit below) */
4620 ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 0 : 1, ppc_r0, comparand);
4623 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4624 if (ins->opcode == OP_ATOMIC_CAS_I4)
4625 ppc_stwcxd (code, value, 0, location);
4626 #ifdef __mono_ppc64__
4628 ppc_stdcxd (code, value, 0, location);
/* store-conditional failed: another CPU touched the cell, so retry
 * from the load-reserved at 'start' */
4631 lost_reservation = code;
4632 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4633 ppc_patch (lost_reservation, start);
4635 ppc_patch (not_equal, code);
4636 ppc_mr (code, ins->dreg, ppc_r0);
4641 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4642 g_assert_not_reached ();
4645 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4646 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4647 mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset));
4648 g_assert_not_reached ();
4654 last_offset = offset;
4657 cfg->code_len = code - cfg->native_code;
4659 #endif /* !DISABLE_JIT */
/* Register the PPC low-level helper entry points in the JIT icall table
 * so that patch targets such as "mono_ppc_throw_exception" can later be
 * resolved by name. */
4662 mono_arch_register_lowlevel_calls (void)
4664 /* The signature doesn't matter */
4665 mono_register_jit_icall (mono_ppc_throw_exception, "mono_ppc_throw_exception", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_load_sequence(ip,val): rewrite an already-emitted load-constant
 * instruction sequence at 'ip' so that it loads 'val' instead.  Only the
 * 16-bit immediate halfwords of the emitted instructions are rewritten
 * (four halfwords on ppc64, the lis/ori pair on 32-bit ppc); the opcodes
 * themselves are left as emitted.  The odd halfword indices presumably
 * select the immediate field of each big-endian instruction word — keep
 * in sync with the ppc_load_sequence emitter.
 */
4668 #ifdef __mono_ppc64__
4669 #define patch_load_sequence(ip,val) do {\
4670 guint16 *__load = (guint16*)(ip); \
4671 g_assert (sizeof (val) == sizeof (gsize)); \
4672 __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4673 __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4674 __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4675 __load [9] = ((guint64)(gsize)(val)) & 0xffff; \
/* 32-bit variant: patch the immediates of the lis/ori pair */
4678 #define patch_load_sequence(ip,val) do {\
4679 guint16 *__lis_ori = (guint16*)(ip); \
4680 __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
4681 __lis_ori [3] = ((gulong)(val)) & 0xffff; \
/* Walk the jump-info list produced during code emission and patch every
 * recorded site in the finished native code ('ip' = code + recorded
 * offset) with its resolved target address. */
4687 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4689 MonoJumpInfo *patch_info;
4690 gboolean compile_aot = !run_cctors;
4692 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4693 unsigned char *ip = patch_info->ip.i + code;
4694 unsigned char *target;
4695 gboolean is_fd = FALSE;
4697 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4700 switch (patch_info->type) {
4701 case MONO_PATCH_INFO_BB:
4702 case MONO_PATCH_INFO_LABEL:
4705 /* No need to patch these */
4710 switch (patch_info->type) {
4711 case MONO_PATCH_INFO_IP:
/* materialize the patch site's own address into the load sequence */
4712 patch_load_sequence (ip, ip);
4714 case MONO_PATCH_INFO_METHOD_REL:
4715 g_assert_not_reached ();
4716 *((gpointer *)(ip)) = code + patch_info->data.offset;
4718 case MONO_PATCH_INFO_SWITCH: {
/* patch the table-address load, then rebase each table entry from a
 * code-relative offset to an absolute address */
4719 gpointer *table = (gpointer *)patch_info->data.table->table;
4722 patch_load_sequence (ip, table);
4724 for (i = 0; i < patch_info->data.table->table_size; i++) {
4725 table [i] = (glong)patch_info->data.table->table [i] + code;
4727 /* we put into the table the absolute address, no need for ppc_patch in this case */
4730 case MONO_PATCH_INFO_METHODCONST:
4731 case MONO_PATCH_INFO_CLASS:
4732 case MONO_PATCH_INFO_IMAGE:
4733 case MONO_PATCH_INFO_FIELD:
4734 case MONO_PATCH_INFO_VTABLE:
4735 case MONO_PATCH_INFO_IID:
4736 case MONO_PATCH_INFO_SFLDA:
4737 case MONO_PATCH_INFO_LDSTR:
4738 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4739 case MONO_PATCH_INFO_LDTOKEN:
4740 /* from OP_AOTCONST : lis + ori */
4741 patch_load_sequence (ip, target)
4743 case MONO_PATCH_INFO_R4:
4744 case MONO_PATCH_INFO_R8:
4745 g_assert_not_reached ();
4746 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4748 case MONO_PATCH_INFO_EXC_NAME:
4749 g_assert_not_reached ();
4750 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4752 case MONO_PATCH_INFO_NONE:
4753 case MONO_PATCH_INFO_BB_OVF:
4754 case MONO_PATCH_INFO_EXC_OVF:
4755 /* everything is dealt with at epilog output time */
4757 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
/* these targets may be function descriptors rather than code addresses */
4758 case MONO_PATCH_INFO_INTERNAL_METHOD:
4759 case MONO_PATCH_INFO_ABS:
4760 case MONO_PATCH_INFO_CLASS_INIT:
4761 case MONO_PATCH_INFO_RGCTX_FETCH:
/* default: patch the branch/call instruction at ip directly */
4768 ppc_patch_full (ip, target, is_fd);
4773 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4774 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4775 * the instruction offset immediate for all the registers.
4778 save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset)
/* non-LMF path: store only the non-volatile GPRs (r13..r31) actually
 * used by the method, emitting unwind info for each slot */
4782 for (i = 13; i <= 31; i++) {
4783 if (used_int_regs & (1 << i)) {
4784 ppc_str (code, i, pos, base_reg);
4785 mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset);
4786 pos += sizeof (mgreg_t);
/* LMF path: fill in the MonoLMF iregs/fregs arrays unconditionally */
4790 /* pos is the start of the MonoLMF structure */
4791 int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs);
4792 for (i = 13; i <= 31; i++) {
4793 ppc_str (code, i, offset, base_reg);
4794 mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset);
4795 offset += sizeof (mgreg_t);
4797 offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs);
/* all non-volatile FPRs (f14..f31) go into the LMF as well */
4798 for (i = 14; i < 32; i++) {
4799 ppc_stfd (code, i, offset, base_reg);
4800 offset += sizeof (gdouble);
4807 * Stack frame layout:
4809 * ------------------- sp
4810 * MonoLMF structure or saved registers
4811 * -------------------
4813 * -------------------
4815 * -------------------
4816 * optional 8 bytes for tracing
4817 * -------------------
4818 * param area size is cfg->param_area
4819 * -------------------
4820 * linkage area size is PPC_STACK_PARAM_OFFSET
4821 * ------------------- sp
4825 mono_arch_emit_prolog (MonoCompile *cfg)
4827 MonoMethod *method = cfg->method;
4829 MonoMethodSignature *sig;
4831 long alloc_size, pos, max_offset, cfa_offset;
4837 int tailcall_struct_index;
4839 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4842 sig = mono_method_signature (method);
4843 cfg->code_size = MONO_PPC_32_64_CASE (260, 384) + sig->param_count * 20;
4844 code = cfg->native_code = g_malloc (cfg->code_size);
4848 /* We currently emit unwind info for aot, but don't use it */
4849 mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0);
4851 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
4852 ppc_mflr (code, ppc_r0);
4853 ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp);
4854 mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET);
4857 alloc_size = cfg->stack_offset;
4860 if (!method->save_lmf) {
4861 for (i = 31; i >= 13; --i) {
4862 if (cfg->used_int_regs & (1 << i)) {
4863 pos += sizeof (mgreg_t);
4867 pos += sizeof (MonoLMF);
4871 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4872 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4873 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4874 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4877 cfg->stack_usage = alloc_size;
4878 g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0);
4880 if (ppc_is_imm16 (-alloc_size)) {
4881 ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp);
4882 cfa_offset = alloc_size;
4883 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4884 code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
4887 ppc_addi (code, ppc_r11, ppc_sp, -pos);
4888 ppc_load (code, ppc_r0, -alloc_size);
4889 ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
4890 cfa_offset = alloc_size;
4891 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4892 code = save_registers (cfg, code, 0, ppc_r11, method->save_lmf, cfg->used_int_regs, cfa_offset);
4895 if (cfg->frame_reg != ppc_sp) {
4896 ppc_mr (code, cfg->frame_reg, ppc_sp);
4897 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4900 /* store runtime generic context */
4901 if (cfg->rgctx_var) {
4902 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
4903 (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31));
4905 ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg);
4908 /* compute max_offset in order to use short forward jumps
4909 * we always do it on ppc because the immediate displacement
4910 * for jumps is too small
4913 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4915 bb->max_offset = max_offset;
4917 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4920 MONO_BB_FOR_EACH_INS (bb, ins)
4921 max_offset += ins_native_length (cfg, ins);
4924 /* load arguments allocated to register from the stack */
4927 cinfo = calculate_sizes (sig, sig->pinvoke);
4929 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
4930 ArgInfo *ainfo = &cinfo->ret;
4932 inst = cfg->vret_addr;
4935 if (ppc_is_imm16 (inst->inst_offset)) {
4936 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4938 ppc_load (code, ppc_r11, inst->inst_offset);
4939 ppc_stptr_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
4943 tailcall_struct_index = 0;
4944 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4945 ArgInfo *ainfo = cinfo->args + i;
4946 inst = cfg->args [pos];
4948 if (cfg->verbose_level > 2)
4949 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
4950 if (inst->opcode == OP_REGVAR) {
4951 if (ainfo->regtype == RegTypeGeneral)
4952 ppc_mr (code, inst->dreg, ainfo->reg);
4953 else if (ainfo->regtype == RegTypeFP)
4954 ppc_fmr (code, inst->dreg, ainfo->reg);
4955 else if (ainfo->regtype == RegTypeBase) {
4956 ppc_ldr (code, ppc_r11, 0, ppc_sp);
4957 ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r11);
4959 g_assert_not_reached ();
4961 if (cfg->verbose_level > 2)
4962 g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4964 /* the argument should be put on the stack: FIXME handle size != word */
4965 if (ainfo->regtype == RegTypeGeneral) {
4966 switch (ainfo->size) {
4968 if (ppc_is_imm16 (inst->inst_offset)) {
4969 ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4971 if (ppc_is_imm32 (inst->inst_offset)) {
4972 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
4973 ppc_stb (code, ainfo->reg, ppc_r11, inst->inst_offset);
4975 ppc_load (code, ppc_r11, inst->inst_offset);
4976 ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
4981 if (ppc_is_imm16 (inst->inst_offset)) {
4982 ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4984 if (ppc_is_imm32 (inst->inst_offset)) {
4985 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
4986 ppc_sth (code, ainfo->reg, ppc_r11, inst->inst_offset);
4988 ppc_load (code, ppc_r11, inst->inst_offset);
4989 ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
4993 #ifdef __mono_ppc64__
4995 if (ppc_is_imm16 (inst->inst_offset)) {
4996 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4998 if (ppc_is_imm32 (inst->inst_offset)) {
4999 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5000 ppc_stw (code, ainfo->reg, ppc_r11, inst->inst_offset);
5002 ppc_load (code, ppc_r11, inst->inst_offset);
5003 ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
5008 if (ppc_is_imm16 (inst->inst_offset)) {
5009 ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5011 ppc_load (code, ppc_r11, inst->inst_offset);
5012 ppc_str_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
5017 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5018 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5019 ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
5021 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5022 ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
5023 ppc_stw (code, ainfo->reg, 0, ppc_r11);
5024 ppc_stw (code, ainfo->reg + 1, 4, ppc_r11);
5029 if (ppc_is_imm16 (inst->inst_offset)) {
5030 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5032 if (ppc_is_imm32 (inst->inst_offset)) {
5033 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5034 ppc_stptr (code, ainfo->reg, ppc_r11, inst->inst_offset);
5036 ppc_load (code, ppc_r11, inst->inst_offset);
5037 ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r11);
5042 } else if (ainfo->regtype == RegTypeBase) {
5043 g_assert (ppc_is_imm16 (ainfo->offset));
5044 /* load the previous stack pointer in r11 */
5045 ppc_ldr (code, ppc_r11, 0, ppc_sp);
5046 ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r11);
5047 switch (ainfo->size) {
5049 if (ppc_is_imm16 (inst->inst_offset)) {
5050 ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5052 if (ppc_is_imm32 (inst->inst_offset)) {
5053 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5054 ppc_stb (code, ppc_r0, ppc_r11, inst->inst_offset);
5056 ppc_load (code, ppc_r11, inst->inst_offset);
5057 ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r11);
5062 if (ppc_is_imm16 (inst->inst_offset)) {
5063 ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5065 if (ppc_is_imm32 (inst->inst_offset)) {
5066 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5067 ppc_sth (code, ppc_r0, ppc_r11, inst->inst_offset);
5069 ppc_load (code, ppc_r11, inst->inst_offset);
5070 ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r11);
5074 #ifdef __mono_ppc64__
5076 if (ppc_is_imm16 (inst->inst_offset)) {
5077 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5079 if (ppc_is_imm32 (inst->inst_offset)) {
5080 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5081 ppc_stw (code, ppc_r0, ppc_r11, inst->inst_offset);
5083 ppc_load (code, ppc_r11, inst->inst_offset);
5084 ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r11);
5089 if (ppc_is_imm16 (inst->inst_offset)) {
5090 ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5092 ppc_load (code, ppc_r11, inst->inst_offset);
5093 ppc_str_indexed (code, ppc_r0, ppc_r11, inst->inst_basereg);
5098 g_assert (ppc_is_imm16 (ainfo->offset + 4));
5099 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5100 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5101 ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r11);
5102 ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
5104 /* use r12 to load the 2nd half of the long before we clobber r11. */
5105 ppc_lwz (code, ppc_r12, ainfo->offset + 4, ppc_r11);
5106 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5107 ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
5108 ppc_stw (code, ppc_r0, 0, ppc_r11);
5109 ppc_stw (code, ppc_r12, 4, ppc_r11);
5114 if (ppc_is_imm16 (inst->inst_offset)) {
5115 ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5117 if (ppc_is_imm32 (inst->inst_offset)) {
5118 ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
5119 ppc_stptr (code, ppc_r0, ppc_r11, inst->inst_offset);
5121 ppc_load (code, ppc_r11, inst->inst_offset);
5122 ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r11);
5127 } else if (ainfo->regtype == RegTypeFP) {
5128 g_assert (ppc_is_imm16 (inst->inst_offset));
5129 if (ainfo->size == 8)
5130 ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5131 else if (ainfo->size == 4)
5132 ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5134 g_assert_not_reached ();
5135 } else if (ainfo->regtype == RegTypeStructByVal) {
5136 int doffset = inst->inst_offset;
5140 g_assert (ppc_is_imm16 (inst->inst_offset));
5141 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5142 /* FIXME: what if there is no class? */
5143 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5144 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5145 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5148 * Darwin handles 1 and 2 byte
5149 * structs specially by
5150 * loading h/b into the arg
5151 * register. Only done for
5155 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5157 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5161 #ifdef __mono_ppc64__
5163 g_assert (cur_reg == 0);
5164 ppc_sldi (code, ppc_r0, ainfo->reg,
5165 (sizeof (gpointer) - ainfo->bytes) * 8);
5166 ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg);
5170 ppc_stptr (code, ainfo->reg + cur_reg, doffset,
5171 inst->inst_basereg);
5174 soffset += sizeof (gpointer);
5175 doffset += sizeof (gpointer);
5177 if (ainfo->vtsize) {
5178 /* FIXME: we need to do the shifting here, too */
5181 /* load the previous stack pointer in r11 (r0 gets overwritten by the memcpy) */
5182 ppc_ldr (code, ppc_r11, 0, ppc_sp);
5183 if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
5184 code = emit_memcpy (code, size - soffset,
5185 inst->inst_basereg, doffset,
5186 ppc_r11, ainfo->offset + soffset);
5188 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
5189 inst->inst_basereg, doffset,
5190 ppc_r11, ainfo->offset + soffset);
5193 } else if (ainfo->regtype == RegTypeStructByAddr) {
5194 /* if it was originally a RegTypeBase */
5195 if (ainfo->offset) {
5196 /* load the previous stack pointer in r11 */
5197 ppc_ldr (code, ppc_r11, 0, ppc_sp);
5198 ppc_ldptr (code, ppc_r11, ainfo->offset, ppc_r11);
5200 ppc_mr (code, ppc_r11, ainfo->reg);
5203 if (cfg->tailcall_valuetype_addrs) {
5204 MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
5206 g_assert (ppc_is_imm16 (addr->inst_offset));
5207 ppc_stptr (code, ppc_r11, addr->inst_offset, addr->inst_basereg);
5209 tailcall_struct_index++;
5212 g_assert (ppc_is_imm16 (inst->inst_offset));
5213 code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r11, 0);
5214 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5216 g_assert_not_reached ();
5221 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
5222 if (cfg->compile_aot)
5223 /* AOT code is only used in the root domain */
5224 ppc_load_ptr (code, ppc_r3, 0);
5226 ppc_load_ptr (code, ppc_r3, cfg->domain);
5227 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
5228 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
5229 ppc_load_func (code, ppc_r0, 0);
5230 ppc_mtlr (code, ppc_r0);
5237 if (method->save_lmf) {
5238 if (lmf_pthread_key != -1) {
5239 emit_tls_access (code, ppc_r3, lmf_pthread_key);
5240 if (tls_mode != TLS_MODE_NPTL && G_STRUCT_OFFSET (MonoJitTlsData, lmf))
5241 ppc_addi (code, ppc_r3, ppc_r3, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
5243 if (cfg->compile_aot) {
5244 /* Compute the got address which is needed by the PLT entry */
5245 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
5247 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5248 (gpointer)"mono_get_lmf_addr");
5249 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
5250 ppc_load_func (code, ppc_r0, 0);
5251 ppc_mtlr (code, ppc_r0);
5257 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5258 /* lmf_offset is the offset from the previous stack pointer,
5259 * alloc_size is the total stack space allocated, so the offset
5260 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5261 * The pointer to the struct is put in ppc_r11 (new_lmf).
5262 * The callee-saved registers are already in the MonoLMF structure
5264 ppc_addi (code, ppc_r11, ppc_sp, alloc_size - lmf_offset);
5265 /* ppc_r3 is the result from mono_get_lmf_addr () */
5266 ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
5267 /* new_lmf->previous_lmf = *lmf_addr */
5268 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5269 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
5270 /* *(lmf_addr) = r11 */
5271 ppc_stptr (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5272 /* save method info */
5273 if (cfg->compile_aot)
5275 ppc_load (code, ppc_r0, 0);
5277 ppc_load_ptr (code, ppc_r0, method);
5278 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
5279 ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
5280 /* save the current IP */
5281 if (cfg->compile_aot) {
5283 ppc_mflr (code, ppc_r0);
5285 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
5286 #ifdef __mono_ppc64__
5287 ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL);
5289 ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
5292 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
5296 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5298 cfg->code_len = code - cfg->native_code;
5299 g_assert (cfg->code_len <= cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional leave-method tracing, LMF unlinking
 * and register restore (when method->save_lmf), then restoration of the
 * callee-saved integer registers, the stack pointer and the link register.
 * NOTE(review): this chunk is a sampled excerpt; some interior lines are
 * not visible here.
 */
5306 mono_arch_emit_epilog (MonoCompile *cfg)
5308 MonoMethod *method = cfg->method;
5310 int max_epilog_size = 16 + 20*4;
5313 if (cfg->method->save_lmf)
5314 max_epilog_size += 128;
5316 if (mono_jit_trace_calls != NULL)
5317 max_epilog_size += 50;
5319 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5320 max_epilog_size += 50;
/* grow the native code buffer until the worst-case epilogue fits */
5322 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5323 cfg->code_size *= 2;
5324 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5325 mono_jit_stats.code_reallocs++;
5329 * Keep in sync with OP_JMP
5331 code = cfg->native_code + cfg->code_len;
5333 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5334 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* LMF path: pop this frame's MonoLMF off the per-thread LMF chain and
 * restore the callee-saved registers from the structure. */
5338 if (method->save_lmf) {
5340 pos += sizeof (MonoLMF);
5342 /* save the frame reg in r8 */
5343 ppc_mr (code, ppc_r8, cfg->frame_reg);
5344 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage - lmf_offset);
5345 /* r5 = previous_lmf */
5346 ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
5348 ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
5349 /* *(lmf_addr) = previous_lmf */
5350 ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
5351 /* FIXME: speedup: there is no actual need to restore the registers if
5352 * we didn't actually change them (idea from Zoltan).
5355 ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
5357 /*for (i = 14; i < 32; i++) {
5358 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
5360 g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
5361 /* use the saved copy of the frame reg in r8 */
5362 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5363 ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8);
5364 ppc_mtlr (code, ppc_r0);
5366 ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage);
/* non-LMF path: reload the return address into the link register */
5368 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5369 long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
5370 if (ppc_is_imm16 (return_offset)) {
5371 ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
5373 ppc_load (code, ppc_r11, return_offset);
5374 ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
5376 ppc_mtlr (code, ppc_r0);
/* small frame: restore r13..r31 from the top of the frame, then pop it */
5378 if (ppc_is_imm16 (cfg->stack_usage)) {
5379 int offset = cfg->stack_usage;
5380 for (i = 13; i <= 31; i++) {
5381 if (cfg->used_int_regs & (1 << i))
5382 offset -= sizeof (mgreg_t);
5384 if (cfg->frame_reg != ppc_sp)
5385 ppc_mr (code, ppc_r11, cfg->frame_reg);
5386 /* note r31 (possibly the frame register) is restored last */
5387 for (i = 13; i <= 31; i++) {
5388 if (cfg->used_int_regs & (1 << i)) {
5389 ppc_ldr (code, i, offset, cfg->frame_reg);
5390 offset += sizeof (mgreg_t);
5393 if (cfg->frame_reg != ppc_sp)
5394 ppc_addi (code, ppc_sp, ppc_r11, cfg->stack_usage);
5396 ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
/* large frame: stack_usage does not fit in 16 bits, address via r11 */
5398 ppc_load32 (code, ppc_r11, cfg->stack_usage);
5399 if (cfg->used_int_regs) {
5400 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
5401 for (i = 31; i >= 13; --i) {
5402 if (cfg->used_int_regs & (1 << i)) {
5403 pos += sizeof (mgreg_t);
5404 ppc_ldr (code, i, -pos, ppc_r11);
5407 ppc_mr (code, ppc_sp, ppc_r11);
5409 ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r11);
5416 cfg->code_len = code - cfg->native_code;
5418 g_assert (cfg->code_len < cfg->code_size);
5421 #endif /* ifndef DISABLE_JIT */
5423 /* remove once throw_exception_by_name is eliminated */
5425 exception_id_by_name (const char *name)
5427 if (strcmp (name, "IndexOutOfRangeException") == 0)
5428 return MONO_EXC_INDEX_OUT_OF_RANGE;
5429 if (strcmp (name, "OverflowException") == 0)
5430 return MONO_EXC_OVERFLOW;
5431 if (strcmp (name, "ArithmeticException") == 0)
5432 return MONO_EXC_ARITHMETIC;
5433 if (strcmp (name, "DivideByZeroException") == 0)
5434 return MONO_EXC_DIVIDE_BY_ZERO;
5435 if (strcmp (name, "InvalidCastException") == 0)
5436 return MONO_EXC_INVALID_CAST;
5437 if (strcmp (name, "NullReferenceException") == 0)
5438 return MONO_EXC_NULL_REF;
5439 if (strcmp (name, "ArrayTypeMismatchException") == 0)
5440 return MONO_EXC_ARRAY_TYPE_MISMATCH;
5441 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-raising stubs to the method body.
 * First pass sizes the needed code so the buffer can be grown once; second
 * pass emits the stubs and back-patches the in-body branches to them.
 * NOTE(review): sampled excerpt; some interior lines are not visible here.
 */
5447 mono_arch_emit_exceptions (MonoCompile *cfg)
5449 MonoJumpInfo *patch_info;
5452 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5453 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5454 int max_epilog_size = 50;
5456 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5457 exc_throw_pos [i] = NULL;
5458 exc_throw_found [i] = 0;
5461 /* count the number of exception infos */
5464 * make sure we have enough space for exceptions
5466 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5467 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5468 i = exception_id_by_name (patch_info->data.target);
/* each distinct exception gets one shared throw sequence */
5469 if (!exc_throw_found [i]) {
5470 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5471 exc_throw_found [i] = TRUE;
5473 } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF)
5474 max_epilog_size += 12;
5475 else if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) {
5476 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5477 i = exception_id_by_name (ovfj->data.exception);
5478 if (!exc_throw_found [i]) {
5479 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5480 exc_throw_found [i] = TRUE;
5482 max_epilog_size += 8;
/* grow the buffer until the worst-case stub area fits */
5486 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5487 cfg->code_size *= 2;
5488 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5489 mono_jit_stats.code_reallocs++;
5492 code = cfg->native_code + cfg->code_len;
5494 /* add code to raise exceptions */
5495 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5496 switch (patch_info->type) {
/* out-of-range conditional branch to a basic block: emit a trampoline */
5497 case MONO_PATCH_INFO_BB_OVF: {
5498 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5499 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5500 /* patch the initial jump */
5501 ppc_patch (ip, code);
5502 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2);
5504 ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */
5505 /* jump back to the true target */
5507 ip = ovfj->data.bb->native_offset + cfg->native_code;
5508 ppc_patch (code - 4, ip);
5509 patch_info->type = MONO_PATCH_INFO_NONE;
/* out-of-range conditional exception branch: re-queue as a plain EXC */
5512 case MONO_PATCH_INFO_EXC_OVF: {
5513 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5514 MonoJumpInfo *newji;
5515 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5516 unsigned char *bcl = code;
5517 /* patch the initial jump: we arrived here with a call */
5518 ppc_patch (ip, code);
5519 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0);
5521 ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */
5522 /* patch the conditional jump to the right handler */
5523 /* make it processed next */
5524 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
5525 newji->type = MONO_PATCH_INFO_EXC;
5526 newji->ip.i = bcl - cfg->native_code;
5527 newji->data.target = ovfj->data.exception;
5528 newji->next = patch_info->next;
5529 patch_info->next = newji;
5530 patch_info->type = MONO_PATCH_INFO_NONE;
5533 case MONO_PATCH_INFO_EXC: {
5534 MonoClass *exc_class;
5536 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5537 i = exception_id_by_name (patch_info->data.target);
/* reuse an already-emitted throw sequence if it is within branch range
 * (the 50000-byte limit keeps the conditional branch displacement valid) */
5538 if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) {
5539 ppc_patch (ip, exc_throw_pos [i]);
5540 patch_info->type = MONO_PATCH_INFO_NONE;
5543 exc_throw_pos [i] = code;
5546 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5547 g_assert (exc_class);
5549 ppc_patch (ip, code);
5550 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5551 ppc_load (code, ppc_r3, exc_class->type_token);
5552 /* we got here from a conditional call, so the calling ip is set in lr */
5553 ppc_mflr (code, ppc_r4);
/* retarget this patch entry at the throw helper call we emit next */
5554 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5555 patch_info->data.name = "mono_arch_throw_corlib_exception";
5556 patch_info->ip.i = code - cfg->native_code;
5557 if (FORCE_INDIR_CALL || cfg->method->dynamic) {
5558 ppc_load_func (code, ppc_r0, 0);
5559 ppc_mtctr (code, ppc_r0);
5560 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5572 cfg->code_len = code - cfg->native_code;
5574 g_assert (cfg->code_len <= cfg->code_size);
/*
 * try_offset_access:
 *
 *   Sanity-check helper for linuxthreads-style keyed TLS: reads the slot
 * for pthread key IDX directly through r2 at the same 284-byte offset used
 * by offsets_from_pthread_key () and compares it with VALUE.
 * NOTE(review): the tail of this function is not visible in this excerpt.
 */
5580 try_offset_access (void *value, guint32 idx)
5582 register void* me __asm__ ("r2");
5583 void ***p = (void***)((char*)me + 284);
5584 int idx1 = idx / 32;
5585 int idx2 = idx % 32;
5588 if (value != p[idx1][idx2])
/*
 * setup_tls_access:
 *
 *   Detect at runtime which TLS access scheme the platform uses and cache
 * it in the file-scope tls_mode, then resolve the monodomain and LMF TLS
 * offsets/keys accordingly. On platforms without a static answer this
 * disassembles pthread_getspecific () to recognize the linuxthreads
 * (lwz ... 284(r2)) or Darwin (mfspr/li 0x48) patterns. Falls back to
 * TLS_MODE_FAILED when nothing is recognized or MONO_NO_TLS is set.
 * NOTE(review): sampled excerpt; some interior lines are not visible here.
 */
5595 setup_tls_access (void)
5599 #if defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
5600 size_t conf_size = 0;
5603 /* FIXME for darwin */
5604 guint32 *ins, *code;
5605 guint32 cmplwi_1023, li_0x48, blr_ins;
5609 tls_mode = TLS_MODE_FAILED;
5612 if (tls_mode == TLS_MODE_FAILED)
5614 if (g_getenv ("MONO_NO_TLS")) {
5615 tls_mode = TLS_MODE_FAILED;
5619 if (tls_mode == TLS_MODE_DETECT) {
5620 #if defined(__APPLE__) && defined(__mono_ppc__) && !defined(__mono_ppc64__)
5621 tls_mode = TLS_MODE_DARWIN_G4;
5622 #elif defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
/* glibc reports "NPTL x.y" here when the NPTL thread library is in use */
5623 conf_size = confstr ( _CS_GNU_LIBPTHREAD_VERSION, confbuf, sizeof(confbuf));
5624 if ((conf_size > 4) && (strncmp (confbuf, "NPTL", 4) == 0))
5625 tls_mode = TLS_MODE_NPTL;
5626 #elif !defined(TARGET_PS3)
5627 ins = (guint32*)pthread_getspecific;
5628 /* uncond branch to the real method */
5629 if ((*ins >> 26) == 18) {
/* opcode 18 is "b"; extract and follow the branch displacement */
5631 val = (*ins & ~3) << 6;
5635 ins = (guint32*)(long)val;
5637 ins = (guint32*) ((char*)ins + val);
/* assemble reference instructions to compare against the real code */
5640 code = &cmplwi_1023;
5641 ppc_cmpli (code, 0, 0, ppc_r3, 1023);
5643 ppc_li (code, ppc_r4, 0x48);
5646 if (*ins == cmplwi_1023) {
5647 int found_lwz_284 = 0;
/* scan up to 20 insns for an lwz with displacement 284 off r2 */
5648 for (ptk = 0; ptk < 20; ++ptk) {
5650 if (!*ins || *ins == blr_ins)
5652 if ((guint16)*ins == 284 && (*ins >> 26) == 32) {
5657 if (!found_lwz_284) {
5658 tls_mode = TLS_MODE_FAILED;
5661 tls_mode = TLS_MODE_LTHREADS;
5662 } else if (*ins == li_0x48) {
5664 /* uncond branch to the real method */
5665 if ((*ins >> 26) == 18) {
5667 val = (*ins & ~3) << 6;
5671 ins = (guint32*)(long)val;
5673 ins = (guint32*) ((char*)ins + val);
5675 code = (guint32*)&val;
5676 ppc_li (code, ppc_r0, 0x7FF2);
5677 if (ins [1] == val) {
5678 /* Darwin on G4, implement */
5679 tls_mode = TLS_MODE_FAILED;
5682 code = (guint32*)&val;
5683 ppc_mfspr (code, ppc_r3, 104);
5684 if (ins [1] != val) {
5685 tls_mode = TLS_MODE_FAILED;
5688 tls_mode = TLS_MODE_DARWIN_G5;
5691 tls_mode = TLS_MODE_FAILED;
5695 tls_mode = TLS_MODE_FAILED;
5701 if (tls_mode == TLS_MODE_DETECT)
5702 tls_mode = TLS_MODE_FAILED;
5703 if (tls_mode == TLS_MODE_FAILED)
/* resolve the domain TLS slot: direct offset under NPTL, key otherwise */
5705 if ((monodomain_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
5706 monodomain_key = mono_domain_get_tls_offset();
5708 /* if not TLS_MODE_NPTL or local dynamic (as indicated by
5709 mono_domain_get_tls_offset returning -1) then use keyed access. */
5710 if (monodomain_key == -1) {
5711 ptk = mono_domain_get_tls_key ();
5713 ptk = mono_pthread_key_for_tls (ptk);
5715 monodomain_key = ptk;
/* same resolution for the LMF-address TLS slot */
5720 if ((lmf_pthread_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
5721 lmf_pthread_key = mono_get_lmf_addr_tls_offset();
5723 /* if not TLS_MODE_NPTL or local dynamic (as indicated by
5724 mono_get_lmf_addr_tls_offset returning -1) then use keyed access. */
5725 if (lmf_pthread_key == -1) {
5726 ptk = mono_pthread_key_for_tls (mono_jit_tls_id);
5728 /*g_print ("MonoLMF at: %d\n", ptk);*/
5729 /*if (!try_offset_access (mono_get_lmf_addr (), ptk)) {
5730 init_tls_failed = 1;
5733 lmf_pthread_key = ptk;
/* Per-thread JIT TLS initialization: just (re)runs TLS-mode detection. */
5740 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5742 setup_tls_access ();
/* Nothing to free on PPC; body is empty (not visible in this excerpt). */
5746 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5750 #ifdef MONO_ARCH_HAVE_IMT
5752 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5754 #define LOADSTORE_SIZE 4
5755 #define JUMP_IMM_SIZE 12
5756 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
5757 #define ENABLE_WRONG_METHOD_CHECK 0
5760 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native dispatch thunk for an interface-method table: a
 * compare/branch tree over the IMT entries that ends in a jump through the
 * matching vtable slot (or FAIL_TRAMP). Two passes: first size each item's
 * chunk, then emit and back-patch the branches.
 * NOTE(review): sampled excerpt; some interior lines are not visible here.
 */
5763 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5764 gpointer fail_tramp)
5768 guint8 *code, *start;
/* pass 1: accumulate the code size needed per IMT check item */
5770 for (i = 0; i < count; ++i) {
5771 MonoIMTCheckItem *item = imt_entries [i];
5772 if (item->is_equals) {
5773 if (item->check_target_idx) {
5774 if (!item->compare_done)
5775 item->chunk_size += CMP_SIZE;
5776 if (item->has_target_code)
5777 item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE;
5779 item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE;
5782 item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2;
5783 if (!item->has_target_code)
5784 item->chunk_size += LOADSTORE_SIZE;
5786 item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE;
5787 #if ENABLE_WRONG_METHOD_CHECK
5788 item->chunk_size += CMP_SIZE + BR_SIZE + 4;
5793 item->chunk_size += CMP_SIZE + BR_SIZE;
5794 imt_entries [item->check_target_idx]->compare_done = TRUE;
5796 size += item->chunk_size;
5798 /* the initial load of the vtable address */
5799 size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE;
5801 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5803 code = mono_domain_code_reserve (domain, size);
5808 * We need to save and restore r11 because it might be
5809 * used by the caller as the vtable register, so
5810 * clobbering it will trip up the magic trampoline.
5812 * FIXME: Get rid of this by making sure that r11 is
5813 * not used as the vtable register in interface calls.
5815 ppc_stptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
5816 ppc_load (code, ppc_r11, (gsize)(& (vtable->vtable [0])));
/* pass 2: emit the compare/branch tree */
5818 for (i = 0; i < count; ++i) {
5819 MonoIMTCheckItem *item = imt_entries [i];
5820 item->code_target = code;
5821 if (item->is_equals) {
5822 if (item->check_target_idx) {
5823 if (!item->compare_done) {
5824 ppc_load (code, ppc_r0, (gsize)item->key);
5825 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5827 item->jmp_code = code;
5828 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5829 if (item->has_target_code) {
5830 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5832 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
5833 ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
5835 ppc_mtctr (code, ppc_r0);
5836 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5839 ppc_load (code, ppc_r0, (gulong)item->key);
5840 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5841 item->jmp_code = code;
5842 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5843 if (item->has_target_code) {
5844 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5847 ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot]));
5848 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
5850 ppc_mtctr (code, ppc_r0);
5851 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* mismatch: fall through to the failure trampoline */
5852 ppc_patch (item->jmp_code, code);
5853 ppc_load_ptr (code, ppc_r0, fail_tramp);
5854 ppc_mtctr (code, ppc_r0);
5855 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5856 item->jmp_code = NULL;
5858 /* enable the commented code to assert on wrong method */
5859 #if ENABLE_WRONG_METHOD_CHECK
5860 ppc_load (code, ppc_r0, (guint32)item->key);
5861 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5862 item->jmp_code = code;
5863 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5865 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
5866 ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
5867 ppc_mtctr (code, ppc_r0);
5868 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5869 #if ENABLE_WRONG_METHOD_CHECK
5870 ppc_patch (item->jmp_code, code);
5872 item->jmp_code = NULL;
/* range-check node: branch on unsigned less-than to the subtree */
5877 ppc_load (code, ppc_r0, (gulong)item->key);
5878 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5879 item->jmp_code = code;
5880 ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0);
5883 /* patch the branches to get to the target items */
5884 for (i = 0; i < count; ++i) {
5885 MonoIMTCheckItem *item = imt_entries [i];
5886 if (item->jmp_code) {
5887 if (item->check_target_idx) {
5888 ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5894 mono_stats.imt_thunks_size += code - start;
5895 g_assert (code - start <= size);
5896 mono_arch_flush_icache (start, size);
5901 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5903 mgreg_t *r = (mgreg_t*)regs;
5905 return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG];
5910 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5912 mgreg_t *r = (mgreg_t*)regs;
5914 return (MonoVTable*)(gsize) r [MONO_ARCH_RGCTX_REG];
/* Arch-specific intrinsic lowering hook; body not visible in this excerpt
 * (presumably returns NULL when no intrinsic applies — confirm in full file). */
5918 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for arch-specific opcodes; body not visible here. */
5925 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_get_domain_intrinsic:
 *
 *   Return an OP_TLS_GET instruction reading the current MonoDomain from
 * its TLS slot, or (presumably) NULL when TLS access is unavailable
 * (monodomain_key == -1) — the early-return line is not visible here.
 */
5930 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5934 setup_tls_access ();
5935 if (monodomain_key == -1)
5938 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5939 ins->inst_offset = monodomain_key;
/*
 * mono_arch_context_get_int_reg:
 *
 *   Fetch the value of integer register REG from a saved MonoContext.
 * The context only stores r13..r31 (plus sp, handled separately), hence
 * the assert and the r13-relative index.
 */
5944 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5947 return MONO_CONTEXT_GET_SP (ctx);
5949 g_assert (reg >= ppc_r13);
5951 return (gpointer)(gsize)ctx->regs [reg - ppc_r13];
/* Offset of the patchable location inside a call site; body not visible here. */
5955 mono_arch_get_patch_offset (guint8 *code)
5961 * mono_arch_emit_load_got_addr:
5963 * Emit code to load the got address.
5964 * On PPC, the result is placed into r30.
5967 mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
/* lr holds the address of the following instruction (set up by the caller) */
5970 ppc_mflr (code, ppc_r30);
/* record the GOT-offset patch either in the cfg or the explicit ji list */
5972 mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5974 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5975 /* arch_emit_got_address () patches this */
5976 #if defined(TARGET_POWERPC64)
/* placeholder displacement, patched with the real GOT offset later */
5982 ppc_load32 (code, ppc_r0, 0);
5983 ppc_add (code, ppc_r30, ppc_r30, ppc_r0);
5990 * mono_arch_emit_load_aotconst:
5992 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5993 * TARGET from the mscorlib GOT in full-aot code.
5994 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
5998 mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
6000 /* Load the mscorlib got address */
6001 ppc_ldptr (code, ppc_r11, sizeof (gpointer), ppc_r30);
6002 *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
6003 /* arch_emit_got_access () patches this */
/* placeholder slot offset, patched at AOT-image fixup time */
6004 ppc_load32 (code, ppc_r0, 0);
6005 ppc_ldptr_indexed (code, ppc_r11, ppc_r11, ppc_r0);
6010 /* Soft Debug support */
6011 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6018 * mono_arch_set_breakpoint:
6020 * See mini-amd64.c for docs.
6023 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6026 guint8 *orig_code = code;
/* overwrite the seq-point site with a load from bp_trigger_page; enabling
 * the breakpoint read-protects that page so the load faults */
6028 ppc_load_sequence (code, ppc_r11, (gsize)bp_trigger_page);
6029 ppc_ldptr (code, ppc_r11, 0, ppc_r11);
6031 g_assert (code - orig_code == BREAKPOINT_SIZE);
6033 mono_arch_flush_icache (orig_code, code - orig_code);
6037 * mono_arch_clear_breakpoint:
6039 * See mini-amd64.c for docs.
6042 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
/* replace each word of the breakpoint sequence (nop emission not visible here) */
6047 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
6050 mono_arch_flush_icache (ip, code - ip);
6054 * mono_arch_is_breakpoint_event:
6056 * See mini-amd64.c for docs.
6059 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6061 siginfo_t* sinfo = (siginfo_t*) info;
6062 /* Sometimes the address is off by 4 */
/* fault inside the breakpoint trigger page => breakpoint event */
6063 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6070 * mono_arch_get_ip_for_breakpoint:
6072 * See mini-amd64.c for docs.
6075 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
6077 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
6079 /* ip points at the ldptr instruction */
/* rewind past the address-load sequence to the start of the breakpoint site */
6080 ip -= PPC_LOAD_SEQUENCE_LENGTH;
6086 * mono_arch_skip_breakpoint:
6088 * See mini-amd64.c for docs.
6091 mono_arch_skip_breakpoint (MonoContext *ctx)
6093 /* skip the ldptr */
6094 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6102 * mono_arch_start_single_stepping:
6104 * See mini-amd64.c for docs.
6107 mono_arch_start_single_stepping (void)
/* revoke all access so the sequence-point loads from ss_trigger_page fault */
6109 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6113 * mono_arch_stop_single_stepping:
6115 * See mini-amd64.c for docs.
6118 mono_arch_stop_single_stepping (void)
/* make the trigger page readable again so sequence points are no-ops */
6120 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
6124 * mono_arch_is_single_step_event:
6126 * See mini-amd64.c for docs.
6129 mono_arch_is_single_step_event (void *info, void *sigctx)
6131 siginfo_t* sinfo = (siginfo_t*) info;
6132 /* Sometimes the address is off by 4 */
/* fault inside the single-step trigger page => single-step event */
6133 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6140 * mono_arch_get_ip_for_single_step:
6142 * See mini-amd64.c for docs.
6145 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
6147 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
6149 /* ip points after the ldptr instruction */
6154 * mono_arch_skip_single_step:
6156 * See mini-amd64.c for docs.
6159 mono_arch_skip_single_step (MonoContext *ctx)
6161 /* skip the ldptr */
6162 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6166 * mono_arch_create_seq_point_info:
6168 * See mini-amd64.c for docs.
6171 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)