2 * mini-ppc.c: PowerPC backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Andreas Faerber <andreas.faerber@web.de>
9 * (C) 2003 Ximian, Inc.
10 * (C) 2007-2008 Andreas Faerber
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-proclib.h>
19 #include <mono/utils/mono-mmap.h>
20 #include <mono/utils/mono-hwcap-ppc.h>
23 #ifdef TARGET_POWERPC64
24 #include "cpu-ppc64.h"
31 #include <sys/sysctl.h>
37 #define FORCE_INDIR_CALL 1
48 /* cpu_hw_caps contains the flags defined below */
49 static int cpu_hw_caps = 0;
50 static int cachelinesize = 0;
51 static int cachelineinc = 0;
53 PPC_ICACHE_SNOOP = 1 << 0,
54 PPC_MULTIPLE_LS_UNITS = 1 << 1,
55 PPC_SMP_CAPABLE = 1 << 2,
58 PPC_MOVE_FPR_GPR = 1 << 5,
62 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
64 /* This mutex protects architecture specific caches */
65 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
66 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
67 static mono_mutex_t mini_arch_mutex;
69 int mono_exc_esp_offset = 0;
70 static int tls_mode = TLS_MODE_DETECT;
71 static int lmf_pthread_key = -1;
74 * The code generated for sequence points reads from this location, which is
75 * made read-only when single stepping is enabled.
77 static gpointer ss_trigger_page;
79 /* Enabled breakpoints read from this trigger page */
80 static gpointer bp_trigger_page;
83 offsets_from_pthread_key (guint32 key, int *offset2)
87 *offset2 = idx2 * sizeof (gpointer);
88 return 284 + idx1 * sizeof (gpointer);
91 #define emit_linuxthreads_tls(code,dreg,key) do {\
93 off1 = offsets_from_pthread_key ((key), &off2); \
94 ppc_ldptr ((code), (dreg), off1, ppc_r2); \
95 ppc_ldptr ((code), (dreg), off2, (dreg)); \
98 #define emit_darwing5_tls(code,dreg,key) do {\
99 int off1 = 0x48 + key * sizeof (gpointer); \
100 ppc_mfspr ((code), (dreg), 104); \
101 ppc_ldptr ((code), (dreg), off1, (dreg)); \
104 /* FIXME: ensure the sc call preserves all but r3 */
105 #define emit_darwing4_tls(code,dreg,key) do {\
106 int off1 = 0x48 + key * sizeof (gpointer); \
107 if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r12, ppc_r3); \
108 ppc_li ((code), ppc_r0, 0x7FF2); \
110 ppc_lwz ((code), (dreg), off1, ppc_r3); \
111 if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r12); \
114 #ifdef PPC_THREAD_PTR_REG
115 #define emit_nptl_tls(code,dreg,key) do { \
117 int off2 = key >> 15; \
118 if ((off2 == 0) || (off2 == -1)) { \
119 ppc_ldptr ((code), (dreg), off1, PPC_THREAD_PTR_REG); \
121 int off3 = (off2 + 1) > 1; \
122 ppc_addis ((code), ppc_r12, PPC_THREAD_PTR_REG, off3); \
123 ppc_ldptr ((code), (dreg), off1, ppc_r12); \
127 #define emit_nptl_tls(code,dreg,key) do { \
128 g_assert_not_reached (); \
132 #define emit_tls_access(code,dreg,key) do { \
133 switch (tls_mode) { \
134 case TLS_MODE_LTHREADS: emit_linuxthreads_tls(code,dreg,key); break; \
135 case TLS_MODE_NPTL: emit_nptl_tls(code,dreg,key); break; \
136 case TLS_MODE_DARWIN_G5: emit_darwing5_tls(code,dreg,key); break; \
137 case TLS_MODE_DARWIN_G4: emit_darwing4_tls(code,dreg,key); break; \
138 default: g_assert_not_reached (); \
142 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
144 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
145 inst->type = STACK_R8; \
147 inst->inst_p0 = (void*)(addr); \
148 mono_bblock_add_inst (cfg->cbb, inst); \
152 mono_arch_regname (int reg) {
153 static const char rnames[][4] = {
154 "r0", "sp", "r2", "r3", "r4",
155 "r5", "r6", "r7", "r8", "r9",
156 "r10", "r11", "r12", "r13", "r14",
157 "r15", "r16", "r17", "r18", "r19",
158 "r20", "r21", "r22", "r23", "r24",
159 "r25", "r26", "r27", "r28", "r29",
162 if (reg >= 0 && reg < 32)
168 mono_arch_fregname (int reg) {
169 static const char rnames[][4] = {
170 "f0", "f1", "f2", "f3", "f4",
171 "f5", "f6", "f7", "f8", "f9",
172 "f10", "f11", "f12", "f13", "f14",
173 "f15", "f16", "f17", "f18", "f19",
174 "f20", "f21", "f22", "f23", "f24",
175 "f25", "f26", "f27", "f28", "f29",
178 if (reg >= 0 && reg < 32)
183 /* this function overwrites r0, r11, r12 */
185 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
187 /* unrolled, use the counter in big */
188 if (size > sizeof (gpointer) * 5) {
189 long shifted = size / SIZEOF_VOID_P;
190 guint8 *copy_loop_start, *copy_loop_jump;
192 ppc_load (code, ppc_r0, shifted);
193 ppc_mtctr (code, ppc_r0);
194 //g_assert (sreg == ppc_r12);
195 ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (gpointer)));
196 ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (gpointer)));
197 copy_loop_start = code;
198 ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
199 ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
200 copy_loop_jump = code;
201 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
202 ppc_patch (copy_loop_jump, copy_loop_start);
203 size -= shifted * sizeof (gpointer);
204 doffset = soffset = 0;
207 #ifdef __mono_ppc64__
208 /* the hardware has multiple load/store units and the move is long
209 enough to use more then one regiester, then use load/load/store/store
210 to execute 2 instructions per cycle. */
211 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
213 ppc_ldptr (code, ppc_r0, soffset, sreg);
214 ppc_ldptr (code, ppc_r11, soffset+8, sreg);
215 ppc_stptr (code, ppc_r0, doffset, dreg);
216 ppc_stptr (code, ppc_r11, doffset+8, dreg);
223 ppc_ldr (code, ppc_r0, soffset, sreg);
224 ppc_str (code, ppc_r0, doffset, dreg);
230 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
232 ppc_lwz (code, ppc_r0, soffset, sreg);
233 ppc_lwz (code, ppc_r11, soffset+4, sreg);
234 ppc_stw (code, ppc_r0, doffset, dreg);
235 ppc_stw (code, ppc_r11, doffset+4, dreg);
243 ppc_lwz (code, ppc_r0, soffset, sreg);
244 ppc_stw (code, ppc_r0, doffset, dreg);
250 ppc_lhz (code, ppc_r0, soffset, sreg);
251 ppc_sth (code, ppc_r0, doffset, dreg);
257 ppc_lbz (code, ppc_r0, soffset, sreg);
258 ppc_stb (code, ppc_r0, doffset, dreg);
267 * mono_arch_get_argument_info:
268 * @csig: a method signature
269 * @param_count: the number of parameters to consider
270 * @arg_info: an array to store the result infos
272 * Gathers information on parameters such as size, alignment and
273 * padding. arg_info should be large enought to hold param_count + 1 entries.
275 * Returns the size of the activation frame.
278 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
280 #ifdef __mono_ppc64__
284 int k, frame_size = 0;
285 int size, align, pad;
288 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
289 frame_size += sizeof (gpointer);
293 arg_info [0].offset = offset;
296 frame_size += sizeof (gpointer);
300 arg_info [0].size = frame_size;
302 for (k = 0; k < param_count; k++) {
305 size = mono_type_native_stack_size (csig->params [k], (guint32*)&align);
307 size = mini_type_stack_size (csig->params [k], &align);
309 /* ignore alignment for now */
312 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
313 arg_info [k].pad = pad;
315 arg_info [k + 1].pad = 0;
316 arg_info [k + 1].size = size;
318 arg_info [k + 1].offset = offset;
322 align = MONO_ARCH_FRAME_ALIGNMENT;
323 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
324 arg_info [k].pad = pad;
330 #ifdef __mono_ppc64__
332 is_load_sequence (guint32 *seq)
334 return ppc_opcode (seq [0]) == 15 && /* lis */
335 ppc_opcode (seq [1]) == 24 && /* ori */
336 ppc_opcode (seq [2]) == 30 && /* sldi */
337 ppc_opcode (seq [3]) == 25 && /* oris */
338 ppc_opcode (seq [4]) == 24; /* ori */
341 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
342 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
346 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
348 /* code must point to the blrl */
350 mono_ppc_is_direct_call_sequence (guint32 *code)
352 #ifdef __mono_ppc64__
353 g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420);
355 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
356 if (ppc_opcode (code [-1]) == 31) { /* mtlr */
357 if (ppc_is_load_op (code [-2]) && ppc_is_load_op (code [-3])) { /* ld/ld */
358 if (!is_load_sequence (&code [-8]))
360 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
361 return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == sizeof (gpointer)) ||
362 (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == sizeof (gpointer));
364 if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */
365 return is_load_sequence (&code [-8]);
367 return is_load_sequence (&code [-6]);
371 g_assert(*code == 0x4e800021);
373 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
374 return ppc_opcode (code [-1]) == 31 &&
375 ppc_opcode (code [-2]) == 24 &&
376 ppc_opcode (code [-3]) == 15;
380 #define MAX_ARCH_DELEGATE_PARAMS 7
383 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count, gboolean aot)
385 guint8 *code, *start;
388 int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE;
390 start = code = mono_global_codeman_reserve (size);
392 code = mono_ppc_create_pre_code_ftnptr (code);
394 /* Replace the this argument with the target */
395 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
396 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
397 /* it's a function descriptor */
398 /* Can't use ldptr as it doesn't work with r0 */
399 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
401 ppc_mtctr (code, ppc_r0);
402 ppc_ldptr (code, ppc_r3, MONO_STRUCT_OFFSET (MonoDelegate, target), ppc_r3);
403 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
405 g_assert ((code - start) <= size);
407 mono_arch_flush_icache (start, size);
411 size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE;
412 start = code = mono_global_codeman_reserve (size);
414 code = mono_ppc_create_pre_code_ftnptr (code);
416 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
417 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
418 /* it's a function descriptor */
419 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
421 ppc_mtctr (code, ppc_r0);
422 /* slide down the arguments */
423 for (i = 0; i < param_count; ++i) {
424 ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
426 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
428 g_assert ((code - start) <= size);
430 mono_arch_flush_icache (start, size);
434 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
436 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
437 *info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
445 mono_arch_get_delegate_invoke_impls (void)
451 get_delegate_invoke_impl (&info, TRUE, 0, TRUE);
452 res = g_slist_prepend (res, info);
454 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
455 get_delegate_invoke_impl (&info, FALSE, i, TRUE);
456 res = g_slist_prepend (res, info);
463 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
465 guint8 *code, *start;
467 /* FIXME: Support more cases */
468 if (MONO_TYPE_ISSTRUCT (sig->ret))
472 static guint8* cached = NULL;
478 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
481 start = get_delegate_invoke_impl (&info, TRUE, 0, FALSE);
482 mono_tramp_info_register (info, NULL);
484 mono_memory_barrier ();
488 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
491 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
493 for (i = 0; i < sig->param_count; ++i)
494 if (!mono_is_regsize_var (sig->params [i]))
498 code = cache [sig->param_count];
503 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
504 start = mono_aot_get_trampoline (name);
508 start = get_delegate_invoke_impl (&info, FALSE, sig->param_count, FALSE);
509 mono_tramp_info_register (info, NULL);
512 mono_memory_barrier ();
514 cache [sig->param_count] = start;
520 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
526 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
528 mgreg_t *r = (mgreg_t*)regs;
530 return (gpointer)(gsize)r [ppc_r3];
538 #define MAX_AUX_ENTRIES 128
540 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
541 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
543 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
545 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
546 #define ISA_64 0x40000000
548 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
549 #define ISA_MOVE_FPR_GPR 0x00000200
551 * Initialize the cpu to execute managed code.
554 mono_arch_cpu_init (void)
559 * Initialize architecture specific code.
562 mono_arch_init (void)
564 #if defined(MONO_CROSS_COMPILE)
565 #elif defined(__APPLE__)
567 size_t len = sizeof (cachelinesize);
570 mib [1] = HW_CACHELINE;
572 if (sysctl (mib, 2, &cachelinesize, &len, NULL, 0) == -1) {
576 cachelineinc = cachelinesize;
578 #elif defined(__linux__)
579 AuxVec vec [MAX_AUX_ENTRIES];
580 int i, vec_entries = 0;
581 /* sadly this will work only with 2.6 kernels... */
582 FILE* f = fopen ("/proc/self/auxv", "rb");
585 vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f);
589 for (i = 0; i < vec_entries; i++) {
590 int type = vec [i].type;
592 if (type == 19) { /* AT_DCACHEBSIZE */
593 cachelinesize = vec [i].value;
597 #elif defined(G_COMPILER_CODEWARRIOR)
601 //#error Need a way to get cache line size
604 if (mono_hwcap_ppc_has_icache_snoop)
605 cpu_hw_caps |= PPC_ICACHE_SNOOP;
607 if (mono_hwcap_ppc_is_isa_2x)
608 cpu_hw_caps |= PPC_ISA_2X;
610 if (mono_hwcap_ppc_is_isa_64)
611 cpu_hw_caps |= PPC_ISA_64;
613 if (mono_hwcap_ppc_has_move_fpr_gpr)
614 cpu_hw_caps |= PPC_MOVE_FPR_GPR;
616 if (mono_hwcap_ppc_has_multiple_ls_units)
617 cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS;
623 cachelineinc = cachelinesize;
625 if (mono_cpu_count () > 1)
626 cpu_hw_caps |= PPC_SMP_CAPABLE;
628 mono_mutex_init_recursive (&mini_arch_mutex);
630 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
631 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
632 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
634 mono_aot_register_jit_icall ("mono_ppc_throw_exception", mono_ppc_throw_exception);
638 * Cleanup architecture specific code.
641 mono_arch_cleanup (void)
643 mono_mutex_destroy (&mini_arch_mutex);
647 * This function returns the optimizations supported on this cpu.
650 mono_arch_cpu_optimizations (guint32 *exclude_mask)
654 /* no ppc-specific optimizations yet */
660 * This function test for all SIMD functions supported.
662 * Returns a bitmask corresponding to all supported versions.
666 mono_arch_cpu_enumerate_simd_versions (void)
668 /* SIMD is currently unimplemented */
672 #ifdef __mono_ppc64__
673 #define CASE_PPC32(c)
674 #define CASE_PPC64(c) case c:
676 #define CASE_PPC32(c) case c:
677 #define CASE_PPC64(c)
681 is_regsize_var (MonoType *t) {
684 t = mini_get_underlying_type (t);
688 CASE_PPC64 (MONO_TYPE_I8)
689 CASE_PPC64 (MONO_TYPE_U8)
693 case MONO_TYPE_FNPTR:
695 case MONO_TYPE_OBJECT:
696 case MONO_TYPE_STRING:
697 case MONO_TYPE_CLASS:
698 case MONO_TYPE_SZARRAY:
699 case MONO_TYPE_ARRAY:
701 case MONO_TYPE_GENERICINST:
702 if (!mono_type_generic_inst_is_valuetype (t))
705 case MONO_TYPE_VALUETYPE:
713 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
718 for (i = 0; i < cfg->num_varinfo; i++) {
719 MonoInst *ins = cfg->varinfo [i];
720 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
723 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
726 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
729 /* we can only allocate 32 bit values */
730 if (is_regsize_var (ins->inst_vtype)) {
731 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
732 g_assert (i == vmv->idx);
733 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
739 #endif /* ifndef DISABLE_JIT */
742 mono_arch_get_global_int_regs (MonoCompile *cfg)
746 if (cfg->frame_reg != ppc_sp)
748 /* ppc_r13 is used by the system on PPC EABI */
749 for (i = 14; i < top; ++i) {
751 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
752 * since the trampolines can clobber r12.
754 if (!(cfg->compile_aot && i == 29))
755 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
762 * mono_arch_regalloc_cost:
764 * Return the cost, in number of memory references, of the action of
765 * allocating the variable VMV into a register during global register
769 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
776 mono_arch_flush_icache (guint8 *code, gint size)
778 #ifdef MONO_CROSS_COMPILE
781 guint8 *endp, *start;
785 start = (guint8*)((gsize)start & ~(cachelinesize - 1));
786 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
787 #if defined(G_COMPILER_CODEWARRIOR)
788 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
789 for (p = start; p < endp; p += cachelineinc) {
793 for (p = start; p < endp; p += cachelineinc) {
799 for (p = start; p < endp; p += cachelineinc) {
810 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
811 * The sync is required to insure that the store queue is completely empty.
812 * While the icbi performs no cache operations, icbi/isync is required to
813 * kill local prefetch.
815 if (cpu_hw_caps & PPC_ICACHE_SNOOP) {
817 asm ("icbi 0,%0;" : : "r"(code) : "memory");
821 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
822 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
823 for (p = start; p < endp; p += cachelineinc) {
824 asm ("dcbf 0,%0;" : : "r"(p) : "memory");
827 for (p = start; p < endp; p += cachelineinc) {
828 asm ("dcbst 0,%0;" : : "r"(p) : "memory");
833 for (p = start; p < endp; p += cachelineinc) {
834 /* for ISA2.0+ implementations we should not need any extra sync between the
835 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
836 * So I am not sure which chip had this problem but its not an issue on
837 * of the ISA V2 chips.
839 if (cpu_hw_caps & PPC_ISA_2X)
840 asm ("icbi 0,%0;" : : "r"(p) : "memory");
842 asm ("icbi 0,%0; sync;" : : "r"(p) : "memory");
844 if (!(cpu_hw_caps & PPC_ISA_2X))
852 mono_arch_flush_register_windows (void)
857 #define ALWAYS_ON_STACK(s) s
858 #define FP_ALSO_IN_REG(s) s
860 #ifdef __mono_ppc64__
861 #define ALWAYS_ON_STACK(s) s
862 #define FP_ALSO_IN_REG(s) s
864 #define ALWAYS_ON_STACK(s)
865 #define FP_ALSO_IN_REG(s)
867 #define ALIGN_DOUBLES
880 guint32 vtsize; /* in param area */
882 guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal */
883 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
884 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
885 guint8 bytes : 4; /* size in bytes - only valid for
886 RegTypeStructByVal if the struct fits
887 in one word, otherwise it's 0*/
896 gboolean vtype_retaddr;
904 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
906 #ifdef __mono_ppc64__
911 if (*gr >= 3 + PPC_NUM_REG_ARGS) {
912 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
913 ainfo->reg = ppc_sp; /* in the caller */
914 ainfo->regtype = RegTypeBase;
915 *stack_size += sizeof (gpointer);
917 ALWAYS_ON_STACK (*stack_size += sizeof (gpointer));
921 if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) {
923 //*stack_size += (*stack_size % 8);
925 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
926 ainfo->reg = ppc_sp; /* in the caller */
927 ainfo->regtype = RegTypeBase;
934 ALWAYS_ON_STACK (*stack_size += 8);
942 #if defined(__APPLE__) || defined(__mono_ppc64__)
944 has_only_a_r48_field (MonoClass *klass)
948 gboolean have_field = FALSE;
950 while ((f = mono_class_get_fields (klass, &iter))) {
951 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
954 if (!f->type->byref && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8))
965 get_call_info (MonoMethodSignature *sig)
967 guint i, fr, gr, pstart;
968 int n = sig->hasthis + sig->param_count;
969 MonoType *simpletype;
970 guint32 stack_size = 0;
971 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
972 gboolean is_pinvoke = sig->pinvoke;
974 fr = PPC_FIRST_FPARG_REG;
975 gr = PPC_FIRST_ARG_REG;
977 /* FIXME: handle returning a struct */
978 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
979 cinfo->vtype_retaddr = TRUE;
985 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
986 * the first argument, allowing 'this' to be always passed in the first arg reg.
987 * Also do this if the first argument is a reference type, since virtual calls
988 * are sometimes made using calli without sig->hasthis set, like in the delegate
991 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
993 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
996 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1000 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1001 cinfo->struct_ret = cinfo->ret.reg;
1002 cinfo->vret_arg_index = 1;
1006 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1010 if (cinfo->vtype_retaddr) {
1011 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1012 cinfo->struct_ret = cinfo->ret.reg;
1016 DEBUG(printf("params: %d\n", sig->param_count));
1017 for (i = pstart; i < sig->param_count; ++i) {
1018 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1019 /* Prevent implicit arguments and sig_cookie from
1020 being passed in registers */
1021 gr = PPC_LAST_ARG_REG + 1;
1022 /* FIXME: don't we have to set fr, too? */
1023 /* Emit the signature cookie just before the implicit arguments */
1024 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1026 DEBUG(printf("param %d: ", i));
1027 if (sig->params [i]->byref) {
1028 DEBUG(printf("byref\n"));
1029 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1033 simpletype = mini_get_underlying_type (sig->params [i]);
1034 switch (simpletype->type) {
1035 case MONO_TYPE_BOOLEAN:
1038 cinfo->args [n].size = 1;
1039 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1042 case MONO_TYPE_CHAR:
1045 cinfo->args [n].size = 2;
1046 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1051 cinfo->args [n].size = 4;
1052 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1058 case MONO_TYPE_FNPTR:
1059 case MONO_TYPE_CLASS:
1060 case MONO_TYPE_OBJECT:
1061 case MONO_TYPE_STRING:
1062 case MONO_TYPE_SZARRAY:
1063 case MONO_TYPE_ARRAY:
1064 cinfo->args [n].size = sizeof (gpointer);
1065 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1068 case MONO_TYPE_GENERICINST:
1069 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1070 cinfo->args [n].size = sizeof (gpointer);
1071 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1076 case MONO_TYPE_VALUETYPE:
1077 case MONO_TYPE_TYPEDBYREF: {
1081 klass = mono_class_from_mono_type (sig->params [i]);
1082 if (simpletype->type == MONO_TYPE_TYPEDBYREF)
1083 size = sizeof (MonoTypedRef);
1084 else if (is_pinvoke)
1085 size = mono_class_native_size (klass, NULL);
1087 size = mono_class_value_size (klass, NULL);
1089 #if defined(__APPLE__) || defined(__mono_ppc64__)
1090 if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) {
1091 cinfo->args [n].size = size;
1093 /* It was 7, now it is 8 in LinuxPPC */
1094 if (fr <= PPC_LAST_FPARG_REG) {
1095 cinfo->args [n].regtype = RegTypeFP;
1096 cinfo->args [n].reg = fr;
1098 FP_ALSO_IN_REG (gr ++);
1099 #if !defined(__mono_ppc64__)
1101 FP_ALSO_IN_REG (gr ++);
1103 ALWAYS_ON_STACK (stack_size += size);
1105 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1106 cinfo->args [n].regtype = RegTypeBase;
1107 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1114 DEBUG(printf ("load %d bytes struct\n",
1115 mono_class_native_size (sig->params [i]->data.klass, NULL)));
1117 #if PPC_PASS_STRUCTS_BY_VALUE
1119 int align_size = size;
1121 int rest = PPC_LAST_ARG_REG - gr + 1;
1124 align_size += (sizeof (gpointer) - 1);
1125 align_size &= ~(sizeof (gpointer) - 1);
1126 nregs = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1127 n_in_regs = MIN (rest, nregs);
1131 /* FIXME: check this */
1132 if (size >= 3 && size % 4 != 0)
1135 cinfo->args [n].regtype = RegTypeStructByVal;
1136 cinfo->args [n].vtregs = n_in_regs;
1137 cinfo->args [n].size = n_in_regs;
1138 cinfo->args [n].vtsize = nregs - n_in_regs;
1139 cinfo->args [n].reg = gr;
1141 #ifdef __mono_ppc64__
1142 if (nregs == 1 && is_pinvoke)
1143 cinfo->args [n].bytes = size;
1146 cinfo->args [n].bytes = 0;
1148 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1149 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1150 stack_size += nregs * sizeof (gpointer);
1153 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1154 cinfo->args [n].regtype = RegTypeStructByAddr;
1155 cinfo->args [n].vtsize = size;
1162 cinfo->args [n].size = 8;
1163 add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8);
1167 cinfo->args [n].size = 4;
1169 /* It was 7, now it is 8 in LinuxPPC */
1170 if (fr <= PPC_LAST_FPARG_REG
1171 // For non-native vararg calls the parms must go in storage
1172 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1174 cinfo->args [n].regtype = RegTypeFP;
1175 cinfo->args [n].reg = fr;
1177 FP_ALSO_IN_REG (gr ++);
1178 ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER);
1180 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4);
1181 cinfo->args [n].regtype = RegTypeBase;
1182 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1183 stack_size += SIZEOF_REGISTER;
1188 cinfo->args [n].size = 8;
1189 /* It was 7, now it is 8 in LinuxPPC */
1190 if (fr <= PPC_LAST_FPARG_REG
1191 // For non-native vararg calls the parms must go in storage
1192 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1194 cinfo->args [n].regtype = RegTypeFP;
1195 cinfo->args [n].reg = fr;
1197 FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER);
1198 ALWAYS_ON_STACK (stack_size += 8);
1200 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1201 cinfo->args [n].regtype = RegTypeBase;
1202 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1208 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1213 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1214 /* Prevent implicit arguments and sig_cookie from
1215 being passed in registers */
1216 gr = PPC_LAST_ARG_REG + 1;
1217 /* Emit the signature cookie just before the implicit arguments */
1218 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1222 simpletype = mini_get_underlying_type (sig->ret);
1223 switch (simpletype->type) {
1224 case MONO_TYPE_BOOLEAN:
1229 case MONO_TYPE_CHAR:
1235 case MONO_TYPE_FNPTR:
1236 case MONO_TYPE_CLASS:
1237 case MONO_TYPE_OBJECT:
1238 case MONO_TYPE_SZARRAY:
1239 case MONO_TYPE_ARRAY:
1240 case MONO_TYPE_STRING:
1241 cinfo->ret.reg = ppc_r3;
1245 cinfo->ret.reg = ppc_r3;
1249 cinfo->ret.reg = ppc_f1;
1250 cinfo->ret.regtype = RegTypeFP;
1252 case MONO_TYPE_GENERICINST:
1253 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1254 cinfo->ret.reg = ppc_r3;
1258 case MONO_TYPE_VALUETYPE:
1260 case MONO_TYPE_TYPEDBYREF:
1261 case MONO_TYPE_VOID:
1264 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1268 /* align stack size to 16 */
1269 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1270 stack_size = (stack_size + 15) & ~15;
1272 cinfo->stack_usage = stack_size;
1277 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1283 c1 = get_call_info (caller_sig);
1284 c2 = get_call_info (callee_sig);
1285 res = c1->stack_usage >= c2->stack_usage;
1286 if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret))
1287 /* An address on the callee's stack is passed as the first argument */
1289 for (i = 0; i < c2->nargs; ++i) {
1290 if (c2->args [i].regtype == RegTypeStructByAddr)
1291 /* An address on the callee's stack is passed as the argument */
1296 if (!mono_debug_count ())
1307 * Set var information according to the calling convention. ppc version.
1308 * The locals var stuff should most likely be split in another method.
1311 mono_arch_allocate_vars (MonoCompile *m)
1313 MonoMethodSignature *sig;
1314 MonoMethodHeader *header;
1316 int i, offset, size, align, curinst;
1317 int frame_reg = ppc_sp;
1319 guint32 locals_stack_size, locals_stack_align;
1321 m->flags |= MONO_CFG_HAS_SPILLUP;
1323 /* allow room for the vararg method args: void* and long/double */
1324 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1325 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1326 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1327 * call convs needs to be handled this way.
1329 if (m->flags & MONO_CFG_HAS_VARARGS)
1330 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1331 /* gtk-sharp and other broken code will dllimport vararg functions even with
1332 * non-varargs signatures. Since there is little hope people will get this right
1333 * we assume they won't.
1335 if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
1336 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1341 * We use the frame register also for any method that has
1342 * exception clauses. This way, when the handlers are called,
1343 * the code will reference local variables using the frame reg instead of
1344 * the stack pointer: if we had to restore the stack pointer, we'd
1345 * corrupt the method frames that are already on the stack (since
1346 * filters get called before stack unwinding happens) when the filter
1347 * code would call any method (this also applies to finally etc.).
1349 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1350 frame_reg = ppc_r31;
1351 m->frame_reg = frame_reg;
1352 if (frame_reg != ppc_sp) {
1353 m->used_int_regs |= 1 << frame_reg;
1356 sig = mono_method_signature (m->method);
1360 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1361 m->ret->opcode = OP_REGVAR;
1362 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1364 /* FIXME: handle long values? */
1365 switch (mini_get_underlying_type (sig->ret)->type) {
1366 case MONO_TYPE_VOID:
1370 m->ret->opcode = OP_REGVAR;
1371 m->ret->inst_c0 = m->ret->dreg = ppc_f1;
1374 m->ret->opcode = OP_REGVAR;
1375 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1379 /* local vars are at a positive offset from the stack pointer */
1381 * also note that if the function uses alloca, we use ppc_r31
1382 * to point at the local variables.
1384 offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */
1385 /* align the offset to 16 bytes: not sure this is needed here */
1387 //offset &= ~(16 - 1);
1389 /* add parameter area size for called functions */
1390 offset += m->param_area;
1392 offset &= ~(16 - 1);
1394 /* allow room to save the return value */
1395 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1398 /* the MonoLMF structure is stored just below the stack pointer */
1401 /* this stuff should not be needed on ppc and the new jit,
1402 * because a call on ppc to the handlers doesn't change the
1403 * stack pointer and the jist doesn't manipulate the stack pointer
1404 * for operations involving valuetypes.
1406 /* reserve space to store the esp */
1407 offset += sizeof (gpointer);
1409 /* this is a global constant */
1410 mono_exc_esp_offset = offset;
1413 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1414 offset += sizeof(gpointer) - 1;
1415 offset &= ~(sizeof(gpointer) - 1);
1417 m->vret_addr->opcode = OP_REGOFFSET;
1418 m->vret_addr->inst_basereg = frame_reg;
1419 m->vret_addr->inst_offset = offset;
1421 if (G_UNLIKELY (m->verbose_level > 1)) {
1422 printf ("vret_addr =");
1423 mono_print_ins (m->vret_addr);
1426 offset += sizeof(gpointer);
1429 offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align);
1430 if (locals_stack_align) {
1431 offset += (locals_stack_align - 1);
1432 offset &= ~(locals_stack_align - 1);
1434 for (i = m->locals_start; i < m->num_varinfo; i++) {
1435 if (offsets [i] != -1) {
1436 MonoInst *inst = m->varinfo [i];
1437 inst->opcode = OP_REGOFFSET;
1438 inst->inst_basereg = frame_reg;
1439 inst->inst_offset = offset + offsets [i];
1441 g_print ("allocating local %d (%s) to %d\n",
1442 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1446 offset += locals_stack_size;
1450 inst = m->args [curinst];
1451 if (inst->opcode != OP_REGVAR) {
1452 inst->opcode = OP_REGOFFSET;
1453 inst->inst_basereg = frame_reg;
1454 offset += sizeof (gpointer) - 1;
1455 offset &= ~(sizeof (gpointer) - 1);
1456 inst->inst_offset = offset;
1457 offset += sizeof (gpointer);
1462 for (i = 0; i < sig->param_count; ++i) {
1463 inst = m->args [curinst];
1464 if (inst->opcode != OP_REGVAR) {
1465 inst->opcode = OP_REGOFFSET;
1466 inst->inst_basereg = frame_reg;
1468 size = mono_type_native_stack_size (sig->params [i], (guint32*)&align);
1469 inst->backend.is_pinvoke = 1;
1471 size = mono_type_size (sig->params [i], &align);
1473 if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (gpointer))
1474 size = align = sizeof (gpointer);
1476 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1477 * they are saved using std in the prolog.
1479 align = sizeof (gpointer);
1480 offset += align - 1;
1481 offset &= ~(align - 1);
1482 inst->inst_offset = offset;
1488 /* some storage for fp conversions */
1491 m->arch.fp_conv_var_offset = offset;
1494 /* align the offset to 16 bytes */
1496 offset &= ~(16 - 1);
1499 m->stack_offset = offset;
1501 if (sig->call_convention == MONO_CALL_VARARG) {
1502 CallInfo *cinfo = get_call_info (m->method->signature);
1504 m->sig_cookie = cinfo->sig_cookie.offset;
/*
 * mono_arch_create_vars:
 * Create architecture-specific MonoInst variables before register
 * allocation.  Here: when the managed method returns a struct, reserve an
 * OP_ARG variable (cfg->vret_addr) for the hidden return-buffer address
 * the caller passes.
 * NOTE(review): interleaved lines are missing from this extract; only the
 * visible statements are documented.
 */
1511 mono_arch_create_vars (MonoCompile *cfg)
1513 MonoMethodSignature *sig = mono_method_signature (cfg->method);
	/* Struct returns use a hidden by-address return argument. */
1515 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1516 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1520 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1521 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
1521 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
/*
 * emit_sig_cookie:
 * For vararg calls, store the raw MonoMethodSignature pointer of the call
 * into its designated "signature cookie" stack slot (relative to the stack
 * pointer, ppc_r1) so the callee can decode the variable arguments.
 * Side effect: disables AOT for this method (raw signature pointers cannot
 * be encoded in AOT images yet, per the FIXME below).
 */
1525 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1527 int sig_reg = mono_alloc_ireg (cfg);
1529 /* FIXME: Add support for signature tokens to AOT */
1530 cfg->disable_aot = TRUE;
	/* Materialize the signature pointer as a constant, then spill it to
	 * the cookie offset computed by get_call_info (). */
1532 MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature);
1533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG,
1534 ppc_r1, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_emit_call:
 * Lower a call instruction into IR that marshals every argument to the
 * location chosen by get_call_info (): general registers, FP registers,
 * by-value / by-address struct passing (via OP_OUTARG_VT), or stack slots
 * relative to ppc_r1 (RegTypeBase).  Also emits the vararg signature
 * cookie at the sentinel position and wires up the hidden struct-return
 * address when needed.
 * NOTE(review): this extract is missing interleaved lines (else branches,
 * breaks, closing braces); comments cover only the visible statements.
 */
1538 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1541 MonoMethodSignature *sig;
1545 sig = call->signature;
1546 n = sig->param_count + sig->hasthis;
1548 cinfo = get_call_info (sig);
	/* Walk all actual arguments, including the implicit 'this'. */
1550 for (i = 0; i < n; ++i) {
1551 ArgInfo *ainfo = cinfo->args + i;
	/* 'this' has no entry in sig->params; treat it as a native int. */
1554 if (i >= sig->hasthis)
1555 t = sig->params [i - sig->hasthis];
1557 t = &mono_defaults.int_class->byval_arg;
1558 t = mini_get_underlying_type (t);
	/* Managed varargs: the cookie goes right before the first vararg. */
1560 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
1561 emit_sig_cookie (cfg, call, cinfo);
1563 in = call->args [i];
1565 if (ainfo->regtype == RegTypeGeneral) {
1566 #ifndef __mono_ppc64__
	/* On 32-bit PPC a 64-bit value occupies a register pair:
	 * vreg+1 holds the low word, vreg+2 the high word. */
1567 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1568 MONO_INST_NEW (cfg, ins, OP_MOVE);
1569 ins->dreg = mono_alloc_ireg (cfg);
1570 ins->sreg1 = in->dreg + 1;
1571 MONO_ADD_INS (cfg->cbb, ins);
1572 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1574 MONO_INST_NEW (cfg, ins, OP_MOVE);
1575 ins->dreg = mono_alloc_ireg (cfg);
1576 ins->sreg1 = in->dreg + 2;
1577 MONO_ADD_INS (cfg->cbb, ins);
1578 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
	/* Plain word-sized argument: copy into a fresh vreg bound to the
	 * target hard register. */
1582 MONO_INST_NEW (cfg, ins, OP_MOVE);
1583 ins->dreg = mono_alloc_ireg (cfg);
1584 ins->sreg1 = in->dreg;
1585 MONO_ADD_INS (cfg->cbb, ins);
1587 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
	/* Struct passed by hidden address: defer to mono_arch_emit_outarg_vt. */
1589 } else if (ainfo->regtype == RegTypeStructByAddr) {
1590 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1591 ins->opcode = OP_OUTARG_VT;
1592 ins->sreg1 = in->dreg;
1593 ins->klass = in->klass;
1594 ins->inst_p0 = call;
	/* ainfo is copied because cinfo is freed before the OUTARG_VT is lowered. */
1595 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1596 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1597 MONO_ADD_INS (cfg->cbb, ins);
1598 } else if (ainfo->regtype == RegTypeStructByVal) {
1599 /* this is further handled in mono_arch_emit_outarg_vt () */
1600 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1601 ins->opcode = OP_OUTARG_VT;
1602 ins->sreg1 = in->dreg;
1603 ins->klass = in->klass;
1604 ins->inst_p0 = call;
1605 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1606 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1607 MONO_ADD_INS (cfg->cbb, ins);
	/* Argument spilled to the outgoing parameter area on the stack. */
1608 } else if (ainfo->regtype == RegTypeBase) {
1609 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1611 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1612 if (t->type == MONO_TYPE_R8)
1613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1619 } else if (ainfo->regtype == RegTypeFP) {
	/* A valuetype classified as FP still goes through OUTARG_VT. */
1620 if (t->type == MONO_TYPE_VALUETYPE) {
1621 /* this is further handled in mono_arch_emit_outarg_vt () */
1622 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1623 ins->opcode = OP_OUTARG_VT;
1624 ins->sreg1 = in->dreg;
1625 ins->klass = in->klass;
1626 ins->inst_p0 = call;
1627 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1628 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1629 MONO_ADD_INS (cfg->cbb, ins);
1631 cfg->flags |= MONO_CFG_HAS_FPOUT;
1633 int dreg = mono_alloc_freg (cfg);
	/* R4 arguments are narrowed before being placed in the FP register. */
1635 if (ainfo->size == 4) {
1636 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg);
1638 MONO_INST_NEW (cfg, ins, OP_FMOVE);
1640 ins->sreg1 = in->dreg;
1641 MONO_ADD_INS (cfg->cbb, ins);
1644 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
1645 cfg->flags |= MONO_CFG_HAS_FPOUT;
1648 g_assert_not_reached ();
1652 /* Emit the signature cookie in the case that there is no
1653 additional argument */
1654 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1655 emit_sig_cookie (cfg, call, cinfo);
	/* Hidden struct-return: pass the address of the return buffer in the
	 * register chosen by get_call_info (). */
1657 if (cinfo->struct_ret) {
1660 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1661 vtarg->sreg1 = call->vret_var->dreg;
1662 vtarg->dreg = mono_alloc_preg (cfg);
1663 MONO_ADD_INS (cfg->cbb, vtarg);
1665 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE);
	/* Record stack needs so the prolog reserves a big enough param area. */
1668 call->stack_usage = cinfo->stack_usage;
1669 cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage));
1670 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT created by mono_arch_emit_call () into concrete
 * loads/stores: split a by-value struct across general registers (plus a
 * stack memcpy for the overflow part), load an FP-classified struct into
 * an FP register, or copy the struct and pass its address (by-addr case).
 * ins->inst_p0 is the MonoCallInst, ins->inst_p1 the copied ArgInfo.
 * NOTE(review): interleaved lines are missing from this extract.
 */
1678 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1680 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1681 ArgInfo *ainfo = ins->inst_p1;
1682 int ovf_size = ainfo->vtsize;
1683 int doffset = ainfo->offset;
1684 int i, soffset, dreg;
1686 if (ainfo->regtype == RegTypeStructByVal) {
1693 * Darwin pinvokes needs some special handling for 1
1694 * and 2 byte arguments
1696 g_assert (ins->klass);
1697 if (call->signature->pinvoke)
1698 size = mono_class_native_size (ins->klass, NULL);
	/* Tiny (1/2 byte) structs: load directly into a single register. */
1699 if (size == 2 || size == 1) {
1700 int tmpr = mono_alloc_ireg (cfg);
1702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset);
1705 dreg = mono_alloc_ireg (cfg);
1706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr);
1707 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE);
	/* General case: one register per pointer-sized chunk. */
1710 for (i = 0; i < ainfo->vtregs; ++i) {
1711 int antipadding = 0;
	/* Last partial word: shift right so the payload sits in the low
	 * bits, undoing the big-endian register padding. */
1714 antipadding = sizeof (gpointer) - ainfo->bytes;
1716 dreg = mono_alloc_ireg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8);
1720 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1721 soffset += sizeof (gpointer);
	/* Part that did not fit in registers goes to the stack param area. */
1724 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
1725 } else if (ainfo->regtype == RegTypeFP) {
1726 int tmpr = mono_alloc_freg (cfg);
1727 if (ainfo->size == 4)
1728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0);
1730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0);
1731 dreg = mono_alloc_freg (cfg);
1732 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1733 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
	/* By-address: make a local copy so the callee cannot mutate the
	 * caller's struct, then pass the copy's address. */
1735 MonoInst *vtcopy = mono_compile_create_var (cfg, &src->klass->byval_arg, OP_LOCAL);
1739 /* FIXME: alignment? */
1740 if (call->signature->pinvoke) {
1741 size = mono_type_native_stack_size (&src->klass->byval_arg, NULL);
1742 vtcopy->backend.is_pinvoke = 1;
1744 size = mini_type_stack_size (&src->klass->byval_arg, NULL);
1747 g_assert (ovf_size > 0);
1749 EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
1750 mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, 0);
	/* Address goes either to a stack slot or to a register. */
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg);
1755 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
/*
 * mono_arch_emit_setret:
 * Move the computed return value into the method's return location:
 * a register pair for 64-bit values on 32-bit PPC (OP_SETLRET), an FP
 * move for R4/R8, and a plain move otherwise.
 */
1760 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1762 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1765 #ifndef __mono_ppc64__
	/* 32-bit target: a 64-bit value lives in a vreg pair
	 * (low word in vreg+1, high word in vreg+2). */
1766 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1769 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1770 ins->sreg1 = val->dreg + 1;
1771 ins->sreg2 = val->dreg + 2;
1772 MONO_ADD_INS (cfg->cbb, ins);
1776 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1777 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1781 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1784 /* FIXME: this is just a useless hint: fix the interface to include the opcode */
1786 mono_arch_is_inst_imm (gint64 imm)
1791 #endif /* DISABLE_JIT */
1794 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit a call to the tracing function 'func' at method entry, passing the
 * MonoMethod in r3 and NULL (placeholder frame pointer) in r4.  Returns
 * the updated code pointer (visible tail not shown in this extract).
 */
1798 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1802 ppc_load_ptr (code, ppc_r3, cfg->method);
1803 ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
	/* Indirect call through the dedicated call register via LR. */
1804 ppc_load_func (code, PPC_CALL_REG, func);
1805 ppc_mtlr (code, PPC_CALL_REG);
/*
 * mono_arch_instrument_epilog_full:
 * Emit a call to the trace-exit function 'func' at method exit.  The live
 * return value (r3, r3:r4 pair, f1, or struct address) is spilled to the
 * parameter area before the call and reloaded afterwards so tracing does
 * not clobber it.  save_mode is selected from the method's return type.
 * NOTE(review): switch labels / breaks are missing from this extract.
 */
1819 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1822 int save_mode = SAVE_NONE;
1824 MonoMethod *method = cfg->method;
1825 int rtype = mini_get_underlying_type (mono_method_signature (method)->ret)->type;
1826 int save_offset = PPC_STACK_PARAM_OFFSET + cfg->param_area;
1830 offset = code - cfg->native_code;
1831 /* we need about 16 instructions */
1832 if (offset > (cfg->code_size - 16 * 4)) {
1833 cfg->code_size *= 2;
1834 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1835 code = cfg->native_code + offset;
1839 case MONO_TYPE_VOID:
1840 /* special case string .ctor icall */
	/* NOTE(review): strcmp () returns 0 on a match, so this condition is
	 * true for string methods whose name is NOT ".ctor" — other backends
	 * use !strcmp here.  Looks like an inverted test; confirm upstream. */
1841 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1842 save_mode = SAVE_ONE;
1844 save_mode = SAVE_NONE;
1846 #ifndef __mono_ppc64__
	/* 64-bit integer return on 32-bit PPC occupies r3:r4. */
1849 save_mode = SAVE_TWO;
1854 save_mode = SAVE_FP;
1856 case MONO_TYPE_VALUETYPE:
1857 save_mode = SAVE_STRUCT;
1860 save_mode = SAVE_ONE;
	/* Spill the return value around the trace call. */
1864 switch (save_mode) {
1866 ppc_stw (code, ppc_r3, save_offset, cfg->frame_reg);
1867 ppc_stw (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1868 if (enable_arguments) {
	/* Shift the pair into r4:r5 so r3 can carry the method pointer. */
1869 ppc_mr (code, ppc_r5, ppc_r4);
1870 ppc_mr (code, ppc_r4, ppc_r3);
1874 ppc_stptr (code, ppc_r3, save_offset, cfg->frame_reg);
1875 if (enable_arguments) {
1876 ppc_mr (code, ppc_r4, ppc_r3);
1880 ppc_stfd (code, ppc_f1, save_offset, cfg->frame_reg);
1881 if (enable_arguments) {
1882 /* FIXME: what reg? */
1883 ppc_fmr (code, ppc_f3, ppc_f1);
1884 /* FIXME: use 8 byte load on PPC64 */
1885 ppc_lwz (code, ppc_r4, save_offset, cfg->frame_reg);
1886 ppc_lwz (code, ppc_r5, save_offset + 4, cfg->frame_reg);
1890 if (enable_arguments) {
1891 /* FIXME: get the actual address */
1892 ppc_mr (code, ppc_r4, ppc_r3);
	/* Call the trace function: method in r3, func via LR. */
1900 ppc_load_ptr (code, ppc_r3, cfg->method);
1901 ppc_load_func (code, PPC_CALL_REG, func);
1902 ppc_mtlr (code, PPC_CALL_REG);
	/* Restore the spilled return value. */
1905 switch (save_mode) {
1907 ppc_lwz (code, ppc_r3, save_offset, cfg->frame_reg);
1908 ppc_lwz (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1911 ppc_ldptr (code, ppc_r3, save_offset, cfg->frame_reg);
1914 ppc_lfd (code, ppc_f1, save_offset, cfg->frame_reg);
1924 * Conditional branches have a small offset, so if it is likely overflowed,
1925 * we do a branch to the end of the method (uncond branches have much larger
1926 * offsets) where we perform the conditional and jump back unconditionally.
1927 * It's slightly slower, since we add two uncond branches, but it's very simple
1928 * with the current patch implementation and such large methods are likely not
1929 * going to be perf critical anyway.
1934 const char *exception;
/*
 * EMIT_COND_BRANCH_FLAGS:
 * Emit a conditional branch (bc b0,b1) to ins->inst_true_bb.  PPC
 * conditional branches only have a 16-bit displacement, so when the
 * estimated displacement may not fit we record a MonoOvfJump and emit an
 * unconditional branch (26-bit range) to be patched later; see the comment
 * above about large-method branch handling.
 *
 * Bug fix: the far-branch test used to read
 *     !ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))
 * The inner call yields 0 or 1, which is always a valid imm16, so the
 * negative-displacement overflow case was never detected.  Test the
 * displacement itself on both sides (with a 1024-byte safety margin).
 */
#define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
if (0 && ins->inst_true_bb->native_offset) { \
	ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
} else { \
	int br_disp = ins->inst_true_bb->max_offset - offset; \
	if (!ppc_is_imm16 (br_disp + 1024) || !ppc_is_imm16 (br_disp - 1024)) { \
		MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
		ovfj->data.bb = ins->inst_true_bb; \
		ovfj->ip_offset = 0; \
		ovfj->b0_cond = (b0); \
		ovfj->b1_cond = (b1); \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
		ppc_b (code, 0); \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
		ppc_bc (code, (b0), (b1), 0); \
	} \
}
1960 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
1962 /* emit an exception if condition is fail
1964 * We assign the extra code used to throw the implicit exceptions
1965 * to cfg->bb_exit as far as the big branch handling is concerned
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS:
 * Emit a conditional branch-and-link to the implicit-exception throw code
 * for 'exc_name'.  The throw stubs are appended after cfg->bb_exit, so the
 * displacement is estimated from bb_exit->max_offset; if it may exceed the
 * 16-bit bc range a MonoOvfJump is recorded and an unconditional bl is
 * emitted instead (patched via MONO_PATCH_INFO_EXC_OVF), reserving extra
 * room (24 bytes) in bb_exit for the trampoline sequence.
 *
 * Bug fix: same inverted overflow test as EMIT_COND_BRANCH_FLAGS — the
 * nested ppc_is_imm16 (ppc_is_imm16 (...)) always passed; test the
 * displacement itself.
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
do { \
	int br_disp = cfg->bb_exit->max_offset - offset; \
	if (!ppc_is_imm16 (br_disp + 1024) || !ppc_is_imm16 (br_disp - 1024)) { \
		MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
		ovfj->data.exception = (exc_name); \
		ovfj->ip_offset = code - cfg->native_code; \
		ovfj->b0_cond = (b0); \
		ovfj->b1_cond = (b1); \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
		ppc_bl (code, 0); \
		cfg->bb_exit->max_offset += 24; \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				MONO_PATCH_INFO_EXC, exc_name); \
		ppc_bcl (code, (b0), (b1), 0); \
	} \
} while (0)
1986 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1989 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * normalize_opcode:
 * Map size-specific load/store/shift opcodes to their generic (pointer
 * width) forms so the peephole pass can compare 32- and 64-bit variants
 * with one test.  MONO_PPC_32_64_CASE picks the I4 or I8 opcode depending
 * on the target word size.  Opcodes with no generic form pass through
 * unchanged (fallthrough not visible in this extract).
 */
1994 normalize_opcode (int opcode)
1997 #ifndef __mono_ilp32__
1998 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE):
1999 return OP_LOAD_MEMBASE;
2000 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX):
2001 return OP_LOAD_MEMINDEX;
2002 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG):
2003 return OP_STORE_MEMBASE_REG;
2004 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM):
2005 return OP_STORE_MEMBASE_IMM;
2006 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX):
2007 return OP_STORE_MEMINDEX;
2009 case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM):
2011 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM):
2012 return OP_SHR_UN_IMM;
2019 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2021 MonoInst *ins, *n, *last_ins = NULL;
2023 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2024 switch (normalize_opcode (ins->opcode)) {
2026 /* remove unnecessary multiplication with 1 */
2027 if (ins->inst_imm == 1) {
2028 if (ins->dreg != ins->sreg1) {
2029 ins->opcode = OP_MOVE;
2031 MONO_DELETE_INS (bb, ins);
2035 int power2 = mono_is_power_of_two (ins->inst_imm);
2037 ins->opcode = OP_SHL_IMM;
2038 ins->inst_imm = power2;
2042 case OP_LOAD_MEMBASE:
2044 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2045 * OP_LOAD_MEMBASE offset(basereg), reg
2047 if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG &&
2048 ins->inst_basereg == last_ins->inst_destbasereg &&
2049 ins->inst_offset == last_ins->inst_offset) {
2050 if (ins->dreg == last_ins->sreg1) {
2051 MONO_DELETE_INS (bb, ins);
2054 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2055 ins->opcode = OP_MOVE;
2056 ins->sreg1 = last_ins->sreg1;
2060 * Note: reg1 must be different from the basereg in the second load
2061 * OP_LOAD_MEMBASE offset(basereg), reg1
2062 * OP_LOAD_MEMBASE offset(basereg), reg2
2064 * OP_LOAD_MEMBASE offset(basereg), reg1
2065 * OP_MOVE reg1, reg2
2067 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE &&
2068 ins->inst_basereg != last_ins->dreg &&
2069 ins->inst_basereg == last_ins->inst_basereg &&
2070 ins->inst_offset == last_ins->inst_offset) {
2072 if (ins->dreg == last_ins->dreg) {
2073 MONO_DELETE_INS (bb, ins);
2076 ins->opcode = OP_MOVE;
2077 ins->sreg1 = last_ins->dreg;
2080 //g_assert_not_reached ();
2084 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2085 * OP_LOAD_MEMBASE offset(basereg), reg
2087 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2088 * OP_ICONST reg, imm
2090 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM &&
2091 ins->inst_basereg == last_ins->inst_destbasereg &&
2092 ins->inst_offset == last_ins->inst_offset) {
2093 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2094 ins->opcode = OP_ICONST;
2095 ins->inst_c0 = last_ins->inst_imm;
2096 g_assert_not_reached (); // check this rule
2100 case OP_LOADU1_MEMBASE:
2101 case OP_LOADI1_MEMBASE:
2102 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2103 ins->inst_basereg == last_ins->inst_destbasereg &&
2104 ins->inst_offset == last_ins->inst_offset) {
2105 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2106 ins->sreg1 = last_ins->sreg1;
2109 case OP_LOADU2_MEMBASE:
2110 case OP_LOADI2_MEMBASE:
2111 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2112 ins->inst_basereg == last_ins->inst_destbasereg &&
2113 ins->inst_offset == last_ins->inst_offset) {
2114 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2115 ins->sreg1 = last_ins->sreg1;
2118 #ifdef __mono_ppc64__
2119 case OP_LOADU4_MEMBASE:
2120 case OP_LOADI4_MEMBASE:
2121 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
2122 ins->inst_basereg == last_ins->inst_destbasereg &&
2123 ins->inst_offset == last_ins->inst_offset) {
2124 ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
2125 ins->sreg1 = last_ins->sreg1;
2130 ins->opcode = OP_MOVE;
2134 if (ins->dreg == ins->sreg1) {
2135 MONO_DELETE_INS (bb, ins);
2139 * OP_MOVE sreg, dreg
2140 * OP_MOVE dreg, sreg
2142 if (last_ins && last_ins->opcode == OP_MOVE &&
2143 ins->sreg1 == last_ins->dreg &&
2144 ins->dreg == last_ins->sreg1) {
2145 MONO_DELETE_INS (bb, ins);
2153 bb->last_ins = last_ins;
/*
 * mono_arch_decompose_opts:
 * Decompose opcodes with no direct PPC encoding into simpler IR:
 * int->float conversions via the classic 0x43300000 double-trick through
 * a stack slot, finiteness checks by inspecting the high word of the
 * double, and (on ppc64) 32-bit overflow add/sub by operating on values
 * shifted into the upper word.
 * NOTE(review): interleaved lines (case labels, offset computation) are
 * missing from this extract; comments cover only visible statements.
 */
2157 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
2159 switch (ins->opcode) {
2160 case OP_ICONV_TO_R_UN: {
2161 // This value is OK as-is for both big and little endian because of how it is stored
2162 static const guint64 adjust_val = 0x4330000000000000ULL;
2163 int msw_reg = mono_alloc_ireg (cfg);
2164 int adj_reg = mono_alloc_freg (cfg);
2165 int tmp_reg = mono_alloc_freg (cfg);
2166 int basereg = ppc_sp;
	/* Build the double 0x43300000:<uint> in a stack slot, then subtract
	 * the magic constant: the mantissa difference equals the uint. */
2168 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
	/* If the slot is out of imm16 range, compute the address first. */
2169 if (!ppc_is_imm16 (offset + 4)) {
2170 basereg = mono_alloc_ireg (cfg);
2171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2173 #if G_BYTE_ORDER == G_BIG_ENDIAN
2174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1);
2177 // For little endian the words are reversed
2178 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, msw_reg);
2179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, ins->sreg1);
2181 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val);
2182 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2183 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2184 ins->opcode = OP_NOP;
2187 #ifndef __mono_ppc64__
2188 case OP_ICONV_TO_R4:
2189 case OP_ICONV_TO_R8: {
2190 /* If we have a PPC_FEATURE_64 machine we can avoid
2191 this and use the fcfid instruction. Otherwise
2192 on an old 32-bit chip and we have to do this the
2194 if (!(cpu_hw_caps & PPC_ISA_64)) {
2195 /* FIXME: change precision for CEE_CONV_R4 */
	/* Signed variant: bias with 0x80000000 (XOR) and use the matching
	 * magic constant so the subtraction yields the signed value. */
2196 static const guint64 adjust_val = 0x4330000080000000ULL;
2197 int msw_reg = mono_alloc_ireg (cfg);
2198 int xored = mono_alloc_ireg (cfg);
2199 int adj_reg = mono_alloc_freg (cfg);
2200 int tmp_reg = mono_alloc_freg (cfg);
2201 int basereg = ppc_sp;
2203 if (!ppc_is_imm16 (offset + 4)) {
2204 basereg = mono_alloc_ireg (cfg);
2205 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2207 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
2208 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000);
2210 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored);
2211 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val);
2212 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2213 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2214 if (ins->opcode == OP_ICONV_TO_R4)
2215 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->dreg);
2216 ins->opcode = OP_NOP;
	/* Finiteness check: spill the double and inspect the word holding
	 * the exponent bits (high word on BE, offset+4 on LE). */
2222 int msw_reg = mono_alloc_ireg (cfg);
2223 int basereg = ppc_sp;
2225 if (!ppc_is_imm16 (offset + 4)) {
2226 basereg = mono_alloc_ireg (cfg);
2227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2229 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1);
2230 #if G_BYTE_ORDER == G_BIG_ENDIAN
2231 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset);
2233 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset+4);
2235 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_FINITE, -1, msw_reg);
2236 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
2237 ins->opcode = OP_NOP;
2240 #ifdef __mono_ppc64__
	/* 32-bit overflow ops on ppc64: shift operands to the top word so
	 * the 64-bit overflow-checking op sees a 32-bit overflow. */
2242 case OP_IADD_OVF_UN:
2244 int shifted1_reg = mono_alloc_ireg (cfg);
2245 int shifted2_reg = mono_alloc_ireg (cfg);
2246 int result_shifted_reg = mono_alloc_ireg (cfg);
2248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32);
2249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32);
2250 MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg);
2251 if (ins->opcode == OP_IADD_OVF_UN)
2252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32);
2254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32);
2255 ins->opcode = OP_NOP;
/*
 * mono_arch_decompose_long_opts:
 * 32-bit-only decomposition of 64-bit overflow arithmetic into register
 * pairs (low word = vreg+1, high word = vreg+2): low-word op sets the
 * carry, high-word *_CARRY op consumes it and performs the overflow
 * check.  LNEG uses the subfic/subfze sequence gcc generates.
 * NOTE(review): case labels are missing from this extract.
 */
2262 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
2264 switch (ins->opcode) {
2266 /* ADC sets the condition code */
2267 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2268 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2271 case OP_LADD_OVF_UN:
2272 /* ADC sets the condition code */
2273 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2274 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2278 /* SBB sets the condition code */
2279 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2280 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2283 case OP_LSUB_OVF_UN:
2284 /* SBB sets the condition code */
2285 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
2286 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
2290 /* From gcc generated code */
2291 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, ins->dreg + 1, ins->sreg1 + 1, 0);
2292 MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, ins->dreg + 2, ins->sreg1 + 2);
2301 * the branch_b0_table should maintain the order of these
2315 branch_b0_table [] = {
2330 branch_b1_table [] = {
/*
 * NEW_INS:
 * Allocate a new MonoInst with opcode 'op' into 'dest' and insert it into
 * the current basic block right after 'last_ins' (both names are expected
 * to be in scope at the expansion site in the lowering pass).
 */
2344 #define NEW_INS(cfg,dest,op) do { \
2345 MONO_INST_NEW((cfg), (dest), (op)); \
2346 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Translate an immediate- or membase-form opcode to its register-register
 * (or memindex) counterpart, used by the lowering pass after the
 * immediate/offset has been loaded into a register.  _MEMBASE_IMM stores
 * map to _MEMBASE_REG (the immediate becomes sreg1); anything not listed
 * falls through to the generic mono_op_imm_to_op ().
 * NOTE(review): several case labels/returns are missing from this extract.
 */
2350 map_to_reg_reg_op (int op)
2359 case OP_COMPARE_IMM:
2361 case OP_ICOMPARE_IMM:
2363 case OP_LCOMPARE_IMM:
2379 case OP_LOAD_MEMBASE:
2380 return OP_LOAD_MEMINDEX;
2381 case OP_LOADI4_MEMBASE:
2382 return OP_LOADI4_MEMINDEX;
2383 case OP_LOADU4_MEMBASE:
2384 return OP_LOADU4_MEMINDEX;
2385 case OP_LOADI8_MEMBASE:
2386 return OP_LOADI8_MEMINDEX;
2387 case OP_LOADU1_MEMBASE:
2388 return OP_LOADU1_MEMINDEX;
2389 case OP_LOADI2_MEMBASE:
2390 return OP_LOADI2_MEMINDEX;
2391 case OP_LOADU2_MEMBASE:
2392 return OP_LOADU2_MEMINDEX;
2393 case OP_LOADI1_MEMBASE:
2394 return OP_LOADI1_MEMINDEX;
2395 case OP_LOADR4_MEMBASE:
2396 return OP_LOADR4_MEMINDEX;
2397 case OP_LOADR8_MEMBASE:
2398 return OP_LOADR8_MEMINDEX;
2399 case OP_STOREI1_MEMBASE_REG:
2400 return OP_STOREI1_MEMINDEX;
2401 case OP_STOREI2_MEMBASE_REG:
2402 return OP_STOREI2_MEMINDEX;
2403 case OP_STOREI4_MEMBASE_REG:
2404 return OP_STOREI4_MEMINDEX;
2405 case OP_STOREI8_MEMBASE_REG:
2406 return OP_STOREI8_MEMINDEX;
2407 case OP_STORE_MEMBASE_REG:
2408 return OP_STORE_MEMINDEX;
2409 case OP_STORER4_MEMBASE_REG:
2410 return OP_STORER4_MEMINDEX;
2411 case OP_STORER8_MEMBASE_REG:
2412 return OP_STORER8_MEMINDEX;
2413 case OP_STORE_MEMBASE_IMM:
2414 return OP_STORE_MEMBASE_REG;
2415 case OP_STOREI1_MEMBASE_IMM:
2416 return OP_STOREI1_MEMBASE_REG;
2417 case OP_STOREI2_MEMBASE_IMM:
2418 return OP_STOREI2_MEMBASE_REG;
2419 case OP_STOREI4_MEMBASE_IMM:
2420 return OP_STOREI4_MEMBASE_REG;
2421 case OP_STOREI8_MEMBASE_IMM:
2422 return OP_STOREI8_MEMBASE_REG;
2424 return mono_op_imm_to_op (op);
2427 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
/*
 * compare_opcode_is_unsigned:
 * True when the branch/exception/setcc opcode consuming a compare expects
 * unsigned semantics — the lowering pass uses this to decide between
 * signed (imm16) and unsigned (uimm16) immediate range checks, matching
 * PPC's separate cmpi/cmpli instructions.  Relies on the unsigned opcode
 * variants being contiguous ranges in the opcode enumeration.
 */
2429 #define compare_opcode_is_unsigned(opcode) \
2430 (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
2431 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
2432 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
2433 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
2434 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
2435 ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
2436 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
2437 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
2440 * Remove from the instruction list the instructions that can't be
2441 * represented with very simple instructions with no register
2445 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2447 MonoInst *ins, *next, *temp, *last_ins = NULL;
2450 MONO_BB_FOR_EACH_INS (bb, ins) {
2452 switch (ins->opcode) {
2453 case OP_IDIV_UN_IMM:
2456 case OP_IREM_UN_IMM:
2457 CASE_PPC64 (OP_LREM_IMM) {
2458 NEW_INS (cfg, temp, OP_ICONST);
2459 temp->inst_c0 = ins->inst_imm;
2460 temp->dreg = mono_alloc_ireg (cfg);
2461 ins->sreg2 = temp->dreg;
2462 if (ins->opcode == OP_IDIV_IMM)
2463 ins->opcode = OP_IDIV;
2464 else if (ins->opcode == OP_IREM_IMM)
2465 ins->opcode = OP_IREM;
2466 else if (ins->opcode == OP_IDIV_UN_IMM)
2467 ins->opcode = OP_IDIV_UN;
2468 else if (ins->opcode == OP_IREM_UN_IMM)
2469 ins->opcode = OP_IREM_UN;
2470 else if (ins->opcode == OP_LREM_IMM)
2471 ins->opcode = OP_LREM;
2473 /* handle rem separately */
2478 CASE_PPC64 (OP_LREM)
2479 CASE_PPC64 (OP_LREM_UN) {
2481 /* we change a rem dest, src1, src2 to
2482 * div temp1, src1, src2
2483 * mul temp2, temp1, src2
2484 * sub dest, src1, temp2
2486 if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) {
2487 NEW_INS (cfg, mul, OP_IMUL);
2488 NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN);
2489 ins->opcode = OP_ISUB;
2491 NEW_INS (cfg, mul, OP_LMUL);
2492 NEW_INS (cfg, temp, ins->opcode == OP_LREM? OP_LDIV: OP_LDIV_UN);
2493 ins->opcode = OP_LSUB;
2495 temp->sreg1 = ins->sreg1;
2496 temp->sreg2 = ins->sreg2;
2497 temp->dreg = mono_alloc_ireg (cfg);
2498 mul->sreg1 = temp->dreg;
2499 mul->sreg2 = ins->sreg2;
2500 mul->dreg = mono_alloc_ireg (cfg);
2501 ins->sreg2 = mul->dreg;
2505 CASE_PPC64 (OP_LADD_IMM)
2508 if (!ppc_is_imm16 (ins->inst_imm)) {
2509 NEW_INS (cfg, temp, OP_ICONST);
2510 temp->inst_c0 = ins->inst_imm;
2511 temp->dreg = mono_alloc_ireg (cfg);
2512 ins->sreg2 = temp->dreg;
2513 ins->opcode = map_to_reg_reg_op (ins->opcode);
2517 CASE_PPC64 (OP_LSUB_IMM)
2519 if (!ppc_is_imm16 (-ins->inst_imm)) {
2520 NEW_INS (cfg, temp, OP_ICONST);
2521 temp->inst_c0 = ins->inst_imm;
2522 temp->dreg = mono_alloc_ireg (cfg);
2523 ins->sreg2 = temp->dreg;
2524 ins->opcode = map_to_reg_reg_op (ins->opcode);
2536 gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff));
2537 #ifdef __mono_ppc64__
2538 if (ins->inst_imm & 0xffffffff00000000ULL)
2542 NEW_INS (cfg, temp, OP_ICONST);
2543 temp->inst_c0 = ins->inst_imm;
2544 temp->dreg = mono_alloc_ireg (cfg);
2545 ins->sreg2 = temp->dreg;
2546 ins->opcode = map_to_reg_reg_op (ins->opcode);
2555 NEW_INS (cfg, temp, OP_ICONST);
2556 temp->inst_c0 = ins->inst_imm;
2557 temp->dreg = mono_alloc_ireg (cfg);
2558 ins->sreg2 = temp->dreg;
2559 ins->opcode = map_to_reg_reg_op (ins->opcode);
2561 case OP_COMPARE_IMM:
2562 case OP_ICOMPARE_IMM:
2563 CASE_PPC64 (OP_LCOMPARE_IMM)
2565 /* Branch opts can eliminate the branch */
2566 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
2567 ins->opcode = OP_NOP;
2571 if (compare_opcode_is_unsigned (next->opcode)) {
2572 if (!ppc_is_uimm16 (ins->inst_imm)) {
2573 NEW_INS (cfg, temp, OP_ICONST);
2574 temp->inst_c0 = ins->inst_imm;
2575 temp->dreg = mono_alloc_ireg (cfg);
2576 ins->sreg2 = temp->dreg;
2577 ins->opcode = map_to_reg_reg_op (ins->opcode);
2580 if (!ppc_is_imm16 (ins->inst_imm)) {
2581 NEW_INS (cfg, temp, OP_ICONST);
2582 temp->inst_c0 = ins->inst_imm;
2583 temp->dreg = mono_alloc_ireg (cfg);
2584 ins->sreg2 = temp->dreg;
2585 ins->opcode = map_to_reg_reg_op (ins->opcode);
2591 if (ins->inst_imm == 1) {
2592 ins->opcode = OP_MOVE;
2595 if (ins->inst_imm == 0) {
2596 ins->opcode = OP_ICONST;
2600 imm = mono_is_power_of_two (ins->inst_imm);
2602 ins->opcode = OP_SHL_IMM;
2603 ins->inst_imm = imm;
2606 if (!ppc_is_imm16 (ins->inst_imm)) {
2607 NEW_INS (cfg, temp, OP_ICONST);
2608 temp->inst_c0 = ins->inst_imm;
2609 temp->dreg = mono_alloc_ireg (cfg);
2610 ins->sreg2 = temp->dreg;
2611 ins->opcode = map_to_reg_reg_op (ins->opcode);
2614 case OP_LOCALLOC_IMM:
2615 NEW_INS (cfg, temp, OP_ICONST);
2616 temp->inst_c0 = ins->inst_imm;
2617 temp->dreg = mono_alloc_ireg (cfg);
2618 ins->sreg1 = temp->dreg;
2619 ins->opcode = OP_LOCALLOC;
2621 case OP_LOAD_MEMBASE:
2622 case OP_LOADI4_MEMBASE:
2623 CASE_PPC64 (OP_LOADI8_MEMBASE)
2624 case OP_LOADU4_MEMBASE:
2625 case OP_LOADI2_MEMBASE:
2626 case OP_LOADU2_MEMBASE:
2627 case OP_LOADI1_MEMBASE:
2628 case OP_LOADU1_MEMBASE:
2629 case OP_LOADR4_MEMBASE:
2630 case OP_LOADR8_MEMBASE:
2631 case OP_STORE_MEMBASE_REG:
2632 CASE_PPC64 (OP_STOREI8_MEMBASE_REG)
2633 case OP_STOREI4_MEMBASE_REG:
2634 case OP_STOREI2_MEMBASE_REG:
2635 case OP_STOREI1_MEMBASE_REG:
2636 case OP_STORER4_MEMBASE_REG:
2637 case OP_STORER8_MEMBASE_REG:
2638 /* we can do two things: load the immed in a register
2639 * and use an indexed load, or see if the immed can be
2640 * represented as an ad_imm + a load with a smaller offset
2641 * that fits. We just do the first for now, optimize later.
2643 if (ppc_is_imm16 (ins->inst_offset))
2645 NEW_INS (cfg, temp, OP_ICONST);
2646 temp->inst_c0 = ins->inst_offset;
2647 temp->dreg = mono_alloc_ireg (cfg);
2648 ins->sreg2 = temp->dreg;
2649 ins->opcode = map_to_reg_reg_op (ins->opcode);
2651 case OP_STORE_MEMBASE_IMM:
2652 case OP_STOREI1_MEMBASE_IMM:
2653 case OP_STOREI2_MEMBASE_IMM:
2654 case OP_STOREI4_MEMBASE_IMM:
2655 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM)
2656 NEW_INS (cfg, temp, OP_ICONST);
2657 temp->inst_c0 = ins->inst_imm;
2658 temp->dreg = mono_alloc_ireg (cfg);
2659 ins->sreg1 = temp->dreg;
2660 ins->opcode = map_to_reg_reg_op (ins->opcode);
2662 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2665 if (cfg->compile_aot) {
2666 /* Keep these in the aot case */
2669 NEW_INS (cfg, temp, OP_ICONST);
2670 temp->inst_c0 = (gulong)ins->inst_p0;
2671 temp->dreg = mono_alloc_ireg (cfg);
2672 ins->inst_basereg = temp->dreg;
2673 ins->inst_offset = 0;
2674 ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
2676 /* make it handle the possibly big ins->inst_offset
2677 * later optimize to use lis + load_membase
2683 bb->last_ins = last_ins;
2684 bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 *   Emit code converting the FP value in @sreg to an integer of @size bytes
 *   (sign- or zero-extended per @is_signed) in @dreg.  The FP->int conversion
 *   result (fctidz/fctiwz into ppc_f0) is stored to a stack slot at
 *   cfg->arch.fp_conv_var_offset and reloaded as an integer, since PPC has no
 *   direct FPR->GPR move here.
 *   NOTE(review): extraction dropped intermediate lines (#else/#endif, the
 *   small-offset else branch, sub_offset setup) — do not rely on this view
 *   for exact control flow.
 */
2688 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2690 long offset = cfg->arch.fp_conv_var_offset;
2692 /* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
2693 #ifdef __mono_ppc64__
/* 64-bit: truncate to 64-bit integer in ppc_f0 */
2695 ppc_fctidz (code, ppc_f0, sreg);
/* 32-bit path: truncate to 32-bit integer in ppc_f0 */
2700 ppc_fctiwz (code, ppc_f0, sreg);
/* If the stack slot is reachable with a 16-bit displacement, store/reload directly */
2703 if (ppc_is_imm16 (offset + sub_offset)) {
2704 ppc_stfd (code, ppc_f0, offset, cfg->frame_reg);
2706 ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg);
2708 ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg);
/* Large offset: materialize the address in dreg first (dreg used as scratch) */
2710 ppc_load (code, dreg, offset);
2711 ppc_add (code, dreg, dreg, cfg->frame_reg);
2712 ppc_stfd (code, ppc_f0, 0, dreg);
2714 ppc_ldr (code, dreg, sub_offset, dreg);
2716 ppc_lwz (code, dreg, sub_offset, dreg);
/* Unsigned results: mask down to the requested width */
2720 ppc_andid (code, dreg, dreg, 0xff);
2722 ppc_andid (code, dreg, dreg, 0xffff);
2723 #ifdef __mono_ppc64__
/* unsigned 4-byte result on ppc64: clear the upper 32 bits */
2725 ppc_clrldi (code, dreg, dreg, 32);
/* Signed results: sign-extend to register width */
2729 ppc_extsb (code, dreg, dreg);
2731 ppc_extsh (code, dreg, dreg);
2732 #ifdef __mono_ppc64__
2734 ppc_extsw (code, dreg, dreg);
2742 const guchar *target;
2747 #define is_call_imm(diff) ((glong)(diff) >= -33554432 && (glong)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_domain_code_foreach() callback that scans one code chunk (@data,
 *   @bsize bytes) for a branch thunk reaching pdata->target.  It either
 *   reuses an existing thunk whose first two words match the load sequence
 *   for the target, or claims a zeroed 16-byte slot and emits a fresh
 *   lis/ori/mtctr/bcctr thunk, then patches the call site (pdata->code)
 *   to branch to the thunk.  Not used on ppc64 (asserts unreachable).
 *   NOTE(review): extraction dropped several lines (early `return 0`/found
 *   assignments, the pointer advance by 16 bytes) — control flow shown here
 *   is partial.
 */
2750 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2751 #ifdef __mono_ppc64__
2752 g_assert_not_reached ();
2754 PatchData *pdata = (PatchData*)user_data;
2755 guchar *code = data;
2756 guint32 *thunks = data;
2757 guint32 *endthunks = (guint32*)(code + bsize);
2761 int difflow, diffhigh;
2763 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2764 difflow = (char*)pdata->code - (char*)thunks;
2765 diffhigh = (char*)pdata->code - (char*)endthunks;
2766 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
/* Build the reference load sequence for the target so existing thunks can be matched word-for-word */
2769 templ = (guchar*)load;
2770 ppc_load_sequence (templ, ppc_r0, pdata->target);
2772 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2773 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2774 while (thunks < endthunks) {
2775 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2776 if ((thunks [0] == load [0]) && (thunks [1] == load [1])) {
/* Matching thunk already present: just redirect the call site to it */
2777 ppc_patch (pdata->code, (guchar*)thunks);
2780 static int num_thunks = 0;
2782 if ((num_thunks % 20) == 0)
2783 g_print ("num_thunks lookup: %d\n", num_thunks);
2786 } else if ((thunks [0] == 0) && (thunks [1] == 0)) {
2787 /* found a free slot instead: emit thunk */
2788 code = (guchar*)thunks;
2789 ppc_lis (code, ppc_r0, (gulong)(pdata->target) >> 16);
2790 ppc_ori (code, ppc_r0, ppc_r0, (gulong)(pdata->target) & 0xffff);
2791 ppc_mtctr (code, ppc_r0);
2792 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
2793 mono_arch_flush_icache ((guchar*)thunks, 16);
2795 ppc_patch (pdata->code, (guchar*)thunks);
2798 static int num_thunks = 0;
2800 if ((num_thunks % 20) == 0)
2801 g_print ("num_thunks: %d\n", num_thunks);
2805 /* skip 16 bytes, the size of the thunk */
2809 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Route a branch from @code to @target through a thunk when the target is
 *   out of direct-branch range.  Runs search_thunk_slot over the domain's
 *   code chunks under the domain lock; the two foreach passes presumably
 *   correspond to "reuse existing" then "take first free slot" (see the
 *   comment on the second pass) — the pdata.found setup between them was
 *   dropped by the extraction.  Asserts that a slot was found.
 */
2816 handle_thunk (int absolute, guchar *code, const guchar *target) {
2817 MonoDomain *domain = mono_domain_get ();
2821 pdata.target = target;
2822 pdata.absolute = absolute;
/* Serialize thunk allocation/patching against other threads in this domain */
2825 mono_domain_lock (domain);
2826 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2829 /* this uses the first available slot */
2831 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2833 mono_domain_unlock (domain);
2835 if (pdata.found != 1)
2836 g_print ("thunk failed for %p from %p\n", target, code);
2837 g_assert (pdata.found == 1);
/*
 * patch_ins:
 *   Overwrite the single 4-byte instruction at @code with @ins and flush the
 *   instruction cache for that word so the CPU picks up the new encoding.
 */
2841 patch_ins (guint8 *code, guint32 ins)
2843 *(guint32*)code = ins;
2844 mono_arch_flush_icache (code, 4);
/*
 * ppc_patch_full:
 *   Retarget the branch/load instruction(s) at @code to @target.
 *   Handles: I-form branches (primary opcode 18: b/bl/ba/bla) by rewriting
 *   the 26-bit displacement (relative preferred, absolute if it fits,
 *   otherwise a thunk via handle_thunk); 16-bit conditional-branch
 *   displacements; and indirect-call sequences ending in blrl/blr/bcctr
 *   (0x4e800021/0x4e800020/0x4e800420) by rewriting the address-load
 *   sequence in front of them.  @is_fd: target is a ppc64 function
 *   descriptor.
 *   NOTE(review): many lines (returns, else/endif, masks) were dropped by
 *   the extraction; the visible lines do not show complete control flow.
 */
2848 ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
2850 guint32 ins = *(guint32*)code;
/* primary opcode field: top 6 bits of the instruction word */
2851 guint32 prim = ins >> 26;
2854 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2856 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2857 gint diff = target - code;
/* forward branch within the +/-32MB I-form range; keep the LK (link) bit */
2860 if (diff <= 33554431){
2861 ins = (18 << 26) | (diff) | (ins & 1);
2862 patch_ins (code, ins);
2866 /* diff between 0 and -33554432 */
2867 if (diff >= -33554432){
2868 ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1);
2869 patch_ins (code, ins);
/* relative out of range: try an absolute branch (AA bit = 2) if the target address itself fits */
2874 if ((glong)target >= 0){
2875 if ((glong)target <= 33554431){
2876 ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2;
2877 patch_ins (code, ins);
2881 if ((glong)target >= -33554432){
2882 ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2;
2883 patch_ins (code, ins);
/* neither relative nor absolute reaches: go through a 16-byte thunk */
2888 handle_thunk (TRUE, code, target);
2891 g_assert_not_reached ();
/* conditional-branch case: absolute 16-bit target; keep AA/LK bits (ins & 3) */
2899 guint32 li = (gulong)target;
2900 ins = (ins & 0xffff0000) | (ins & 3);
2901 ovf = li & 0xffff0000;
/* displacement must sign-extend from 16 bits, i.e. high half all-0 or all-1 */
2902 if (ovf != 0 && ovf != 0xffff0000)
2903 g_assert_not_reached ();
2906 // FIXME: assert the top bits of li are 0
/* conditional-branch case: relative 16-bit displacement */
2908 gint diff = target - code;
2909 ins = (ins & 0xffff0000) | (ins & 3);
2910 ovf = diff & 0xffff0000;
2911 if (ovf != 0 && ovf != 0xffff0000)
2912 g_assert_not_reached ();
2916 patch_ins (code, ins);
/* opcode 15 (addis/lis) or a blrl/blr/bcctr: patch the preceding load-address sequence */
2920 if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
2921 #ifdef __mono_ppc64__
2922 guint32 *seq = (guint32*)code;
2923 guint32 *branch_ins;
2925 /* the trampoline code will try to patch the blrl, blr, bcctr */
2926 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
2928 if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */
2933 if (ppc_is_load_op (seq [5])
2934 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2935 /* With function descs we need to do more careful
2937 || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
2940 branch_ins = seq + 8;
2942 branch_ins = seq + 6;
2945 seq = (guint32*)code;
2946 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
2947 g_assert (mono_ppc_is_direct_call_sequence (branch_ins));
2949 if (ppc_is_load_op (seq [5])) {
2950 g_assert (ppc_is_load_op (seq [6]));
/* non-descriptor target: replace the descriptor loads with mr PPC_CALL_REG, r12 */
2953 guint8 *buf = (guint8*)&seq [5];
2954 ppc_mr (buf, PPC_CALL_REG, ppc_r12);
2959 target = mono_get_addr_from_ftnptr ((gpointer)target);
2962 /* FIXME: make this thread safe */
2963 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2964 /* FIXME: we're assuming we're using r12 here */
2965 ppc_load_ptr_sequence (code, ppc_r12, target);
2967 ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
/* 28 bytes = the 7-instruction ppc64 load sequence rewritten above */
2969 mono_arch_flush_icache ((guint8*)seq, 28);
2972 /* the trampoline code will try to patch the blrl, blr, bcctr */
2973 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
2976 /* this is the lis/ori/mtlr/blrl sequence */
2977 seq = (guint32*)code;
2978 g_assert ((seq [0] >> 26) == 15);
2979 g_assert ((seq [1] >> 26) == 24);
2980 g_assert ((seq [2] >> 26) == 31);
2981 g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
2982 /* FIXME: make this thread safe */
2983 ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
2984 ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
2985 mono_arch_flush_icache (code - 8, 8);
2988 g_assert_not_reached ();
2990 // g_print ("patched with 0x%08x\n", ins);
/*
 * ppc_patch:
 *   Convenience wrapper for ppc_patch_full() with is_fd == FALSE
 *   (target is a plain code address, not a function descriptor).
 */
2994 ppc_patch (guchar *code, const guchar *target)
2996 ppc_patch_full (code, target, FALSE);
/*
 * mono_ppc_patch:
 *   Public entry point exposing ppc_patch() to the rest of the runtime.
 */
3000 mono_ppc_patch (guchar *code, const guchar *target)
3002 ppc_patch (code, target);
/*
 * emit_move_return_value:
 *   After a call instruction, emit the move of the ABI return register into
 *   ins->dreg.  Visible case: FP calls return in ppc_f1, copied with fmr
 *   when dreg differs.  Other cases of the switch were dropped by the
 *   extraction.
 */
3006 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3008 switch (ins->opcode) {
3011 case OP_FCALL_MEMBASE:
3012 if (ins->dreg != ppc_f1)
3013 ppc_fmr (code, ins->dreg, ppc_f1);
/*
 * ins_native_length:
 *   Return the maximum native code length for @ins's opcode, read from the
 *   machine-description spec table (MONO_INST_LEN field).
 */
3021 ins_native_length (MonoCompile *cfg, MonoInst *ins)
3023 return ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
/*
 * emit_reserve_param_area:
 *   Grow the stack by cfg->param_area (rounded up to
 *   MONO_ARCH_FRAME_ALIGNMENT), keeping the PPC back-chain valid: reload the
 *   saved back-chain word into r0 and store it with a store-with-update at
 *   the new stack pointer.  r12 is used as scratch when -size does not fit
 *   in a 16-bit displacement.  (The early-return for size == 0 presumably
 *   sits in the lines dropped by the extraction.)
 */
3027 emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
3029 long size = cfg->param_area;
/* round up to the frame alignment */
3031 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3032 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3037 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3038 if (ppc_is_imm16 (-size)) {
3039 ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
3041 ppc_load (code, ppc_r12, -size);
3042 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/*
 * emit_unreserve_param_area:
 *   Inverse of emit_reserve_param_area(): shrink the stack by the same
 *   aligned cfg->param_area amount, again preserving the back-chain via a
 *   load of the back-chain word and a store-with-update at the restored
 *   stack pointer.  r12 is scratch for out-of-range displacements.
 */
3049 emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
3051 long size = cfg->param_area;
/* must match the rounding done when reserving */
3053 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3054 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3059 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3060 if (ppc_is_imm16 (size)) {
3061 ppc_stptr_update (code, ppc_r0, size, ppc_sp);
3063 ppc_load (code, ppc_r12, size);
3064 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
3070 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3074 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3076 MonoInst *ins, *next;
3079 guint8 *code = cfg->native_code + cfg->code_len;
3080 MonoInst *last_ins = NULL;
3081 guint last_offset = 0;
3085 /* we don't align basic blocks of loops on ppc */
3087 if (cfg->verbose_level > 2)
3088 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3090 cpos = bb->max_offset;
3092 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3093 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3094 //g_assert (!mono_compile_aot);
3097 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3098 /* this is not thread save, but good enough */
3099 /* fixme: howto handle overflows? */
3100 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3103 MONO_BB_FOR_EACH_INS (bb, ins) {
3104 offset = code - cfg->native_code;
3106 max_len = ins_native_length (cfg, ins);
3108 if (offset > (cfg->code_size - max_len - 16)) {
3109 cfg->code_size *= 2;
3110 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3111 code = cfg->native_code + offset;
3113 // if (ins->cil_code)
3114 // g_print ("cil code\n");
3115 mono_debug_record_line_number (cfg, ins, offset);
3117 switch (normalize_opcode (ins->opcode)) {
3118 case OP_RELAXED_NOP:
3121 case OP_DUMMY_STORE:
3122 case OP_NOT_REACHED:
3125 case OP_IL_SEQ_POINT:
3126 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3128 case OP_SEQ_POINT: {
3131 if (cfg->compile_aot)
3135 * Read from the single stepping trigger page. This will cause a
3136 * SIGSEGV when single stepping is enabled.
3137 * We do this _before_ the breakpoint, so single stepping after
3138 * a breakpoint is hit will step to the next IL offset.
3140 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3141 ppc_load (code, ppc_r12, (gsize)ss_trigger_page);
3142 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
3145 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3148 * A placeholder for a possible breakpoint inserted by
3149 * mono_arch_set_breakpoint ().
3151 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
3156 emit_tls_access (code, ins->dreg, ins->inst_offset);
3159 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3160 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3161 ppc_mr (code, ppc_r4, ppc_r0);
3164 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3165 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3166 ppc_mr (code, ppc_r4, ppc_r0);
3168 case OP_MEMORY_BARRIER:
3171 case OP_STOREI1_MEMBASE_REG:
3172 if (ppc_is_imm16 (ins->inst_offset)) {
3173 ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3175 if (ppc_is_imm32 (ins->inst_offset)) {
3176 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3177 ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11);
3179 ppc_load (code, ppc_r0, ins->inst_offset);
3180 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3184 case OP_STOREI2_MEMBASE_REG:
3185 if (ppc_is_imm16 (ins->inst_offset)) {
3186 ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3188 if (ppc_is_imm32 (ins->inst_offset)) {
3189 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3190 ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11);
3192 ppc_load (code, ppc_r0, ins->inst_offset);
3193 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3197 case OP_STORE_MEMBASE_REG:
3198 if (ppc_is_imm16 (ins->inst_offset)) {
3199 ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3201 if (ppc_is_imm32 (ins->inst_offset)) {
3202 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3203 ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11);
3205 ppc_load (code, ppc_r0, ins->inst_offset);
3206 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3210 #ifdef __mono_ilp32__
3211 case OP_STOREI8_MEMBASE_REG:
3212 if (ppc_is_imm16 (ins->inst_offset)) {
3213 ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3215 ppc_load (code, ppc_r0, ins->inst_offset);
3216 ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3220 case OP_STOREI1_MEMINDEX:
3221 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3223 case OP_STOREI2_MEMINDEX:
3224 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3226 case OP_STORE_MEMINDEX:
3227 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3230 g_assert_not_reached ();
3232 case OP_LOAD_MEMBASE:
3233 if (ppc_is_imm16 (ins->inst_offset)) {
3234 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3236 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3237 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3238 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg);
3240 ppc_load (code, ppc_r0, ins->inst_offset);
3241 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3245 case OP_LOADI4_MEMBASE:
3246 #ifdef __mono_ppc64__
3247 if (ppc_is_imm16 (ins->inst_offset)) {
3248 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3250 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3251 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3252 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg);
3254 ppc_load (code, ppc_r0, ins->inst_offset);
3255 ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3260 case OP_LOADU4_MEMBASE:
3261 if (ppc_is_imm16 (ins->inst_offset)) {
3262 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3264 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3265 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3266 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg);
3268 ppc_load (code, ppc_r0, ins->inst_offset);
3269 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3273 case OP_LOADI1_MEMBASE:
3274 case OP_LOADU1_MEMBASE:
3275 if (ppc_is_imm16 (ins->inst_offset)) {
3276 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3278 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3279 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3280 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg);
3282 ppc_load (code, ppc_r0, ins->inst_offset);
3283 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3286 if (ins->opcode == OP_LOADI1_MEMBASE)
3287 ppc_extsb (code, ins->dreg, ins->dreg);
3289 case OP_LOADU2_MEMBASE:
3290 if (ppc_is_imm16 (ins->inst_offset)) {
3291 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3293 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3294 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3295 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg);
3297 ppc_load (code, ppc_r0, ins->inst_offset);
3298 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3302 case OP_LOADI2_MEMBASE:
3303 if (ppc_is_imm16 (ins->inst_offset)) {
3304 ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3306 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3307 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3308 ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg);
3310 ppc_load (code, ppc_r0, ins->inst_offset);
3311 ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3315 #ifdef __mono_ilp32__
3316 case OP_LOADI8_MEMBASE:
3317 if (ppc_is_imm16 (ins->inst_offset)) {
3318 ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3320 ppc_load (code, ppc_r0, ins->inst_offset);
3321 ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3325 case OP_LOAD_MEMINDEX:
3326 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3328 case OP_LOADI4_MEMINDEX:
3329 #ifdef __mono_ppc64__
3330 ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3333 case OP_LOADU4_MEMINDEX:
3334 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3336 case OP_LOADU2_MEMINDEX:
3337 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3339 case OP_LOADI2_MEMINDEX:
3340 ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3342 case OP_LOADU1_MEMINDEX:
3343 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3345 case OP_LOADI1_MEMINDEX:
3346 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3347 ppc_extsb (code, ins->dreg, ins->dreg);
3349 case OP_ICONV_TO_I1:
3350 CASE_PPC64 (OP_LCONV_TO_I1)
3351 ppc_extsb (code, ins->dreg, ins->sreg1);
3353 case OP_ICONV_TO_I2:
3354 CASE_PPC64 (OP_LCONV_TO_I2)
3355 ppc_extsh (code, ins->dreg, ins->sreg1);
3357 case OP_ICONV_TO_U1:
3358 CASE_PPC64 (OP_LCONV_TO_U1)
3359 ppc_clrlwi (code, ins->dreg, ins->sreg1, 24);
3361 case OP_ICONV_TO_U2:
3362 CASE_PPC64 (OP_LCONV_TO_U2)
3363 ppc_clrlwi (code, ins->dreg, ins->sreg1, 16);
3367 CASE_PPC64 (OP_LCOMPARE)
3368 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1;
3370 if (next && compare_opcode_is_unsigned (next->opcode))
3371 ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2);
3373 ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2);
3375 case OP_COMPARE_IMM:
3376 case OP_ICOMPARE_IMM:
3377 CASE_PPC64 (OP_LCOMPARE_IMM)
3378 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1;
3380 if (next && compare_opcode_is_unsigned (next->opcode)) {
3381 if (ppc_is_uimm16 (ins->inst_imm)) {
3382 ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3384 g_assert_not_reached ();
3387 if (ppc_is_imm16 (ins->inst_imm)) {
3388 ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3390 g_assert_not_reached ();
3396 * gdb does not like encountering a trap in the debugged code. So
3397 * instead of emitting a trap, we emit a call a C function and place a
3401 ppc_mr (code, ppc_r3, ins->sreg1);
3402 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3403 (gpointer)"mono_break");
3404 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3405 ppc_load_func (code, PPC_CALL_REG, 0);
3406 ppc_mtlr (code, PPC_CALL_REG);
3414 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3417 CASE_PPC64 (OP_LADD)
3418 ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2);
3422 ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2);
3425 if (ppc_is_imm16 (ins->inst_imm)) {
3426 ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3428 g_assert_not_reached ();
3433 CASE_PPC64 (OP_LADD_IMM)
3434 if (ppc_is_imm16 (ins->inst_imm)) {
3435 ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm);
3437 g_assert_not_reached ();
3441 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3443 ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2);
3444 ppc_mfspr (code, ppc_r0, ppc_xer);
3445 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3446 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3448 case OP_IADD_OVF_UN:
3449 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3451 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3452 ppc_mfspr (code, ppc_r0, ppc_xer);
3453 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3454 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3457 CASE_PPC64 (OP_LSUB_OVF)
3458 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3460 ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1);
3461 ppc_mfspr (code, ppc_r0, ppc_xer);
3462 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3463 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3465 case OP_ISUB_OVF_UN:
3466 CASE_PPC64 (OP_LSUB_OVF_UN)
3467 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3469 ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
3470 ppc_mfspr (code, ppc_r0, ppc_xer);
3471 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3472 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3474 case OP_ADD_OVF_CARRY:
3475 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3477 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ppc_mfspr (code, ppc_r0, ppc_xer);
3479 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3480 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3482 case OP_ADD_OVF_UN_CARRY:
3483 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3485 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3486 ppc_mfspr (code, ppc_r0, ppc_xer);
3487 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3488 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3490 case OP_SUB_OVF_CARRY:
3491 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3493 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3494 ppc_mfspr (code, ppc_r0, ppc_xer);
3495 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3496 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3498 case OP_SUB_OVF_UN_CARRY:
3499 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3501 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3502 ppc_mfspr (code, ppc_r0, ppc_xer);
3503 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3504 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3508 ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1);
3511 CASE_PPC64 (OP_LSUB)
3512 ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
3516 ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
3520 CASE_PPC64 (OP_LSUB_IMM)
3521 // we add the negated value
3522 if (ppc_is_imm16 (-ins->inst_imm))
3523 ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
3525 g_assert_not_reached ();
3529 g_assert (ppc_is_imm16 (ins->inst_imm));
3530 ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3533 ppc_subfze (code, ins->dreg, ins->sreg1);
3536 CASE_PPC64 (OP_LAND)
3537 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3538 ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2);
3542 CASE_PPC64 (OP_LAND_IMM)
3543 if (!(ins->inst_imm & 0xffff0000)) {
3544 ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm);
3545 } else if (!(ins->inst_imm & 0xffff)) {
3546 ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16));
3548 g_assert_not_reached ();
3552 CASE_PPC64 (OP_LDIV) {
3553 guint8 *divisor_is_m1;
3554 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3556 ppc_compare_reg_imm (code, 0, ins->sreg2, -1);
3557 divisor_is_m1 = code;
3558 ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
3559 ppc_lis (code, ppc_r0, 0x8000);
3560 #ifdef __mono_ppc64__
3561 if (ins->opcode == OP_LDIV)
3562 ppc_sldi (code, ppc_r0, ppc_r0, 32);
3564 ppc_compare (code, 0, ins->sreg1, ppc_r0);
3565 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3566 ppc_patch (divisor_is_m1, code);
3567 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3569 if (ins->opcode == OP_IDIV)
3570 ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2);
3571 #ifdef __mono_ppc64__
3573 ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2);
3575 ppc_mfspr (code, ppc_r0, ppc_xer);
3576 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3577 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3581 CASE_PPC64 (OP_LDIV_UN)
3582 if (ins->opcode == OP_IDIV_UN)
3583 ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2);
3584 #ifdef __mono_ppc64__
3586 ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2);
3588 ppc_mfspr (code, ppc_r0, ppc_xer);
3589 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3590 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3596 g_assert_not_reached ();
3599 ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2);
3603 CASE_PPC64 (OP_LOR_IMM)
3604 if (!(ins->inst_imm & 0xffff0000)) {
3605 ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3606 } else if (!(ins->inst_imm & 0xffff)) {
3607 ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16));
3609 g_assert_not_reached ();
3613 CASE_PPC64 (OP_LXOR)
3614 ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
3618 CASE_PPC64 (OP_LXOR_IMM)
3619 if (!(ins->inst_imm & 0xffff0000)) {
3620 ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3621 } else if (!(ins->inst_imm & 0xffff)) {
3622 ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16));
3624 g_assert_not_reached ();
3628 CASE_PPC64 (OP_LSHL)
3629 ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2);
3633 CASE_PPC64 (OP_LSHL_IMM)
3634 ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3637 ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2);
3640 ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3643 if (MASK_SHIFT_IMM (ins->inst_imm))
3644 ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3646 ppc_mr (code, ins->dreg, ins->sreg1);
3649 ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
3652 CASE_PPC64 (OP_LNOT)
3653 ppc_not (code, ins->dreg, ins->sreg1);
3656 CASE_PPC64 (OP_LNEG)
3657 ppc_neg (code, ins->dreg, ins->sreg1);
3660 CASE_PPC64 (OP_LMUL)
3661 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3665 CASE_PPC64 (OP_LMUL_IMM)
3666 if (ppc_is_imm16 (ins->inst_imm)) {
3667 ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm);
3669 g_assert_not_reached ();
3673 CASE_PPC64 (OP_LMUL_OVF)
3674 /* we annot use mcrxr, since it's not implemented on some processors
3675 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3677 if (ins->opcode == OP_IMUL_OVF)
3678 ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2);
3679 #ifdef __mono_ppc64__
3681 ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2);
3683 ppc_mfspr (code, ppc_r0, ppc_xer);
3684 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3685 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3687 case OP_IMUL_OVF_UN:
3688 CASE_PPC64 (OP_LMUL_OVF_UN)
3689 /* we first multiply to get the high word and compare to 0
3690 * to set the flags, then the result is discarded and then
3691 * we multiply to get the lower * bits result
3693 if (ins->opcode == OP_IMUL_OVF_UN)
3694 ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2);
3695 #ifdef __mono_ppc64__
3697 ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2);
3699 ppc_cmpi (code, 0, 0, ppc_r0, 0);
3700 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException");
3701 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3704 ppc_load (code, ins->dreg, ins->inst_c0);
3707 ppc_load (code, ins->dreg, ins->inst_l);
3710 case OP_LOAD_GOTADDR:
3711 /* The PLT implementation depends on this */
3712 g_assert (ins->dreg == ppc_r30);
3714 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
3717 // FIXME: Fix max instruction length
3718 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
3719 /* arch_emit_got_access () patches this */
3720 ppc_load32 (code, ppc_r0, 0);
3721 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3724 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3725 ppc_load_sequence (code, ins->dreg, 0);
3727 CASE_PPC32 (OP_ICONV_TO_I4)
3728 CASE_PPC32 (OP_ICONV_TO_U4)
3730 if (ins->dreg != ins->sreg1)
3731 ppc_mr (code, ins->dreg, ins->sreg1);
3734 int saved = ins->sreg1;
3735 if (ins->sreg1 == ppc_r3) {
3736 ppc_mr (code, ppc_r0, ins->sreg1);
3739 if (ins->sreg2 != ppc_r3)
3740 ppc_mr (code, ppc_r3, ins->sreg2);
3741 if (saved != ppc_r4)
3742 ppc_mr (code, ppc_r4, saved);
3746 if (ins->dreg != ins->sreg1)
3747 ppc_fmr (code, ins->dreg, ins->sreg1);
3749 case OP_MOVE_F_TO_I4:
3750 ppc_stfs (code, ins->sreg1, -4, ppc_r1);
3751 ppc_ldptr (code, ins->dreg, -4, ppc_r1);
3753 case OP_MOVE_I4_TO_F:
3754 ppc_stw (code, ins->sreg1, -4, ppc_r1);
3755 ppc_lfs (code, ins->dreg, -4, ppc_r1);
3757 case OP_FCONV_TO_R4:
3758 ppc_frsp (code, ins->dreg, ins->sreg1);
3762 MonoCallInst *call = (MonoCallInst*)ins;
3765 * Keep in sync with mono_arch_emit_epilog
3767 g_assert (!cfg->method->save_lmf);
3769 * Note: we can use ppc_r12 here because it is dead anyway:
3770 * we're leaving the method.
3772 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
3773 long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
3774 if (ppc_is_imm16 (ret_offset)) {
3775 ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
3777 ppc_load (code, ppc_r12, ret_offset);
3778 ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
3780 ppc_mtlr (code, ppc_r0);
3783 if (ppc_is_imm16 (cfg->stack_usage)) {
3784 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
3786 /* cfg->stack_usage is an int, so we can use
3787 * an addis/addi sequence here even in 64-bit. */
3788 ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
3789 ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
3791 if (!cfg->method->save_lmf) {
3793 for (i = 31; i >= 13; --i) {
3794 if (cfg->used_int_regs & (1 << i)) {
3795 pos += sizeof (gpointer);
3796 ppc_ldptr (code, i, -pos, ppc_r12);
3800 /* FIXME restore from MonoLMF: though this can't happen yet */
3803 /* Copy arguments on the stack to our argument area */
3804 if (call->stack_usage) {
3805 code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
3806 /* r12 was clobbered */
3807 g_assert (cfg->frame_reg == ppc_sp);
3808 if (ppc_is_imm16 (cfg->stack_usage)) {
3809 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
3811 /* cfg->stack_usage is an int, so we can use
3812 * an addis/addi sequence here even in 64-bit. */
3813 ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
3814 ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
3818 ppc_mr (code, ppc_sp, ppc_r12);
3819 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
3820 if (cfg->compile_aot) {
3821 /* arch_emit_got_access () patches this */
3822 ppc_load32 (code, ppc_r0, 0);
3823 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3824 ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0);
3825 ppc_ldptr (code, ppc_r0, 0, ppc_r12);
3827 ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
3829 ppc_mtctr (code, ppc_r0);
3830 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
3837 /* ensure ins->sreg1 is not NULL */
3838 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
3841 long cookie_offset = cfg->sig_cookie + cfg->stack_usage;
3842 if (ppc_is_imm16 (cookie_offset)) {
3843 ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset);
3845 ppc_load (code, ppc_r0, cookie_offset);
3846 ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
3848 ppc_stptr (code, ppc_r0, 0, ins->sreg1);
3857 call = (MonoCallInst*)ins;
3858 if (ins->flags & MONO_INST_HAS_METHOD)
3859 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3861 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3862 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3863 ppc_load_func (code, PPC_CALL_REG, 0);
3864 ppc_mtlr (code, PPC_CALL_REG);
3869 /* FIXME: this should be handled somewhere else in the new jit */
3870 code = emit_move_return_value (cfg, ins, code);
3876 case OP_VOIDCALL_REG:
3878 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3879 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
3880 /* FIXME: if we know that this is a method, we
3881 can omit this load */
3882 ppc_ldptr (code, ppc_r2, 8, ins->sreg1);
3883 ppc_mtlr (code, ppc_r0);
3885 ppc_mtlr (code, ins->sreg1);
3888 /* FIXME: this should be handled somewhere else in the new jit */
3889 code = emit_move_return_value (cfg, ins, code);
3891 case OP_FCALL_MEMBASE:
3892 case OP_LCALL_MEMBASE:
3893 case OP_VCALL_MEMBASE:
3894 case OP_VCALL2_MEMBASE:
3895 case OP_VOIDCALL_MEMBASE:
3896 case OP_CALL_MEMBASE:
3897 if (cfg->compile_aot && ins->sreg1 == ppc_r12) {
3898 /* The trampolines clobber this */
3899 ppc_mr (code, ppc_r29, ins->sreg1);
3900 ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
3902 ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1);
3904 ppc_mtlr (code, ppc_r0);
3906 /* FIXME: this should be handled somewhere else in the new jit */
3907 code = emit_move_return_value (cfg, ins, code);
3910 guint8 * zero_loop_jump, * zero_loop_start;
3911 /* keep alignment */
3912 int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
3913 int area_offset = alloca_waste;
3915 ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31);
3916 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
3917 ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4);
3918 /* use ctr to store the number of words to 0 if needed */
3919 if (ins->flags & MONO_INST_INIT) {
3920 /* we zero 4 bytes at a time:
3921 * we add 7 instead of 3 so that we set the counter to
3922 * at least 1, otherwise the bdnz instruction will make
3923 * it negative and iterate billions of times.
3925 ppc_addi (code, ppc_r0, ins->sreg1, 7);
3926 ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2);
3927 ppc_mtctr (code, ppc_r0);
3929 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3930 ppc_neg (code, ppc_r12, ppc_r12);
3931 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
3933 /* FIXME: make this loop work in 8 byte
3934 increments on PPC64 */
3935 if (ins->flags & MONO_INST_INIT) {
3936 /* adjust the dest reg by -4 so we can use stwu */
3937 /* we actually adjust -8 because we let the loop
3940 ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
3941 ppc_li (code, ppc_r12, 0);
3942 zero_loop_start = code;
3943 ppc_stwu (code, ppc_r12, 4, ins->dreg);
3944 zero_loop_jump = code;
3945 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
3946 ppc_patch (zero_loop_jump, zero_loop_start);
3948 ppc_addi (code, ins->dreg, ppc_sp, area_offset);
3953 ppc_mr (code, ppc_r3, ins->sreg1);
3954 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3955 (gpointer)"mono_arch_throw_exception");
3956 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3957 ppc_load_func (code, PPC_CALL_REG, 0);
3958 ppc_mtlr (code, PPC_CALL_REG);
3967 ppc_mr (code, ppc_r3, ins->sreg1);
3968 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3969 (gpointer)"mono_arch_rethrow_exception");
3970 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3971 ppc_load_func (code, PPC_CALL_REG, 0);
3972 ppc_mtlr (code, PPC_CALL_REG);
3979 case OP_START_HANDLER: {
3980 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3981 g_assert (spvar->inst_basereg != ppc_sp);
3982 code = emit_reserve_param_area (cfg, code);
3983 ppc_mflr (code, ppc_r0);
3984 if (ppc_is_imm16 (spvar->inst_offset)) {
3985 ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
3987 ppc_load (code, ppc_r12, spvar->inst_offset);
3988 ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg);
3992 case OP_ENDFILTER: {
3993 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3994 g_assert (spvar->inst_basereg != ppc_sp);
3995 code = emit_unreserve_param_area (cfg, code);
3996 if (ins->sreg1 != ppc_r3)
3997 ppc_mr (code, ppc_r3, ins->sreg1);
3998 if (ppc_is_imm16 (spvar->inst_offset)) {
3999 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4001 ppc_load (code, ppc_r12, spvar->inst_offset);
4002 ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12);
4004 ppc_mtlr (code, ppc_r0);
4008 case OP_ENDFINALLY: {
4009 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4010 g_assert (spvar->inst_basereg != ppc_sp);
4011 code = emit_unreserve_param_area (cfg, code);
4012 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4013 ppc_mtlr (code, ppc_r0);
4017 case OP_CALL_HANDLER:
4018 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4020 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4023 ins->inst_c0 = code - cfg->native_code;
4026 /*if (ins->inst_target_bb->native_offset) {
4028 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4030 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4035 ppc_mtctr (code, ins->sreg1);
4036 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
4040 CASE_PPC64 (OP_LCEQ)
4041 ppc_li (code, ins->dreg, 0);
4042 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4043 ppc_li (code, ins->dreg, 1);
4049 CASE_PPC64 (OP_LCLT)
4050 CASE_PPC64 (OP_LCLT_UN)
4051 ppc_li (code, ins->dreg, 1);
4052 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4053 ppc_li (code, ins->dreg, 0);
4059 CASE_PPC64 (OP_LCGT)
4060 CASE_PPC64 (OP_LCGT_UN)
4061 ppc_li (code, ins->dreg, 1);
4062 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4063 ppc_li (code, ins->dreg, 0);
4065 case OP_COND_EXC_EQ:
4066 case OP_COND_EXC_NE_UN:
4067 case OP_COND_EXC_LT:
4068 case OP_COND_EXC_LT_UN:
4069 case OP_COND_EXC_GT:
4070 case OP_COND_EXC_GT_UN:
4071 case OP_COND_EXC_GE:
4072 case OP_COND_EXC_GE_UN:
4073 case OP_COND_EXC_LE:
4074 case OP_COND_EXC_LE_UN:
4075 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4077 case OP_COND_EXC_IEQ:
4078 case OP_COND_EXC_INE_UN:
4079 case OP_COND_EXC_ILT:
4080 case OP_COND_EXC_ILT_UN:
4081 case OP_COND_EXC_IGT:
4082 case OP_COND_EXC_IGT_UN:
4083 case OP_COND_EXC_IGE:
4084 case OP_COND_EXC_IGE_UN:
4085 case OP_COND_EXC_ILE:
4086 case OP_COND_EXC_ILE_UN:
4087 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4099 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4102 /* floating point opcodes */
4104 g_assert (cfg->compile_aot);
4106 /* FIXME: Optimize this */
4108 ppc_mflr (code, ppc_r12);
4110 *(double*)code = *(double*)ins->inst_p0;
4112 ppc_lfd (code, ins->dreg, 8, ppc_r12);
4115 g_assert_not_reached ();
4117 case OP_STORER8_MEMBASE_REG:
4118 if (ppc_is_imm16 (ins->inst_offset)) {
4119 ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4121 if (ppc_is_imm32 (ins->inst_offset)) {
4122 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4123 ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11);
4125 ppc_load (code, ppc_r0, ins->inst_offset);
4126 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4130 case OP_LOADR8_MEMBASE:
4131 if (ppc_is_imm16 (ins->inst_offset)) {
4132 ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4134 if (ppc_is_imm32 (ins->inst_offset)) {
4135 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4136 ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11);
4138 ppc_load (code, ppc_r0, ins->inst_offset);
4139 ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4143 case OP_STORER4_MEMBASE_REG:
4144 ppc_frsp (code, ins->sreg1, ins->sreg1);
4145 if (ppc_is_imm16 (ins->inst_offset)) {
4146 ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4148 if (ppc_is_imm32 (ins->inst_offset)) {
4149 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4150 ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11);
4152 ppc_load (code, ppc_r0, ins->inst_offset);
4153 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4157 case OP_LOADR4_MEMBASE:
4158 if (ppc_is_imm16 (ins->inst_offset)) {
4159 ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4161 if (ppc_is_imm32 (ins->inst_offset)) {
4162 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4163 ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11);
4165 ppc_load (code, ppc_r0, ins->inst_offset);
4166 ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4170 case OP_LOADR4_MEMINDEX:
4171 ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4173 case OP_LOADR8_MEMINDEX:
4174 ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4176 case OP_STORER4_MEMINDEX:
4177 ppc_frsp (code, ins->sreg1, ins->sreg1);
4178 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4180 case OP_STORER8_MEMINDEX:
4181 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4184 case CEE_CONV_R4: /* FIXME: change precision */
4186 g_assert_not_reached ();
4187 case OP_FCONV_TO_I1:
4188 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4190 case OP_FCONV_TO_U1:
4191 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4193 case OP_FCONV_TO_I2:
4194 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4196 case OP_FCONV_TO_U2:
4197 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4199 case OP_FCONV_TO_I4:
4201 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4203 case OP_FCONV_TO_U4:
4205 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4207 case OP_LCONV_TO_R_UN:
4208 g_assert_not_reached ();
4209 /* Implemented as helper calls */
4211 case OP_LCONV_TO_OVF_I4_2:
4212 case OP_LCONV_TO_OVF_I: {
4213 #ifdef __mono_ppc64__
4216 guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
4217 // Check if its negative
4218 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
4219 negative_branch = code;
4220 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
4221 // Its positive msword == 0
4222 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
4223 msword_positive_branch = code;
4224 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
4226 ovf_ex_target = code;
4227 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
4229 ppc_patch (negative_branch, code);
4230 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
4231 msword_negative_branch = code;
4232 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4233 ppc_patch (msword_negative_branch, ovf_ex_target);
4235 ppc_patch (msword_positive_branch, code);
4236 if (ins->dreg != ins->sreg1)
4237 ppc_mr (code, ins->dreg, ins->sreg1);
4242 ppc_fsqrtd (code, ins->dreg, ins->sreg1);
4245 ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2);
4248 ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2);
4251 ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2);
4254 ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2);
4257 ppc_fneg (code, ins->dreg, ins->sreg1);
4261 g_assert_not_reached ();
4264 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4267 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4268 ppc_li (code, ins->dreg, 0);
4269 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4270 ppc_li (code, ins->dreg, 1);
4273 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4274 ppc_li (code, ins->dreg, 1);
4275 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4276 ppc_li (code, ins->dreg, 0);
4279 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4280 ppc_li (code, ins->dreg, 1);
4281 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4282 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4283 ppc_li (code, ins->dreg, 0);
4286 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4287 ppc_li (code, ins->dreg, 1);
4288 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4289 ppc_li (code, ins->dreg, 0);
4292 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4293 ppc_li (code, ins->dreg, 1);
4294 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4295 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4296 ppc_li (code, ins->dreg, 0);
4299 EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
4302 EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
4305 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4306 EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ);
4309 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4310 EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ);
4313 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4314 EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ);
4317 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4318 EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ);
4321 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4322 EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ);
4325 EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ);
4328 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4329 EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ);
4332 EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ);
4335 g_assert_not_reached ();
4336 case OP_CHECK_FINITE: {
4337 ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31);
4338 ppc_addis (code, ins->sreg1, ins->sreg1, -32752);
4339 ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
4340 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
4343 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0);
4344 #ifdef __mono_ppc64__
4345 ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL);
4347 ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL);
4352 #ifdef __mono_ppc64__
4353 case OP_ICONV_TO_I4:
4355 ppc_extsw (code, ins->dreg, ins->sreg1);
4357 case OP_ICONV_TO_U4:
4359 ppc_clrldi (code, ins->dreg, ins->sreg1, 32);
4361 case OP_ICONV_TO_R4:
4362 case OP_ICONV_TO_R8:
4363 case OP_LCONV_TO_R4:
4364 case OP_LCONV_TO_R8: {
4366 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) {
4367 ppc_extsw (code, ppc_r0, ins->sreg1);
4372 if (cpu_hw_caps & PPC_MOVE_FPR_GPR) {
4373 ppc_mffgpr (code, ins->dreg, tmp);
4375 ppc_str (code, tmp, -8, ppc_r1);
4376 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4378 ppc_fcfid (code, ins->dreg, ins->dreg);
4379 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4)
4380 ppc_frsp (code, ins->dreg, ins->dreg);
4384 ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2);
4387 ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2);
4390 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4392 ppc_mfspr (code, ppc_r0, ppc_xer);
4393 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */
4394 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4396 case OP_COND_EXC_OV:
4397 ppc_mfspr (code, ppc_r0, ppc_xer);
4398 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */
4399 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4411 EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ);
4413 case OP_FCONV_TO_I8:
4414 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4416 case OP_FCONV_TO_U8:
4417 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
4419 case OP_STOREI4_MEMBASE_REG:
4420 if (ppc_is_imm16 (ins->inst_offset)) {
4421 ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4423 ppc_load (code, ppc_r0, ins->inst_offset);
4424 ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4427 case OP_STOREI4_MEMINDEX:
4428 ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg);
4431 ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4433 case OP_ISHR_UN_IMM:
4434 if (ins->inst_imm & 0x1f)
4435 ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4437 ppc_mr (code, ins->dreg, ins->sreg1);
4440 case OP_ICONV_TO_R4:
4441 case OP_ICONV_TO_R8: {
4442 if (cpu_hw_caps & PPC_ISA_64) {
4443 ppc_srawi(code, ppc_r0, ins->sreg1, 31);
4444 ppc_stw (code, ppc_r0, -8, ppc_r1);
4445 ppc_stw (code, ins->sreg1, -4, ppc_r1);
4446 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4447 ppc_fcfid (code, ins->dreg, ins->dreg);
4448 if (ins->opcode == OP_ICONV_TO_R4)
4449 ppc_frsp (code, ins->dreg, ins->dreg);
4455 case OP_ATOMIC_ADD_I4:
4456 CASE_PPC64 (OP_ATOMIC_ADD_I8) {
4457 int location = ins->inst_basereg;
4458 int addend = ins->sreg2;
4459 guint8 *loop, *branch;
4460 g_assert (ins->inst_offset == 0);
4464 if (ins->opcode == OP_ATOMIC_ADD_I4)
4465 ppc_lwarx (code, ppc_r0, 0, location);
4466 #ifdef __mono_ppc64__
4468 ppc_ldarx (code, ppc_r0, 0, location);
4471 ppc_add (code, ppc_r0, ppc_r0, addend);
4473 if (ins->opcode == OP_ATOMIC_ADD_I4)
4474 ppc_stwcxd (code, ppc_r0, 0, location);
4475 #ifdef __mono_ppc64__
4477 ppc_stdcxd (code, ppc_r0, 0, location);
4481 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4482 ppc_patch (branch, loop);
4485 ppc_mr (code, ins->dreg, ppc_r0);
4488 case OP_ATOMIC_CAS_I4:
4489 CASE_PPC64 (OP_ATOMIC_CAS_I8) {
4490 int location = ins->sreg1;
4491 int value = ins->sreg2;
4492 int comparand = ins->sreg3;
4493 guint8 *start, *not_equal, *lost_reservation;
4497 if (ins->opcode == OP_ATOMIC_CAS_I4)
4498 ppc_lwarx (code, ppc_r0, 0, location);
4499 #ifdef __mono_ppc64__
4501 ppc_ldarx (code, ppc_r0, 0, location);
4504 ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 0 : 1, ppc_r0, comparand);
4506 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4508 if (ins->opcode == OP_ATOMIC_CAS_I4)
4509 ppc_stwcxd (code, value, 0, location);
4510 #ifdef __mono_ppc64__
4512 ppc_stdcxd (code, value, 0, location);
4515 lost_reservation = code;
4516 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4517 ppc_patch (lost_reservation, start);
4518 ppc_patch (not_equal, code);
4521 ppc_mr (code, ins->dreg, ppc_r0);
4526 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4527 g_assert_not_reached ();
4530 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4531 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4532 mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset));
4533 g_assert_not_reached ();
4539 last_offset = offset;
4542 cfg->code_len = code - cfg->native_code;
4544 #endif /* !DISABLE_JIT */
/*
 * Register PPC-specific low-level helpers as JIT icalls.
 * Only the architecture throw-exception helper is registered here; the
 * icall signature passed is a dummy "void" one (see original comment below).
 * NOTE(review): the return type / opening brace lines are elided in this
 * view of the file.
 */
4547 mono_arch_register_lowlevel_calls (void)
4549 	/* The signature doesn't matter */
4550 	mono_register_jit_icall (mono_ppc_throw_exception, "mono_ppc_throw_exception", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_load_sequence(ip, val): rewrite the immediate fields of a
 * previously emitted register-load instruction sequence at `ip` so that it
 * materializes `val`.  Used by mono_arch_patch_code for AOT/IP/switch-table
 * patches that were emitted via ppc_load_sequence.
 */
4553 #ifdef __mono_ppc64__
4554 #ifdef _LITTLE_ENDIAN
/*
 * ppc64 little-endian: the 64-bit load sequence carries four 16-bit
 * immediates.  Viewing the code as guint16[], the immediate halfword of
 * each instruction sits at indices 0/2/6/8 (the other index of each pair
 * holds the opcode/register halfword).  The value is split high-to-low:
 * bits 63-48, 47-32, 31-16, 15-0.
 */
4555 #define patch_load_sequence(ip,val) do {\
4556 guint16 *__load = (guint16*)(ip); \
4557 g_assert (sizeof (val) == sizeof (gsize)); \
4558 __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4559 __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4560 __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4561 __load [8] = ((guint64)(gsize)(val)) & 0xffff; \
4563 #elif defined _BIG_ENDIAN
/* ppc64 big-endian: same sequence, but the immediate halfword is the
 * second guint16 of each 32-bit instruction, hence indices 1/3/7/9. */
4564 #define patch_load_sequence(ip,val) do {\
4565 guint16 *__load = (guint16*)(ip); \
4566 g_assert (sizeof (val) == sizeof (gsize)); \
4567 __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4568 __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4569 __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4570 __load [9] = ((guint64)(gsize)(val)) & 0xffff; \
4573 #error huh? No endianess defined by compiler
/* 32-bit PPC: the load sequence is just lis+ori, so only two immediate
 * halfwords need patching (high 16 bits, then low 16 bits). */
4576 #define patch_load_sequence(ip,val) do {\
4577 __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
4578 __lis_ori [3] = ((gulong)(val)) & 0xffff; \
/*
 * Apply all pending JIT patches to the freshly emitted native code.
 * For each MonoJumpInfo in `ji`, resolve the target address and rewrite
 * the instruction(s) at patch_info->ip: either the immediates of a load
 * sequence (patch_load_sequence) or a branch/call (ppc_patch_full at the
 * bottom).  `run_cctors` is false when compiling AOT.
 * NOTE(review): several case bodies and the closing brace are elided in
 * this view; comments below only describe what the visible lines show.
 */
4585 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4587 	MonoJumpInfo *patch_info;
4588 	gboolean compile_aot = !run_cctors;
4590 	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4591 		unsigned char *ip = patch_info->ip.i + code;
4592 		unsigned char *target;
4593 		gboolean is_fd = FALSE;
4595 		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
/* First switch: patch kinds that need no instruction rewriting at all
 * (basic-block targets etc. are handled by the branch patching below /
 * elsewhere). */
4598 		switch (patch_info->type) {
4599 		case MONO_PATCH_INFO_BB:
4600 		case MONO_PATCH_INFO_LABEL:
4603 			/* No need to patch these */
/* Second switch: kinds whose emitted code must be rewritten in place. */
4608 		switch (patch_info->type) {
4609 		case MONO_PATCH_INFO_IP:
/* Store the patch site's own address into the load sequence. */
4610 			patch_load_sequence (ip, ip);
4612 		case MONO_PATCH_INFO_METHOD_REL:
4613 			g_assert_not_reached ();
4614 			*((gpointer *)(ip)) = code + patch_info->data.offset;
4616 		case MONO_PATCH_INFO_SWITCH: {
/* Rewrite the jump table pointer, then turn each table entry from a
 * code-relative offset into an absolute address. */
4617 			gpointer *table = (gpointer *)patch_info->data.table->table;
4620 			patch_load_sequence (ip, table);
4622 			for (i = 0; i < patch_info->data.table->table_size; i++) {
4623 				table [i] = (glong)patch_info->data.table->table [i] + code;
4625 			/* we put into the table the absolute address, no need for ppc_patch in this case */
4628 		case MONO_PATCH_INFO_METHODCONST:
4629 		case MONO_PATCH_INFO_CLASS:
4630 		case MONO_PATCH_INFO_IMAGE:
4631 		case MONO_PATCH_INFO_FIELD:
4632 		case MONO_PATCH_INFO_VTABLE:
4633 		case MONO_PATCH_INFO_IID:
4634 		case MONO_PATCH_INFO_SFLDA:
4635 		case MONO_PATCH_INFO_LDSTR:
4636 		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4637 		case MONO_PATCH_INFO_LDTOKEN:
4638 			/* from OP_AOTCONST : lis + ori */
4639 			patch_load_sequence (ip, target);
4641 		case MONO_PATCH_INFO_R4:
4642 		case MONO_PATCH_INFO_R8:
4643 			g_assert_not_reached ();
4644 			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
4646 		case MONO_PATCH_INFO_EXC_NAME:
4647 			g_assert_not_reached ();
4648 			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
4650 		case MONO_PATCH_INFO_NONE:
4651 		case MONO_PATCH_INFO_BB_OVF:
4652 		case MONO_PATCH_INFO_EXC_OVF:
4653 			/* everything is dealt with at epilog output time */
4655 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
/* On ABIs with function descriptors (ppc64 ELFv1), calls to these
 * targets go through a descriptor — presumably is_fd is set in the
 * elided case bodies so ppc_patch_full can dereference it. */
4656 		case MONO_PATCH_INFO_INTERNAL_METHOD:
4657 		case MONO_PATCH_INFO_ABS:
4658 		case MONO_PATCH_INFO_RGCTX_FETCH:
4659 		case MONO_PATCH_INFO_JIT_ICALL_ADDR:
/* Default path: patch the branch/call instruction at ip to reach target. */
4666 		ppc_patch_full (ip, target, is_fd);
4671  * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4672  * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4673  * the instruction offset immediate for all the registers.
4676 save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset)
/* Non-LMF path (presumably guarded by !save_lmf — the branch is elided):
 * store each callee-saved GPR (r13..r31) that this method actually uses,
 * emitting a matching unwind-info offset record for each. */
4680 	for (i = 13; i <= 31; i++) {
4681 		if (used_int_regs & (1 << i)) {
4682 			ppc_str (code, i, pos, base_reg);
4683 			mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset);
4684 			pos += sizeof (mgreg_t);
4688 		/* pos is the start of the MonoLMF structure */
/* LMF path: unconditionally save all callee-saved GPRs into the iregs
 * array of the MonoLMF at base_reg+pos. */
4689 		int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs);
4690 		for (i = 13; i <= 31; i++) {
4691 			ppc_str (code, i, offset, base_reg);
4692 			mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset);
4693 			offset += sizeof (mgreg_t);
/* Then save the callee-saved FPRs (f14..f31) into the fregs array;
 * no unwind records are emitted for the FP registers here. */
4695 		offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs);
4696 		for (i = 14; i < 32; i++) {
4697 			ppc_stfd (code, i, offset, base_reg);
4698 			offset += sizeof (gdouble);
4705 * Stack frame layout:
4707 * ------------------- sp
4708 * MonoLMF structure or saved registers
4709 * -------------------
4711 * -------------------
4713 * -------------------
4714 * optional 8 bytes for tracing
4715 * -------------------
4716 * param area size is cfg->param_area
4717 * -------------------
4718 * linkage area size is PPC_STACK_PARAM_OFFSET
4719 * ------------------- sp
4723 mono_arch_emit_prolog (MonoCompile *cfg)
4725 MonoMethod *method = cfg->method;
4727 MonoMethodSignature *sig;
4729 long alloc_size, pos, max_offset, cfa_offset;
4735 int tailcall_struct_index;
4737 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4740 sig = mono_method_signature (method);
4741 cfg->code_size = 512 + sig->param_count * 32;
4742 code = cfg->native_code = g_malloc (cfg->code_size);
4746 /* We currently emit unwind info for aot, but don't use it */
4747 mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0);
4749 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
4750 ppc_mflr (code, ppc_r0);
4751 ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp);
4752 mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET);
4755 alloc_size = cfg->stack_offset;
4758 if (!method->save_lmf) {
4759 for (i = 31; i >= 13; --i) {
4760 if (cfg->used_int_regs & (1 << i)) {
4761 pos += sizeof (mgreg_t);
4765 pos += sizeof (MonoLMF);
4769 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4770 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4771 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4772 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4775 cfg->stack_usage = alloc_size;
4776 g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0);
4778 if (ppc_is_imm16 (-alloc_size)) {
4779 ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp);
4780 cfa_offset = alloc_size;
4781 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4782 code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
4785 ppc_addi (code, ppc_r12, ppc_sp, -pos);
4786 ppc_load (code, ppc_r0, -alloc_size);
4787 ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
4788 cfa_offset = alloc_size;
4789 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4790 code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset);
4793 if (cfg->frame_reg != ppc_sp) {
4794 ppc_mr (code, cfg->frame_reg, ppc_sp);
4795 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4798 /* store runtime generic context */
4799 if (cfg->rgctx_var) {
4800 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
4801 (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31));
4803 ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg);
4806 /* compute max_offset in order to use short forward jumps
4807 * we always do it on ppc because the immediate displacement
4808 * for jumps is too small
4811 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4813 bb->max_offset = max_offset;
4815 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4818 MONO_BB_FOR_EACH_INS (bb, ins)
4819 max_offset += ins_native_length (cfg, ins);
4822 /* load arguments allocated to register from the stack */
4825 cinfo = get_call_info (sig);
4827 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
4828 ArgInfo *ainfo = &cinfo->ret;
4830 inst = cfg->vret_addr;
4833 if (ppc_is_imm16 (inst->inst_offset)) {
4834 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4836 ppc_load (code, ppc_r12, inst->inst_offset);
4837 ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
4841 tailcall_struct_index = 0;
4842 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4843 ArgInfo *ainfo = cinfo->args + i;
4844 inst = cfg->args [pos];
4846 if (cfg->verbose_level > 2)
4847 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
4848 if (inst->opcode == OP_REGVAR) {
4849 if (ainfo->regtype == RegTypeGeneral)
4850 ppc_mr (code, inst->dreg, ainfo->reg);
4851 else if (ainfo->regtype == RegTypeFP)
4852 ppc_fmr (code, inst->dreg, ainfo->reg);
4853 else if (ainfo->regtype == RegTypeBase) {
4854 ppc_ldr (code, ppc_r12, 0, ppc_sp);
4855 ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12);
4857 g_assert_not_reached ();
4859 if (cfg->verbose_level > 2)
4860 g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4862 /* the argument should be put on the stack: FIXME handle size != word */
4863 if (ainfo->regtype == RegTypeGeneral) {
4864 switch (ainfo->size) {
4866 if (ppc_is_imm16 (inst->inst_offset)) {
4867 ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4869 if (ppc_is_imm32 (inst->inst_offset)) {
4870 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4871 ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12);
4873 ppc_load (code, ppc_r12, inst->inst_offset);
4874 ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4879 if (ppc_is_imm16 (inst->inst_offset)) {
4880 ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4882 if (ppc_is_imm32 (inst->inst_offset)) {
4883 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4884 ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12);
4886 ppc_load (code, ppc_r12, inst->inst_offset);
4887 ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4891 #ifdef __mono_ppc64__
4893 if (ppc_is_imm16 (inst->inst_offset)) {
4894 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4896 if (ppc_is_imm32 (inst->inst_offset)) {
4897 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4898 ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12);
4900 ppc_load (code, ppc_r12, inst->inst_offset);
4901 ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4906 if (ppc_is_imm16 (inst->inst_offset)) {
4907 ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4909 ppc_load (code, ppc_r12, inst->inst_offset);
4910 ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
4915 if (ppc_is_imm16 (inst->inst_offset + 4)) {
4916 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4917 ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
4919 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4920 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
4921 ppc_stw (code, ainfo->reg, 0, ppc_r12);
4922 ppc_stw (code, ainfo->reg + 1, 4, ppc_r12);
4927 if (ppc_is_imm16 (inst->inst_offset)) {
4928 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4930 if (ppc_is_imm32 (inst->inst_offset)) {
4931 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4932 ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12);
4934 ppc_load (code, ppc_r12, inst->inst_offset);
4935 ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4940 } else if (ainfo->regtype == RegTypeBase) {
4941 g_assert (ppc_is_imm16 (ainfo->offset));
4942 /* load the previous stack pointer in r12 */
4943 ppc_ldr (code, ppc_r12, 0, ppc_sp);
4944 ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12);
4945 switch (ainfo->size) {
4947 if (ppc_is_imm16 (inst->inst_offset)) {
4948 ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
4950 if (ppc_is_imm32 (inst->inst_offset)) {
4951 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4952 ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12);
4954 ppc_load (code, ppc_r12, inst->inst_offset);
4955 ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12);
4960 if (ppc_is_imm16 (inst->inst_offset)) {
4961 ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
4963 if (ppc_is_imm32 (inst->inst_offset)) {
4964 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4965 ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12);
4967 ppc_load (code, ppc_r12, inst->inst_offset);
4968 ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12);
4972 #ifdef __mono_ppc64__
4974 if (ppc_is_imm16 (inst->inst_offset)) {
4975 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
4977 if (ppc_is_imm32 (inst->inst_offset)) {
4978 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4979 ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12);
4981 ppc_load (code, ppc_r12, inst->inst_offset);
4982 ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12);
4987 if (ppc_is_imm16 (inst->inst_offset)) {
4988 ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
4990 ppc_load (code, ppc_r12, inst->inst_offset);
4991 ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg);
4996 g_assert (ppc_is_imm16 (ainfo->offset + 4));
4997 if (ppc_is_imm16 (inst->inst_offset + 4)) {
4998 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
4999 ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12);
5000 ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
5002 /* use r11 to load the 2nd half of the long before we clobber r12. */
5003 ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12);
5004 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5005 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
5006 ppc_stw (code, ppc_r0, 0, ppc_r12);
5007 ppc_stw (code, ppc_r11, 4, ppc_r12);
5012 if (ppc_is_imm16 (inst->inst_offset)) {
5013 ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5015 if (ppc_is_imm32 (inst->inst_offset)) {
5016 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5017 ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12);
5019 ppc_load (code, ppc_r12, inst->inst_offset);
5020 ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12);
5025 } else if (ainfo->regtype == RegTypeFP) {
5026 g_assert (ppc_is_imm16 (inst->inst_offset));
5027 if (ainfo->size == 8)
5028 ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5029 else if (ainfo->size == 4)
5030 ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5032 g_assert_not_reached ();
5033 } else if (ainfo->regtype == RegTypeStructByVal) {
5034 int doffset = inst->inst_offset;
5038 g_assert (ppc_is_imm16 (inst->inst_offset));
5039 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5040 /* FIXME: what if there is no class? */
5041 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5042 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5043 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5046 * Darwin handles 1 and 2 byte
5047 * structs specially by
5048 * loading h/b into the arg
5049 * register. Only done for
5053 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5055 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5059 #ifdef __mono_ppc64__
5061 g_assert (cur_reg == 0);
5062 ppc_sldi (code, ppc_r0, ainfo->reg,
5063 (sizeof (gpointer) - ainfo->bytes) * 8);
5064 ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg);
5068 ppc_stptr (code, ainfo->reg + cur_reg, doffset,
5069 inst->inst_basereg);
5072 soffset += sizeof (gpointer);
5073 doffset += sizeof (gpointer);
5075 if (ainfo->vtsize) {
5076 /* FIXME: we need to do the shifting here, too */
5079 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5080 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5081 if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
5082 code = emit_memcpy (code, size - soffset,
5083 inst->inst_basereg, doffset,
5084 ppc_r12, ainfo->offset + soffset);
5086 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
5087 inst->inst_basereg, doffset,
5088 ppc_r12, ainfo->offset + soffset);
5091 } else if (ainfo->regtype == RegTypeStructByAddr) {
5092 /* if it was originally a RegTypeBase */
5093 if (ainfo->offset) {
5094 /* load the previous stack pointer in r12 */
5095 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5096 ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12);
5098 ppc_mr (code, ppc_r12, ainfo->reg);
5101 if (cfg->tailcall_valuetype_addrs) {
5102 MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
5104 g_assert (ppc_is_imm16 (addr->inst_offset));
5105 ppc_stptr (code, ppc_r12, addr->inst_offset, addr->inst_basereg);
5107 tailcall_struct_index++;
5110 g_assert (ppc_is_imm16 (inst->inst_offset));
5111 code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0);
5112 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5114 g_assert_not_reached ();
5119 if (method->save_lmf) {
5120 if (lmf_pthread_key != -1) {
5121 emit_tls_access (code, ppc_r3, lmf_pthread_key);
5122 if (tls_mode != TLS_MODE_NPTL && G_STRUCT_OFFSET (MonoJitTlsData, lmf))
5123 ppc_addi (code, ppc_r3, ppc_r3, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
5125 if (cfg->compile_aot) {
5126 /* Compute the got address which is needed by the PLT entry */
5127 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
5129 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5130 (gpointer)"mono_get_lmf_addr");
5131 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
5132 ppc_load_func (code, PPC_CALL_REG, 0);
5133 ppc_mtlr (code, PPC_CALL_REG);
5139 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5140 /* lmf_offset is the offset from the previous stack pointer,
5141 * alloc_size is the total stack space allocated, so the offset
5142 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5143 * The pointer to the struct is put in ppc_r12 (new_lmf).
5144 * The callee-saved registers are already in the MonoLMF structure
5146 ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset);
5147 /* ppc_r3 is the result from mono_get_lmf_addr () */
5148 ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5149 /* new_lmf->previous_lmf = *lmf_addr */
5150 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5151 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
5152 /* *(lmf_addr) = r12 */
5153 ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5154 /* save method info */
5155 if (cfg->compile_aot)
5157 ppc_load (code, ppc_r0, 0);
5159 ppc_load_ptr (code, ppc_r0, method);
5160 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12);
5161 ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12);
5162 /* save the current IP */
5163 if (cfg->compile_aot) {
5165 ppc_mflr (code, ppc_r0);
5167 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
5168 #ifdef __mono_ppc64__
5169 ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL);
5171 ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
5174 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12);
5178 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5180 cfg->code_len = code - cfg->native_code;
5181 g_assert (cfg->code_len <= cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emits the method epilog: optionally restores the MonoLMF chain and
 * callee-saved registers (r13-r31), reloads the return address into LR,
 * restores the stack pointer and returns.
 * NOTE(review): this extract is missing lines (else branches/closing braces);
 * the surviving statements are kept byte-identical.
 */
5188 mono_arch_emit_epilog (MonoCompile *cfg)
5190 MonoMethod *method = cfg->method;
5192 int max_epilog_size = 16 + 20*4;
/* grow the estimated epilog size for the optional LMF-restore and tracing code */
5195 if (cfg->method->save_lmf)
5196 max_epilog_size += 128;
5198 if (mono_jit_trace_calls != NULL)
5199 max_epilog_size += 50;
5201 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5202 max_epilog_size += 50;
/* make sure the code buffer has room (keep 16 bytes of slack) */
5204 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5205 cfg->code_size *= 2;
5206 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5207 cfg->stat_code_reallocs++;
5211 * Keep in sync with OP_JMP
5213 code = cfg->native_code + cfg->code_len;
5215 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5216 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5220 if (method->save_lmf) {
5222 pos += sizeof (MonoLMF);
5224 /* save the frame reg in r8 */
5225 ppc_mr (code, ppc_r8, cfg->frame_reg);
/* r12 = address of the MonoLMF on our stack frame */
5226 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset);
5227 /* r5 = previous_lmf */
5228 ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
5230 ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5231 /* *(lmf_addr) = previous_lmf */
5232 ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
5233 /* FIXME: speedup: there is no actual need to restore the registers if
5234 * we didn't actually change them (idea from Zoltan).
/* reload r13-r31 from the LMF in one multiple-load */
5237 ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12);
5239 /*for (i = 14; i < 32; i++) {
5240 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5242 g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
5243 /* use the saved copy of the frame reg in r8 */
5244 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5245 ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8);
5246 ppc_mtlr (code, ppc_r0);
5248 ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage);
/* non-LMF path: reload the return address (offset may not fit in imm16) */
5250 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5251 long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
5252 if (ppc_is_imm16 (return_offset)) {
5253 ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
5255 ppc_load (code, ppc_r12, return_offset);
5256 ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
5258 ppc_mtlr (code, ppc_r0);
5260 if (ppc_is_imm16 (cfg->stack_usage)) {
5261 int offset = cfg->stack_usage;
/* first pass: compute the offset of the lowest saved register */
5262 for (i = 13; i <= 31; i++) {
5263 if (cfg->used_int_regs & (1 << i))
5264 offset -= sizeof (mgreg_t);
5266 if (cfg->frame_reg != ppc_sp)
5267 ppc_mr (code, ppc_r12, cfg->frame_reg);
5268 /* note r31 (possibly the frame register) is restored last */
5269 for (i = 13; i <= 31; i++) {
5270 if (cfg->used_int_regs & (1 << i)) {
5271 ppc_ldr (code, i, offset, cfg->frame_reg);
5272 offset += sizeof (mgreg_t);
5275 if (cfg->frame_reg != ppc_sp)
5276 ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage);
5278 ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
/* large-frame path: stack_usage does not fit in imm16, go through r12 */
5280 ppc_load32 (code, ppc_r12, cfg->stack_usage);
5281 if (cfg->used_int_regs) {
5282 ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12);
5283 for (i = 31; i >= 13; --i) {
5284 if (cfg->used_int_regs & (1 << i)) {
5285 pos += sizeof (mgreg_t);
5286 ppc_ldr (code, i, -pos, ppc_r12);
5289 ppc_mr (code, ppc_sp, ppc_r12);
5291 ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12);
5298 cfg->code_len = code - cfg->native_code;
5300 g_assert (cfg->code_len < cfg->code_size);
5303 #endif /* ifndef DISABLE_JIT */
5305 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Maps a corlib exception class name to its MONO_EXC_* intrinsic id.
 * Aborts via g_error() on an unknown name, so callers may assume a
 * valid id is always returned.
 */
5307 exception_id_by_name (const char *name)
5309 if (strcmp (name, "IndexOutOfRangeException") == 0)
5310 return MONO_EXC_INDEX_OUT_OF_RANGE;
5311 if (strcmp (name, "OverflowException") == 0)
5312 return MONO_EXC_OVERFLOW;
5313 if (strcmp (name, "ArithmeticException") == 0)
5314 return MONO_EXC_ARITHMETIC;
5315 if (strcmp (name, "DivideByZeroException") == 0)
5316 return MONO_EXC_DIVIDE_BY_ZERO;
5317 if (strcmp (name, "InvalidCastException") == 0)
5318 return MONO_EXC_INVALID_CAST;
5319 if (strcmp (name, "NullReferenceException") == 0)
5320 return MONO_EXC_NULL_REF;
5321 if (strcmp (name, "ArrayTypeMismatchException") == 0)
5322 return MONO_EXC_ARRAY_TYPE_MISMATCH;
5323 if (strcmp (name, "ArgumentException") == 0)
5324 return MONO_EXC_ARGUMENT;
5325 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Appends out-of-line exception-throwing and overflow-branch stubs after
 * the method body, then patches the in-body branches to reach them.
 * Identical MONO_PATCH_INFO_EXC throws are shared via exc_throw_pos[]
 * as long as the branch displacement stays in range.
 */
5331 mono_arch_emit_exceptions (MonoCompile *cfg)
5333 MonoJumpInfo *patch_info;
5336 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5337 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5338 int max_epilog_size = 50;
5340 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5341 exc_throw_pos [i] = NULL;
5342 exc_throw_found [i] = 0;
5345 /* count the number of exception infos */
5348 * make sure we have enough space for exceptions
5350 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5351 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5352 i = exception_id_by_name (patch_info->data.target);
5353 if (!exc_throw_found [i]) {
5354 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5355 exc_throw_found [i] = TRUE;
5357 } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF)
5358 max_epilog_size += 12;
5359 else if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) {
5360 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5361 i = exception_id_by_name (ovfj->data.exception);
5362 if (!exc_throw_found [i]) {
5363 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5364 exc_throw_found [i] = TRUE;
5366 max_epilog_size += 8;
5370 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5371 cfg->code_size *= 2;
5372 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5373 cfg->stat_code_reallocs++;
5376 code = cfg->native_code + cfg->code_len;
5378 /* add code to raise exceptions */
5379 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5380 switch (patch_info->type) {
5381 case MONO_PATCH_INFO_BB_OVF: {
5382 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5383 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5384 /* patch the initial jump */
5385 ppc_patch (ip, code);
5386 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2);
5388 ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */
5389 /* jump back to the true target */
5391 ip = ovfj->data.bb->native_offset + cfg->native_code;
5392 ppc_patch (code - 4, ip);
5393 patch_info->type = MONO_PATCH_INFO_NONE;
5396 case MONO_PATCH_INFO_EXC_OVF: {
5397 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5398 MonoJumpInfo *newji;
5399 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5400 unsigned char *bcl = code;
5401 /* patch the initial jump: we arrived here with a call */
5402 ppc_patch (ip, code);
5403 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0);
5405 ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */
5406 /* patch the conditional jump to the right handler */
5407 /* make it processed next */
/* queue a synthetic EXC patch right after this one so the stub below handles it */
5408 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
5409 newji->type = MONO_PATCH_INFO_EXC;
5410 newji->ip.i = bcl - cfg->native_code;
5411 newji->data.target = ovfj->data.exception;
5412 newji->next = patch_info->next;
5413 patch_info->next = newji;
5414 patch_info->type = MONO_PATCH_INFO_NONE;
5417 case MONO_PATCH_INFO_EXC: {
5418 MonoClass *exc_class;
5420 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5421 i = exception_id_by_name (patch_info->data.target);
/* reuse an already-emitted throw stub when the branch can still reach it */
5422 if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) {
5423 ppc_patch (ip, exc_throw_pos [i]);
5424 patch_info->type = MONO_PATCH_INFO_NONE;
5427 exc_throw_pos [i] = code;
5430 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5431 g_assert (exc_class);
5433 ppc_patch (ip, code);
5434 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5435 ppc_load (code, ppc_r3, exc_class->type_token);
5436 /* we got here from a conditional call, so the calling ip is set in lr */
5437 ppc_mflr (code, ppc_r4);
/* retarget this patch to the throw helper call emitted below */
5438 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5439 patch_info->data.name = "mono_arch_throw_corlib_exception";
5440 patch_info->ip.i = code - cfg->native_code;
5441 if (FORCE_INDIR_CALL || cfg->method->dynamic) {
5442 ppc_load_func (code, PPC_CALL_REG, 0);
5443 ppc_mtctr (code, PPC_CALL_REG);
5444 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5456 cfg->code_len = code - cfg->native_code;
5458 g_assert (cfg->code_len <= cfg->code_size);
/*
 * try_offset_access:
 * Probe helper for the linuxthreads TLS layout: reads through the
 * two-level pthread-specific table at r2+284 and checks that the slot
 * for `idx` holds `value`.  Used only for TLS-mode detection.
 */
5464 try_offset_access (void *value, guint32 idx)
5466 register void* me __asm__ ("r2");
5467 void ***p = (void***)((char*)me + 284);
5468 int idx1 = idx / 32;
5469 int idx2 = idx % 32;
5472 if (value != p[idx1][idx2])
/*
 * setup_tls_access:
 * Detects which thread-local-storage access scheme the platform uses
 * (NPTL, linuxthreads, Darwin G4/G5) by inspecting the runtime's
 * pthread implementation, and records the result in tls_mode /
 * lmf_pthread_key.  Falls back to TLS_MODE_FAILED when nothing matches
 * or MONO_NO_TLS is set.
 * NOTE(review): several branch/brace lines are missing from this
 * extract; control flow shown here is partial.
 */
5479 setup_tls_access (void)
5481 #if defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
5482 size_t conf_size = 0;
5485 /* FIXME for darwin */
5486 guint32 *ins, *code;
5487 guint32 cmplwi_1023, li_0x48, blr_ins;
5491 tls_mode = TLS_MODE_FAILED;
5494 if (tls_mode == TLS_MODE_FAILED)
5496 if (g_getenv ("MONO_NO_TLS")) {
5497 tls_mode = TLS_MODE_FAILED;
5501 if (tls_mode == TLS_MODE_DETECT) {
5502 #if defined(__APPLE__) && defined(__mono_ppc__) && !defined(__mono_ppc64__)
5503 tls_mode = TLS_MODE_DARWIN_G4;
5504 #elif defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
/* glibc reports "NPTL x.y" via confstr when the NPTL threading library is in use */
5505 conf_size = confstr ( _CS_GNU_LIBPTHREAD_VERSION, confbuf, sizeof(confbuf));
5506 if ((conf_size > 4) && (strncmp (confbuf, "NPTL", 4) == 0))
5507 tls_mode = TLS_MODE_NPTL;
5508 #elif !defined(TARGET_PS3)
/* disassemble pthread_getspecific to recognize the implementation */
5509 ins = (guint32*)pthread_getspecific;
5510 /* uncond branch to the real method */
5511 if ((*ins >> 26) == 18) {
5513 val = (*ins & ~3) << 6;
5517 ins = (guint32*)(long)val;
5519 ins = (guint32*) ((char*)ins + val);
/* build reference instruction encodings to compare against */
5522 code = &cmplwi_1023;
5523 ppc_cmpli (code, 0, 0, ppc_r3, 1023);
5525 ppc_li (code, ppc_r4, 0x48);
5528 if (*ins == cmplwi_1023) {
5529 int found_lwz_284 = 0;
/* linuxthreads: look for an lwz rX, 284(rY) within the first 20 insns */
5531 for (ptk = 0; ptk < 20; ++ptk) {
5533 if (!*ins || *ins == blr_ins)
5535 if ((guint16)*ins == 284 && (*ins >> 26) == 32) {
5540 if (!found_lwz_284) {
5541 tls_mode = TLS_MODE_FAILED;
5544 tls_mode = TLS_MODE_LTHREADS;
5545 } else if (*ins == li_0x48) {
5547 /* uncond branch to the real method */
5548 if ((*ins >> 26) == 18) {
5550 val = (*ins & ~3) << 6;
5554 ins = (guint32*)(long)val;
5556 ins = (guint32*) ((char*)ins + val);
5558 code = (guint32*)&val;
5559 ppc_li (code, ppc_r0, 0x7FF2);
5560 if (ins [1] == val) {
5561 /* Darwin on G4, implement */
5562 tls_mode = TLS_MODE_FAILED;
5565 code = (guint32*)&val;
5566 ppc_mfspr (code, ppc_r3, 104);
5567 if (ins [1] != val) {
5568 tls_mode = TLS_MODE_FAILED;
5571 tls_mode = TLS_MODE_DARWIN_G5;
5574 tls_mode = TLS_MODE_FAILED;
5578 tls_mode = TLS_MODE_FAILED;
5584 if (tls_mode == TLS_MODE_DETECT)
5585 tls_mode = TLS_MODE_FAILED;
5586 if (tls_mode == TLS_MODE_FAILED)
5588 if ((lmf_pthread_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
5589 lmf_pthread_key = mono_get_lmf_addr_tls_offset();
5593 /* if not TLS_MODE_NPTL or local dynamic (as indicated by
5594 mono_get_lmf_addr_tls_offset returning -1) then use keyed access. */
5595 if (lmf_pthread_key == -1) {
5596 guint32 ptk = mono_jit_tls_id;
5598 /*g_print ("MonoLMF at: %d\n", ptk);*/
5599 /*if (!try_offset_access (mono_get_lmf_addr (), ptk)) {
5600 init_tls_failed = 1;
5603 lmf_pthread_key = ptk;
/* Architecture hook run once at JIT init: performs TLS-mode detection. */
5612 mono_arch_finish_init (void)
5614 setup_tls_access ();
/* Architecture hook for releasing per-thread JIT TLS data; no-op on PPC
 * (no body visible in this extract). */
5618 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Per-instruction-sequence byte sizes used to pre-compute IMT thunk chunk
 * sizes in mono_arch_build_imt_thunk() below. */
5622 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5624 #define LOADSTORE_SIZE 4
5625 #define JUMP_IMM_SIZE 12
5626 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
/* set to 1 to emit an assertion path for IMT key mismatches (debug aid) */
5627 #define ENABLE_WRONG_METHOD_CHECK 0
5630 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_thunk:
 * Builds the native IMT (interface method table) dispatch thunk: a
 * compare-and-branch chain over the sorted imt_entries that jumps to the
 * right vtable slot or target code, with an optional fail_tramp fallback.
 * Two passes: first size each chunk, then emit and patch the branches.
 */
5633 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5634 gpointer fail_tramp)
5638 guint8 *code, *start;
/* pass 1: accumulate chunk sizes so the total allocation is known up front */
5640 for (i = 0; i < count; ++i) {
5641 MonoIMTCheckItem *item = imt_entries [i];
5642 if (item->is_equals) {
5643 if (item->check_target_idx) {
5644 if (!item->compare_done)
5645 item->chunk_size += CMP_SIZE;
5646 if (item->has_target_code)
5647 item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE;
5649 item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE;
5652 item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2;
5653 if (!item->has_target_code)
5654 item->chunk_size += LOADSTORE_SIZE;
5656 item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE;
5657 #if ENABLE_WRONG_METHOD_CHECK
5658 item->chunk_size += CMP_SIZE + BR_SIZE + 4;
5663 item->chunk_size += CMP_SIZE + BR_SIZE;
5664 imt_entries [item->check_target_idx]->compare_done = TRUE;
5666 size += item->chunk_size;
5668 /* the initial load of the vtable address */
5669 size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE;
5671 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5673 code = mono_domain_code_reserve (domain, size);
5678 * We need to save and restore r12 because it might be
5679 * used by the caller as the vtable register, so
5680 * clobbering it will trip up the magic trampoline.
5682 * FIXME: Get rid of this by making sure that r12 is
5683 * not used as the vtable register in interface calls.
5685 ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp)
5686 ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0])));
/* pass 2: emit the compare/branch chunks */
5688 for (i = 0; i < count; ++i) {
5689 MonoIMTCheckItem *item = imt_entries [i];
5690 item->code_target = code;
5691 if (item->is_equals) {
5692 if (item->check_target_idx) {
5693 if (!item->compare_done) {
5694 ppc_load (code, ppc_r0, (gsize)item->key);
5695 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5697 item->jmp_code = code;
5698 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5699 if (item->has_target_code) {
5700 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5702 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
5703 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5705 ppc_mtctr (code, ppc_r0);
5706 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* leaf entry with fail_tramp: equality check, then fall through to the trampoline */
5709 ppc_load (code, ppc_r0, (gulong)item->key);
5710 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5711 item->jmp_code = code;
5712 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5713 if (item->has_target_code) {
5714 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5717 ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot]));
5718 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
5720 ppc_mtctr (code, ppc_r0);
5721 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5722 ppc_patch (item->jmp_code, code);
5723 ppc_load_ptr (code, ppc_r0, fail_tramp);
5724 ppc_mtctr (code, ppc_r0);
5725 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5726 item->jmp_code = NULL;
5728 /* enable the commented code to assert on wrong method */
5729 #if ENABLE_WRONG_METHOD_CHECK
5730 ppc_load (code, ppc_r0, (guint32)item->key);
5731 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5732 item->jmp_code = code;
5733 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5735 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
5736 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5737 ppc_mtctr (code, ppc_r0);
5738 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5739 #if ENABLE_WRONG_METHOD_CHECK
5740 ppc_patch (item->jmp_code, code);
5742 item->jmp_code = NULL;
/* non-equals (range check) entry: branch-if-less to the subtree target */
5747 ppc_load (code, ppc_r0, (gulong)item->key);
5748 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5749 item->jmp_code = code;
5750 ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0);
5753 /* patch the branches to get to the target items */
5754 for (i = 0; i < count; ++i) {
5755 MonoIMTCheckItem *item = imt_entries [i];
5756 if (item->jmp_code) {
5757 if (item->check_target_idx) {
5758 ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5764 mono_stats.imt_thunks_size += code - start;
5765 g_assert (code - start <= size);
5766 mono_arch_flush_icache (start, size);
5768 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
/* Returns the MonoMethod stashed in the IMT register by an interface call site. */
5774 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5776 mgreg_t *r = (mgreg_t*)regs;
5778 return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG];
/* Returns the MonoVTable passed in the RGCTX register at a static call site. */
5782 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5784 mgreg_t *r = (mgreg_t*)regs;
5786 return (MonoVTable*)(gsize) r [MONO_ARCH_RGCTX_REG];
/* Builds the DWARF CIE initial unwind program: CFA is r1 (stack pointer) + 0. */
5790 mono_arch_get_cie_program (void)
5794 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ppc_r1, 0);
/* Architecture hook for expanding intrinsic methods into IR; body not
 * visible in this extract. */
5800 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug hook for printing arch-specific IR trees; body not visible in
 * this extract. */
5807 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_context_get_int_reg:
 * Reads an integer register value out of a saved MonoContext.  The
 * context only stores r13-r31 (plus SP), hence the assertion and the
 * r13-relative indexing.
 */
5813 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5816 return (mgreg_t)MONO_CONTEXT_GET_SP (ctx);
5818 g_assert (reg >= ppc_r13);
5820 return ctx->regs [reg - ppc_r13];
/* Returns the offset of the patchable location inside a call sequence;
 * body not visible in this extract. */
5824 mono_arch_get_patch_offset (guint8 *code)
5830 * mono_aot_emit_load_got_addr:
5832 * Emit code to load the got address.
5833 * On PPC, the result is placed into r30.
/* Registers a GOT_OFFSET patch (JIT via cfg, AOT via *ji) and emits the
 * mflr/add sequence whose immediate is filled in by arch_emit_got_address (). */
5836 mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
5839 ppc_mflr (code, ppc_r30);
5841 mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5843 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5844 /* arch_emit_got_address () patches this */
5845 #if defined(TARGET_POWERPC64)
/* 32-bit path: placeholder offset loaded into r0, then added to r30 */
5851 ppc_load32 (code, ppc_r0, 0);
5852 ppc_add (code, ppc_r30, ppc_r30, ppc_r0);
5859 * mono_ppc_emit_load_aotconst:
5861 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5862 * TARGET from the mscorlib GOT in full-aot code.
5863 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
/* Registers the patch in *ji; the placeholder slot offset in r0 is later
 * filled in by arch_emit_got_access (). */
5867 mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
5869 /* Load the mscorlib got address */
5870 ppc_ldptr (code, ppc_r12, sizeof (gpointer), ppc_r30);
5871 *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
5872 /* arch_emit_got_access () patches this */
5873 ppc_load32 (code, ppc_r0, 0);
5874 ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0);
5879 /* Soft Debug support */
5880 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5887 * mono_arch_set_breakpoint:
5889 * See mini-amd64.c for docs.
/* Overwrites the sequence-point placeholder at IP with a load from the
 * bp_trigger_page; the resulting fault signals the debugger. */
5892 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5895 guint8 *orig_code = code;
5897 ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page);
5898 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
5900 g_assert (code - orig_code == BREAKPOINT_SIZE);
5902 mono_arch_flush_icache (orig_code, code - orig_code);
5906 * mono_arch_clear_breakpoint:
5908 * See mini-amd64.c for docs.
/* Replaces the BREAKPOINT_SIZE bytes at IP (presumably with nops — the
 * store line is missing from this extract) and flushes the icache. */
5911 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5916 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
5919 mono_arch_flush_icache (ip, code - ip);
5923 * mono_arch_is_breakpoint_event:
5925 * See mini-amd64.c for docs.
/* True when the faulting address from the signal falls inside the
 * breakpoint trigger page. */
5928 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5930 siginfo_t* sinfo = (siginfo_t*) info;
5931 /* Sometimes the address is off by 4 */
5932 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5939 * mono_arch_skip_breakpoint:
5941 * See mini-amd64.c for docs.
5944 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
5946 /* skip the ldptr */
/* advance the context IP past the 4-byte faulting load so execution resumes */
5947 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5955 * mono_arch_start_single_stepping:
5957 * See mini-amd64.c for docs.
/* Revoke all access to the single-step trigger page so sequence-point
 * reads fault. */
5960 mono_arch_start_single_stepping (void)
5962 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5966 * mono_arch_stop_single_stepping:
5968 * See mini-amd64.c for docs.
/* Re-enable reads on the single-step trigger page so sequence points
 * become no-ops again. */
5971 mono_arch_stop_single_stepping (void)
5973 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5977 * mono_arch_is_single_step_event:
5979 * See mini-amd64.c for docs.
/* True when the faulting address from the signal falls inside the
 * single-step trigger page. */
5982 mono_arch_is_single_step_event (void *info, void *sigctx)
5984 siginfo_t* sinfo = (siginfo_t*) info;
5985 /* Sometimes the address is off by 4 */
5986 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5993 * mono_arch_skip_single_step:
5995 * See mini-amd64.c for docs.
5998 mono_arch_skip_single_step (MonoContext *ctx)
6000 /* skip the ldptr */
/* advance the context IP past the 4-byte faulting load so execution resumes */
6001 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6005 * mono_arch_create_seq_point_info:
6007 * See mini-amd64.c for docs.
/* Body not visible in this extract. */
6010 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
/*
 * mono_arch_init_lmf_ext:
 * Initializes an extended LMF frame: links it to the previous LMF and
 * tags the previous_lmf pointer with bit 1 so unwinders can tell a
 * MonoLMFExt apart from a plain MonoLMF.
 */
6017 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
6019 ext->lmf.previous_lmf = prev_lmf;
6020 /* Mark that this is a MonoLMFExt */
6021 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
6022 ext->lmf.ebp = (gssize)ext;
6028 mono_arch_opcode_supported (int opcode)
6031 case OP_ATOMIC_ADD_I4:
6032 case OP_ATOMIC_CAS_I4:
6033 #ifdef TARGET_POWERPC64
6034 case OP_ATOMIC_ADD_I8:
6035 case OP_ATOMIC_CAS_I8: