3 * PowerPC backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
8 * Andreas Faerber <andreas.faerber@web.de>
10 * (C) 2003 Ximian, Inc.
11 * (C) 2007-2008 Andreas Faerber
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-proclib.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
24 #ifdef TARGET_POWERPC64
25 #include "cpu-ppc64.h"
32 #include <sys/sysctl.h>
38 #define FORCE_INDIR_CALL 1
49 /* cpu_hw_caps contains the flags defined below */
50 static int cpu_hw_caps = 0;
51 static int cachelinesize = 0;
52 static int cachelineinc = 0;
54 PPC_ICACHE_SNOOP = 1 << 0,
55 PPC_MULTIPLE_LS_UNITS = 1 << 1,
56 PPC_SMP_CAPABLE = 1 << 2,
59 PPC_MOVE_FPR_GPR = 1 << 5,
63 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
65 /* This mutex protects architecture specific caches */
66 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
67 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
68 static mono_mutex_t mini_arch_mutex;
70 int mono_exc_esp_offset = 0;
73 * The code generated for sequence points reads from this location, which is
74 * made read-only when single stepping is enabled.
76 static gpointer ss_trigger_page;
78 /* Enabled breakpoints read from this trigger page */
79 static gpointer bp_trigger_page;
81 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
83 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
84 inst->type = STACK_R8; \
86 inst->inst_p0 = (void*)(addr); \
87 mono_bblock_add_inst (cfg->cbb, inst); \
/*
 * mono_arch_regname:
 * Map a PPC general-purpose register number to a printable name
 * ("r0".."r31"; r1 is shown as "sp").
 * NOTE(review): this extract is fragmentary — the name table is truncated
 * after "r29" and the return statements are not visible here; confirm
 * against the full source.
 */
91 mono_arch_regname (int reg) {
92 static const char rnames[][4] = {
93 "r0", "sp", "r2", "r3", "r4",
94 "r5", "r6", "r7", "r8", "r9",
95 "r10", "r11", "r12", "r13", "r14",
96 "r15", "r16", "r17", "r18", "r19",
97 "r20", "r21", "r22", "r23", "r24",
98 "r25", "r26", "r27", "r28", "r29",
/* bounds-check the index before reading the table */
101 if (reg >= 0 && reg < 32)
/*
 * mono_arch_fregname:
 * Map a PPC floating-point register number to a printable name ("f0".."f31").
 * NOTE(review): fragmentary extract — entries past "f29" and the return
 * statements are missing from this view.
 */
107 mono_arch_fregname (int reg) {
108 static const char rnames[][4] = {
109 "f0", "f1", "f2", "f3", "f4",
110 "f5", "f6", "f7", "f8", "f9",
111 "f10", "f11", "f12", "f13", "f14",
112 "f15", "f16", "f17", "f18", "f19",
113 "f20", "f21", "f22", "f23", "f24",
114 "f25", "f26", "f27", "f28", "f29",
/* bounds-check the index before reading the table */
117 if (reg >= 0 && reg < 32)
122 /* this function overwrites r0, r11, r12 */
/*
 * emit_memcpy:
 * Emit inline PPC machine code that copies `size` bytes from
 * sreg+soffset to dreg+doffset, returning the updated code pointer.
 * Large copies use a CTR-counted word-copy loop; the remainder is
 * copied with progressively narrower load/store pairs.
 * NOTE(review): several interior lines (loop/if closers, the per-size
 * `while` headers and offset updates) are missing from this extract.
 */
124 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
126 /* unrolled, use the counter in big */
127 if (size > sizeof (gpointer) * 5) {
128 long shifted = size / SIZEOF_VOID_P;
129 guint8 *copy_loop_start, *copy_loop_jump;
/* load the word count into CTR so bdnz can drive the loop */
131 ppc_load (code, ppc_r0, shifted);
132 ppc_mtctr (code, ppc_r0);
133 //g_assert (sreg == ppc_r12);
/* pre-bias the pointers so the update-form loads/stores advance them */
134 ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (gpointer)));
135 ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (gpointer)));
136 copy_loop_start = code;
137 ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
138 ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
139 copy_loop_jump = code;
140 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
141 ppc_patch (copy_loop_jump, copy_loop_start);
142 size -= shifted * sizeof (gpointer);
143 doffset = soffset = 0;
146 #ifdef __mono_ppc64__
147 /* the hardware has multiple load/store units and the move is long
148 enough to use more then one register, then use load/load/store/store
149 to execute 2 instructions per cycle. */
150 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
152 ppc_ldptr (code, ppc_r0, soffset, sreg);
153 ppc_ldptr (code, ppc_r11, soffset+8, sreg);
154 ppc_stptr (code, ppc_r0, doffset, dreg);
155 ppc_stptr (code, ppc_r11, doffset+8, dreg);
/* single doubleword copy path */
162 ppc_ldr (code, ppc_r0, soffset, sreg);
163 ppc_str (code, ppc_r0, doffset, dreg);
/* 32-bit paired word copy when two load/store units are available */
169 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
171 ppc_lwz (code, ppc_r0, soffset, sreg);
172 ppc_lwz (code, ppc_r11, soffset+4, sreg);
173 ppc_stw (code, ppc_r0, doffset, dreg);
174 ppc_stw (code, ppc_r11, doffset+4, dreg);
/* remaining word copy */
182 ppc_lwz (code, ppc_r0, soffset, sreg);
183 ppc_stw (code, ppc_r0, doffset, dreg);
/* remaining halfword copy */
189 ppc_lhz (code, ppc_r0, soffset, sreg);
190 ppc_sth (code, ppc_r0, doffset, dreg);
/* remaining byte copy */
196 ppc_lbz (code, ppc_r0, soffset, sreg);
197 ppc_stb (code, ppc_r0, doffset, dreg);
206 * mono_arch_get_argument_info:
207 * @csig: a method signature
208 * @param_count: the number of parameters to consider
209 * @arg_info: an array to store the result infos
211 * Gathers information on parameters such as size, alignment and
212 * padding. arg_info should be large enought to hold param_count + 1 entries.
214 * Returns the size of the activation frame.
/* NOTE(review): fragmentary extract — the declarations block, the final
 * return and several closing braces are not visible here. */
217 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
219 #ifdef __mono_ppc64__
223 int k, frame_size = 0;
224 int size, align, pad;
/* a struct return value is passed via a hidden pointer argument */
227 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
228 frame_size += sizeof (gpointer);
232 arg_info [0].offset = offset;
/* account for the implicit `this` pointer, presumably — confirm in full source */
235 frame_size += sizeof (gpointer);
239 arg_info [0].size = frame_size;
241 for (k = 0; k < param_count; k++) {
/* native (pinvoke) vs. managed stack size differ for some types */
244 size = mono_type_native_stack_size (csig->params [k], (guint32*)&align);
246 size = mini_type_stack_size (csig->params [k], &align);
248 /* ignore alignment for now */
/* pad frame_size up to `align` (align is assumed to be a power of two) */
251 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
252 arg_info [k].pad = pad;
254 arg_info [k + 1].pad = 0;
255 arg_info [k + 1].size = size;
257 arg_info [k + 1].offset = offset;
/* round the whole frame up to the ABI frame alignment */
261 align = MONO_ARCH_FRAME_ALIGNMENT;
262 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
263 arg_info [k].pad = pad;
269 #ifdef __mono_ppc64__
/*
 * is_load_sequence:
 * Return TRUE if the five instructions at `seq` form the ppc64
 * 64-bit constant-load idiom lis/ori/sldi/oris/ori, checked by
 * primary opcode only.
 */
271 is_load_sequence (guint32 *seq)
273 return ppc_opcode (seq [0]) == 15 && /* lis */
274 ppc_opcode (seq [1]) == 24 && /* ori */
275 ppc_opcode (seq [2]) == 30 && /* sldi */
276 ppc_opcode (seq [3]) == 25 && /* oris */
277 ppc_opcode (seq [4]) == 24; /* ori */
/* decode helpers: destination register field (bits 21-25) and the
 * signed 16-bit displacement of a D-form load */
280 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
281 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
/* TRUE for ld (opcode 58) or lwz (opcode 32) */
285 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
287 /* code must point to the blrl */
/*
 * mono_ppc_is_direct_call_sequence:
 * Decide whether the instructions preceding the blrl/blr/bctrl at `code`
 * form a direct (thunk-less) call sequence that loads an immediate
 * address, as opposed to an indirect call through a register.
 * NOTE(review): fragmentary extract — some early-return lines and the
 * closing braces are missing from this view.
 */
289 mono_ppc_is_direct_call_sequence (guint32 *code)
291 #ifdef __mono_ppc64__
292 g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420);
294 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
295 if (ppc_opcode (code [-1]) == 31) { /* mtlr */
296 if (ppc_is_load_op (code [-2]) && ppc_is_load_op (code [-3])) { /* ld/ld */
297 if (!is_load_sequence (&code [-8]))
299 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
300 return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == sizeof (gpointer)) ||
301 (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == sizeof (gpointer));
303 if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */
304 return is_load_sequence (&code [-8]);
306 return is_load_sequence (&code [-6]);
/* 32-bit variant: only blrl is expected here */
310 g_assert(*code == 0x4e800021);
312 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
313 return ppc_opcode (code [-1]) == 31 &&
314 ppc_opcode (code [-2]) == 24 &&
315 ppc_opcode (code [-3]) == 15;
319 #define MAX_ARCH_DELEGATE_PARAMS 7
/*
 * get_delegate_invoke_impl:
 * Generate the small trampoline used to invoke a delegate: load
 * MonoDelegate->method_ptr into CTR and branch to it. The has_target
 * variant replaces the `this` argument with MonoDelegate->target; the
 * no-target variant shifts the register arguments down by one instead.
 * Returns the code start via *info as registered trampoline info.
 * NOTE(review): fragmentary extract — #else/#endif partners, the
 * has_target/no-target branch structure and the return are partly missing.
 */
322 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count, gboolean aot)
324 guint8 *code, *start;
327 int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE;
329 start = code = mono_global_codeman_reserve (size);
331 code = mono_ppc_create_pre_code_ftnptr (code);
333 /* Replace the this argument with the target */
334 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
335 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
336 /* it's a function descriptor */
337 /* Can't use ldptr as it doesn't work with r0 */
338 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
340 ppc_mtctr (code, ppc_r0);
341 ppc_ldptr (code, ppc_r3, MONO_STRUCT_OFFSET (MonoDelegate, target), ppc_r3);
342 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
344 g_assert ((code - start) <= size);
346 mono_arch_flush_icache (start, size);
/* no-target variant: one extra mr per parameter to slide args down */
350 size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE;
351 start = code = mono_global_codeman_reserve (size);
353 code = mono_ppc_create_pre_code_ftnptr (code);
355 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
356 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
357 /* it's a function descriptor */
358 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
360 ppc_mtctr (code, ppc_r0);
361 /* slide down the arguments */
362 for (i = 0; i < param_count; ++i) {
363 ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
365 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
367 g_assert ((code - start) <= size);
369 mono_arch_flush_icache (start, size);
/* register the generated code under a descriptive trampoline name */
373 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
375 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
376 *info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
/*
 * mono_arch_get_delegate_invoke_impls:
 * Build the full set of delegate-invoke trampolines for AOT: the
 * has-target variant plus one no-target variant per parameter count
 * up to MAX_ARCH_DELEGATE_PARAMS. Returns the accumulated GSList
 * (return statement not visible in this extract).
 */
384 mono_arch_get_delegate_invoke_impls (void)
390 get_delegate_invoke_impl (&info, TRUE, 0, TRUE);
391 res = g_slist_prepend (res, info);
393 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
394 get_delegate_invoke_impl (&info, FALSE, i, TRUE);
395 res = g_slist_prepend (res, info);
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and lazily create/cache) the delegate-invoke trampoline
 * matching `sig`/`has_target`. In AOT mode the trampoline is looked up
 * by name instead of being generated.
 * NOTE(review): fragmentary extract — cache hit/early-return lines and
 * several closing braces are missing here.
 */
402 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
404 guint8 *code, *start;
406 /* FIXME: Support more cases */
407 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* single cached entry for the has-target case */
411 static guint8* cached = NULL;
417 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
420 start = get_delegate_invoke_impl (&info, TRUE, 0, FALSE);
421 mono_tramp_info_register (info, NULL);
/* publish the pointer only after the code is fully visible */
423 mono_memory_barrier ();
/* per-parameter-count cache for the no-target case */
427 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
430 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* only register-sized parameters are supported by the trampoline */
432 for (i = 0; i < sig->param_count; ++i)
433 if (!mono_is_regsize_var (sig->params [i]))
437 code = cache [sig->param_count];
442 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
443 start = mono_aot_get_trampoline (name);
447 start = get_delegate_invoke_impl (&info, FALSE, sig->param_count, FALSE);
448 mono_tramp_info_register (info, NULL);
451 mono_memory_barrier ();
453 cache [sig->param_count] = start;
/* NOTE(review): body of this function is not visible in this extract */
459 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the `this` argument from a saved register state: on PPC the
 * first argument is always passed in r3.
 */
465 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
467 mgreg_t *r = (mgreg_t*)regs;
469 return (gpointer)(gsize)r [ppc_r3];
/* upper bound on /proc/self/auxv entries read in mono_arch_init */
477 #define MAX_AUX_ENTRIES 128
479 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
480 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
482 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
484 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
485 #define ISA_64 0x40000000
487 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
488 #define ISA_MOVE_FPR_GPR 0x00000200
490 * Initialize the cpu to execute managed code.
493 mono_arch_cpu_init (void)
498 * Initialize architecture specific code.
/*
 * mono_arch_init:
 * One-time backend setup: detect the data cache line size per platform,
 * translate mono-hwcap probe results into cpu_hw_caps flags, create the
 * arch mutex and the single-step/breakpoint trigger pages.
 * NOTE(review): fragmentary extract — fallback cacheline defaults, fclose
 * of the auxv file and several #endif lines are missing here.
 */
501 mono_arch_init (void)
503 #if defined(MONO_CROSS_COMPILE)
504 #elif defined(__APPLE__)
/* macOS: ask sysctl for the hardware cache line size */
506 size_t len = sizeof (cachelinesize);
509 mib [1] = HW_CACHELINE;
511 if (sysctl (mib, 2, &cachelinesize, &len, NULL, 0) == -1) {
515 cachelineinc = cachelinesize;
517 #elif defined(__linux__)
/* Linux: read AT_DCACHEBSIZE from the ELF auxiliary vector */
518 AuxVec vec [MAX_AUX_ENTRIES];
519 int i, vec_entries = 0;
520 /* sadly this will work only with 2.6 kernels... */
521 FILE* f = fopen ("/proc/self/auxv", "rb");
524 vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f);
528 for (i = 0; i < vec_entries; i++) {
529 int type = vec [i].type;
531 if (type == 19) { /* AT_DCACHEBSIZE */
532 cachelinesize = vec [i].value;
536 #elif defined(G_COMPILER_CODEWARRIOR)
540 //#error Need a way to get cache line size
/* fold the hwcap probe results into the backend's capability mask */
543 if (mono_hwcap_ppc_has_icache_snoop)
544 cpu_hw_caps |= PPC_ICACHE_SNOOP;
546 if (mono_hwcap_ppc_is_isa_2x)
547 cpu_hw_caps |= PPC_ISA_2X;
549 if (mono_hwcap_ppc_is_isa_64)
550 cpu_hw_caps |= PPC_ISA_64;
552 if (mono_hwcap_ppc_has_move_fpr_gpr)
553 cpu_hw_caps |= PPC_MOVE_FPR_GPR;
555 if (mono_hwcap_ppc_has_multiple_ls_units)
556 cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS;
562 cachelineinc = cachelinesize;
/* SMP machines need dcbf-based flushing in mono_arch_flush_icache */
564 if (mono_cpu_count () > 1)
565 cpu_hw_caps |= PPC_SMP_CAPABLE;
567 mono_os_mutex_init_recursive (&mini_arch_mutex);
/* read-only trigger pages used by sequence points / breakpoints;
 * the bp page is made inaccessible so reads fault when a bp is armed */
569 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
570 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
571 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
573 mono_aot_register_jit_icall ("mono_ppc_throw_exception", mono_ppc_throw_exception);
575 // FIXME: Fix partial sharing for power and remove this
576 mono_set_partial_sharing_supported (FALSE);
580 * Cleanup architecture specific code.
583 mono_arch_cleanup (void)
585 mono_os_mutex_destroy (&mini_arch_mutex);
/* NOTE(review): body not visible in this extract */
589 mono_arch_have_fast_tls (void)
595 * This function returns the optimizations supported on this cpu.
/* no exclusions and no extra opts are reported (see comment below) */
598 mono_arch_cpu_optimizations (guint32 *exclude_mask)
602 /* no ppc-specific optimizations yet */
608 * This function test for all SIMD functions supported.
610 * Returns a bitmask corresponding to all supported versions.
614 mono_arch_cpu_enumerate_simd_versions (void)
616 /* SIMD is currently unimplemented */
/* Helpers to include a switch case only on the matching word size:
 * on ppc64 CASE_PPC64 expands to `case c:` and CASE_PPC32 to nothing,
 * and vice versa on 32-bit. */
620 #ifdef __mono_ppc64__
621 #define CASE_PPC32(c)
622 #define CASE_PPC64(c) case c:
624 #define CASE_PPC32(c) case c:
625 #define CASE_PPC64(c)
/*
 * is_regsize_var:
 * TRUE when values of type `t` fit in a single integer register and may
 * be globally register-allocated (ints, pointers, references; I8/U8
 * only on ppc64; reference-typed generic insts).
 * NOTE(review): fragmentary — the byref early-return, several cases and
 * the return statements are missing from this view.
 */
629 is_regsize_var (MonoType *t) {
632 t = mini_get_underlying_type (t);
636 CASE_PPC64 (MONO_TYPE_I8)
637 CASE_PPC64 (MONO_TYPE_U8)
641 case MONO_TYPE_FNPTR:
643 case MONO_TYPE_OBJECT:
644 case MONO_TYPE_STRING:
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_SZARRAY:
647 case MONO_TYPE_ARRAY:
649 case MONO_TYPE_GENERICINST:
650 if (!mono_type_generic_inst_is_valuetype (t))
653 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method's local/argument variables that are candidates for
 * global integer register allocation, sorted for the allocator.
 */
661 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
666 for (i = 0; i < cfg->num_varinfo; i++) {
667 MonoInst *ins = cfg->varinfo [i];
668 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or dead vars are skipped */
671 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
/* volatile/indirect vars must live in memory */
674 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
677 /* we can only allocate 32 bit values */
678 if (is_regsize_var (ins->inst_vtype)) {
679 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
680 g_assert (i == vmv->idx);
681 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
687 #endif /* ifndef DISABLE_JIT */
/*
 * mono_arch_get_global_int_regs:
 * Return the callee-saved GPRs (r14..) usable for global register
 * allocation, excluding the frame register and r29 under AOT.
 * NOTE(review): fragmentary — the `top` initialization and return are
 * missing from this view.
 */
690 mono_arch_get_global_int_regs (MonoCompile *cfg)
694 if (cfg->frame_reg != ppc_sp)
696 /* ppc_r13 is used by the system on PPC EABI */
697 for (i = 14; i < top; ++i) {
699 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
700 * since the trampolines can clobber r12.
702 if (!(cfg->compile_aot && i == 29))
703 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
710 * mono_arch_regalloc_cost:
712 * Return the cost, in number of memory references, of the action of
713 * allocating the variable VMV into a register during global register
/* NOTE(review): body not visible in this extract */
717 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Make freshly written machine code visible to instruction fetch:
 * write back the data cache (dcbst, or dcbf on SMP) and invalidate the
 * instruction cache (icbi) over the cacheline-aligned range, with the
 * sync/isync ordering the ISA requires. No-op when cross-compiling.
 * NOTE(review): fragmentary extract — endp computation, several asm
 * statements and the closing sync/isync lines are missing here.
 */
724 mono_arch_flush_icache (guint8 *code, gint size)
726 #ifdef MONO_CROSS_COMPILE
729 guint8 *endp, *start;
/* round start down to a cacheline boundary */
733 start = (guint8*)((gsize)start & ~(cachelinesize - 1));
734 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
735 #if defined(G_COMPILER_CODEWARRIOR)
736 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
737 for (p = start; p < endp; p += cachelineinc) {
741 for (p = start; p < endp; p += cachelineinc) {
747 for (p = start; p < endp; p += cachelineinc) {
758 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
759 * The sync is required to insure that the store queue is completely empty.
760 * While the icbi performs no cache operations, icbi/isync is required to
761 * kill local prefetch.
763 if (cpu_hw_caps & PPC_ICACHE_SNOOP) {
765 asm ("icbi 0,%0;" : : "r"(code) : "memory");
769 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
770 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
771 for (p = start; p < endp; p += cachelineinc) {
772 asm ("dcbf 0,%0;" : : "r"(p) : "memory");
775 for (p = start; p < endp; p += cachelineinc) {
776 asm ("dcbst 0,%0;" : : "r"(p) : "memory");
781 for (p = start; p < endp; p += cachelineinc) {
782 /* for ISA2.0+ implementations we should not need any extra sync between the
783 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
784 * So I am not sure which chip had this problem but its not an issue on
785 * of the ISA V2 chips.
787 if (cpu_hw_caps & PPC_ISA_2X)
788 asm ("icbi 0,%0;" : : "r"(p) : "memory");
790 asm ("icbi 0,%0; sync;" : : "r"(p) : "memory");
792 if (!(cpu_hw_caps & PPC_ISA_2X))
/* no register windows on PPC — nothing to do */
800 mono_arch_flush_register_windows (void)
/* Calling-convention knobs: whether register args also reserve stack
 * space, and whether FP args also consume GPR slots. Expanding to the
 * statement enables the behavior; expanding to nothing disables it. */
805 #define ALWAYS_ON_STACK(s) s
806 #define FP_ALSO_IN_REG(s) s
808 #ifdef __mono_ppc64__
809 #define ALWAYS_ON_STACK(s) s
810 #define FP_ALSO_IN_REG(s) s
812 #define ALWAYS_ON_STACK(s)
813 #define FP_ALSO_IN_REG(s)
815 #define ALIGN_DOUBLES
/* ArgInfo/CallInfo fragments: per-argument classification used by
 * get_call_info (register kind, sizes, padding).
 * NOTE(review): the surrounding enum/struct definitions are truncated
 * in this extract. */
824 RegTypeFPStructByVal, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
829 guint32 vtsize; /* in param area */
831 guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */
832 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
833 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */
834 guint8 bytes : 4; /* size in bytes - only valid for
835 RegTypeStructByVal/RegTypeFPStructByVal if the struct fits
836 in one word, otherwise it's 0*/
845 gboolean vtype_retaddr;
853 #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS
855 // Test if a structure is completely composed of either float XOR double fields and has fewer than
856 // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTER members.
857 // If this is true the structure can be returned directly via float registers instead of by a hidden parameter
858 // pointing to where the return value should be stored.
859 // This is as per the ELF ABI v2.
862 is_float_struct_returnable_via_regs (MonoType *type, int* member_cnt, int* member_size)
864 int local_member_cnt, local_member_size;
// callers may pass NULL for the out-params; substitute locals so the
// mini_type_is_hfa call below always has storage to write into
866 member_cnt = &local_member_cnt;
869 member_size = &local_member_size;
872 gboolean is_all_floats = mini_type_is_hfa(type, member_cnt, member_size);
873 return is_all_floats && (*member_cnt <= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS);
// non-ELFv2 configurations: never return float structs in FRs
877 #define is_float_struct_returnable_via_regs(a,b,c) (FALSE)
881 #if PPC_RETURN_SMALL_STRUCTS_IN_REGS
883 // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is
884 // completely composed of fields all of basic types.
885 // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter
886 // pointing to where the return value should be stored.
887 // This is as per the ELF ABI v2.
// NOTE(review): fragmentary — the final return and several closing
// braces of this recursive check are not visible here.
890 is_struct_returnable_via_regs (MonoClass *klass, gboolean is_pinvoke)
892 gboolean has_a_field = FALSE;
895 gpointer iter = NULL;
898 size = mono_type_native_stack_size (&klass->byval_arg, 0);
900 size = mini_type_stack_size (&klass->byval_arg, 0);
903 if (size > PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS)
// walk instance fields; any non-basic, non-struct field disqualifies
905 while ((f = mono_class_get_fields (klass, &iter))) {
906 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
907 // TBD: Is there a better way to check for the basic types?
908 if (f->type->byref) {
910 } else if ((f->type->type >= MONO_TYPE_BOOLEAN) && (f->type->type <= MONO_TYPE_R8)) {
912 } else if (MONO_TYPE_ISSTRUCT (f->type)) {
// nested structs must themselves qualify (recursive check)
913 MonoClass *klass = mono_class_from_mono_type (f->type);
914 if (is_struct_returnable_via_regs(klass, is_pinvoke)) {
// configurations without small-struct register return
929 #define is_struct_returnable_via_regs(a,b) (FALSE)
/*
 * add_general:
 * Assign one integer/pointer argument either to the next free argument
 * GPR (*gr) or to a caller-frame stack slot, advancing *gr/*stack_size.
 * `simple` distinguishes register-sized values from 64-bit pairs on
 * 32-bit targets.
 * NOTE(review): fragmentary — the register-assignment branches and some
 * alignment lines are missing from this view.
 */
934 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
936 #ifdef __mono_ppc64__
/* out of argument registers: place the value in the caller's frame */
941 if (*gr >= 3 + PPC_NUM_REG_ARGS) {
942 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
943 ainfo->reg = ppc_sp; /* in the caller */
944 ainfo->regtype = RegTypeBase;
945 *stack_size += sizeof (gpointer);
947 ALWAYS_ON_STACK (*stack_size += sizeof (gpointer));
/* 64-bit value on 32-bit target needs an even/odd register pair */
951 if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) {
953 //*stack_size += (*stack_size % 8);
955 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
956 ainfo->reg = ppc_sp; /* in the caller */
957 ainfo->regtype = RegTypeBase;
964 ALWAYS_ON_STACK (*stack_size += 8);
972 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
/*
 * has_only_a_r48_field:
 * TRUE when the struct's only instance field is a single float (R4) or
 * double (R8) — such structs can be passed like a bare FP value.
 * NOTE(review): the have_field bookkeeping and return are truncated here.
 */
974 has_only_a_r48_field (MonoClass *klass)
978 gboolean have_field = FALSE;
980 while ((f = mono_class_get_fields (klass, &iter))) {
981 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
984 if (!f->type->byref && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8))
/*
 * get_call_info:
 * Classify every argument and the return value of `sig` according to
 * the PPC calling convention: which register (GPR/FPR) or caller-frame
 * stack slot each one occupies, how structs are split across registers,
 * where the hidden vtype-return pointer goes, and the total stack space
 * the call needs (cinfo->stack_usage). The result is a heap-allocated
 * CallInfo the caller must free.
 * NOTE(review): this extract is heavily truncated — many case labels,
 * else branches, closing braces and the final `return cinfo;` are not
 * visible; comments below describe only what the visible lines show.
 */
995 get_call_info (MonoMethodSignature *sig)
997 guint i, fr, gr, pstart;
998 int n = sig->hasthis + sig->param_count;
999 MonoType *simpletype;
1000 guint32 stack_size = 0;
1001 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
1002 gboolean is_pinvoke = sig->pinvoke;
/* first free FP and GP argument registers */
1004 fr = PPC_FIRST_FPARG_REG;
1005 gr = PPC_FIRST_ARG_REG;
1007 /* FIXME: handle returning a struct */
1008 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1009 cinfo->vtype_retaddr = TRUE;
1015 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1016 * the first argument, allowing 'this' to be always passed in the first arg reg.
1017 * Also do this if the first argument is a reference type, since virtual calls
1018 * are sometimes made using calli without sig->hasthis set, like in the delegate
1021 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
1023 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1026 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* hidden struct-return pointer goes after the first argument */
1030 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1031 cinfo->struct_ret = cinfo->ret.reg;
1032 cinfo->vret_arg_index = 1;
1036 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1040 if (cinfo->vtype_retaddr) {
1041 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1042 cinfo->struct_ret = cinfo->ret.reg;
1046 DEBUG(printf("params: %d\n", sig->param_count));
1047 for (i = pstart; i < sig->param_count; ++i) {
1048 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1049 /* Prevent implicit arguments and sig_cookie from
1050 being passed in registers */
1051 gr = PPC_LAST_ARG_REG + 1;
1052 /* FIXME: don't we have to set fr, too? */
1053 /* Emit the signature cookie just before the implicit arguments */
1054 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1056 DEBUG(printf("param %d: ", i));
1057 if (sig->params [i]->byref) {
1058 DEBUG(printf("byref\n"));
/* byref args are always a single pointer */
1059 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1063 simpletype = mini_get_underlying_type (sig->params [i]);
1064 switch (simpletype->type) {
1065 case MONO_TYPE_BOOLEAN:
1068 cinfo->args [n].size = 1;
1069 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1072 case MONO_TYPE_CHAR:
1075 cinfo->args [n].size = 2;
1076 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1081 cinfo->args [n].size = 4;
1082 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1088 case MONO_TYPE_FNPTR:
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_OBJECT:
1091 case MONO_TYPE_STRING:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1094 cinfo->args [n].size = sizeof (gpointer);
1095 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1098 case MONO_TYPE_GENERICINST:
/* reference-typed generic insts are passed like object refs */
1099 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1100 cinfo->args [n].size = sizeof (gpointer);
1101 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1106 case MONO_TYPE_VALUETYPE:
1107 case MONO_TYPE_TYPEDBYREF: {
1109 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1110 if (simpletype->type == MONO_TYPE_TYPEDBYREF)
1111 size = sizeof (MonoTypedRef);
1112 else if (is_pinvoke)
1113 size = mono_class_native_size (klass, NULL);
1115 size = mono_class_value_size (klass, NULL);
1117 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
/* single-float/double struct: pass like a bare FP value */
1118 if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) {
1119 cinfo->args [n].size = size;
1121 /* It was 7, now it is 8 in LinuxPPC */
1122 if (fr <= PPC_LAST_FPARG_REG) {
1123 cinfo->args [n].regtype = RegTypeFP;
1124 cinfo->args [n].reg = fr;
1126 FP_ALSO_IN_REG (gr ++);
1127 #if !defined(__mono_ppc64__)
1129 FP_ALSO_IN_REG (gr ++);
1131 ALWAYS_ON_STACK (stack_size += size);
1133 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1134 cinfo->args [n].regtype = RegTypeBase;
1135 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1142 DEBUG(printf ("load %d bytes struct\n",
1143 mono_class_native_size (sig->params [i]->data.klass, NULL)));
1145 #if PPC_PASS_STRUCTS_BY_VALUE
1147 int align_size = size;
/* GPRs still available for splitting the struct across registers */
1149 int rest = PPC_LAST_ARG_REG - gr + 1;
1152 #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS
1155 gboolean is_all_floats = is_float_struct_returnable_via_regs (sig->params [i], &mbr_cnt, &mbr_size);
1157 if (is_all_floats) {
1158 rest = PPC_LAST_FPARG_REG - fr + 1;
1160 // Pass small (<= 8 member) structures entirely made up of either float or double members
1161 // in FR registers. There have to be at least mbr_cnt registers left.
1162 if (is_all_floats &&
1163 (rest >= mbr_cnt)) {
1165 n_in_regs = MIN (rest, nregs);
1166 cinfo->args [n].regtype = RegTypeFPStructByVal;
1167 cinfo->args [n].vtregs = n_in_regs;
1168 cinfo->args [n].size = mbr_size;
1169 cinfo->args [n].vtsize = nregs - n_in_regs;
1170 cinfo->args [n].reg = fr;
1172 if (mbr_size == 4) {
/* two floats share one GPR-sized slot */
1174 FP_ALSO_IN_REG (gr += (n_in_regs+1)/2);
1177 FP_ALSO_IN_REG (gr += (n_in_regs));
/* round the struct size up to whole pointer-sized registers */
1182 align_size += (sizeof (gpointer) - 1);
1183 align_size &= ~(sizeof (gpointer) - 1);
1184 nregs = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1185 n_in_regs = MIN (rest, nregs);
1189 /* FIXME: check this */
1190 if (size >= 3 && size % 4 != 0)
1193 cinfo->args [n].regtype = RegTypeStructByVal;
1194 cinfo->args [n].vtregs = n_in_regs;
1195 cinfo->args [n].size = n_in_regs;
1196 cinfo->args [n].vtsize = nregs - n_in_regs;
1197 cinfo->args [n].reg = gr;
1201 #ifdef __mono_ppc64__
1202 if (nregs == 1 && is_pinvoke)
1203 cinfo->args [n].bytes = size;
1206 cinfo->args [n].bytes = 0;
1207 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1208 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1209 stack_size += nregs * sizeof (gpointer);
/* struct passed by hidden reference instead of by value */
1212 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1213 cinfo->args [n].regtype = RegTypeStructByAddr;
1214 cinfo->args [n].vtsize = size;
/* 64-bit integer argument */
1221 cinfo->args [n].size = 8;
1222 add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8);
/* float (R4) argument */
1226 cinfo->args [n].size = 4;
1228 /* It was 7, now it is 8 in LinuxPPC */
1229 if (fr <= PPC_LAST_FPARG_REG
1230 // For non-native vararg calls the parms must go in storage
1231 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1233 cinfo->args [n].regtype = RegTypeFP;
1234 cinfo->args [n].reg = fr;
1236 FP_ALSO_IN_REG (gr ++);
1237 ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER);
1239 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4);
1240 cinfo->args [n].regtype = RegTypeBase;
1241 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1242 stack_size += SIZEOF_REGISTER;
/* double (R8) argument */
1247 cinfo->args [n].size = 8;
1248 /* It was 7, now it is 8 in LinuxPPC */
1249 if (fr <= PPC_LAST_FPARG_REG
1250 // For non-native vararg calls the parms must go in storage
1251 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1253 cinfo->args [n].regtype = RegTypeFP;
1254 cinfo->args [n].reg = fr;
1256 FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER);
1257 ALWAYS_ON_STACK (stack_size += 8);
1259 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1260 cinfo->args [n].regtype = RegTypeBase;
1261 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1267 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* vararg sentinel at the very end of the parameter list */
1272 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1273 /* Prevent implicit arguments and sig_cookie from
1274 being passed in registers */
1275 gr = PPC_LAST_ARG_REG + 1;
1276 /* Emit the signature cookie just before the implicit arguments */
1277 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* classify the return value */
1281 simpletype = mini_get_underlying_type (sig->ret);
1282 switch (simpletype->type) {
1283 case MONO_TYPE_BOOLEAN:
1288 case MONO_TYPE_CHAR:
1294 case MONO_TYPE_FNPTR:
1295 case MONO_TYPE_CLASS:
1296 case MONO_TYPE_OBJECT:
1297 case MONO_TYPE_SZARRAY:
1298 case MONO_TYPE_ARRAY:
1299 case MONO_TYPE_STRING:
1300 cinfo->ret.reg = ppc_r3;
1304 cinfo->ret.reg = ppc_r3;
/* FP results come back in f1 */
1308 cinfo->ret.reg = ppc_f1;
1309 cinfo->ret.regtype = RegTypeFP;
1311 case MONO_TYPE_GENERICINST:
1312 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1313 cinfo->ret.reg = ppc_r3;
1317 case MONO_TYPE_VALUETYPE:
1319 case MONO_TYPE_TYPEDBYREF:
1320 case MONO_TYPE_VOID:
1323 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1327 /* align stack size to 16 */
1328 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1329 stack_size = (stack_size + 15) & ~15;
1331 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * A tail call is possible only when the callee needs no more stack than
 * the caller and passes nothing by address on the callee's stack
 * (struct returns or RegTypeStructByAddr args disqualify it).
 * NOTE(review): fragmentary — the `res = FALSE` lines, CallInfo frees
 * and the return are missing from this view.
 */
1336 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1342 c1 = get_call_info (caller_sig);
1343 c2 = get_call_info (callee_sig);
1344 res = c1->stack_usage >= c2->stack_usage;
1345 if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret))
1346 /* An address on the callee's stack is passed as the first argument */
1348 for (i = 0; i < c2->nargs; ++i) {
1349 if (c2->args [i].regtype == RegTypeStructByAddr)
1350 /* An address on the callee's stack is passed as the argument */
1355 if (!mono_debug_count ())
/*
 * NOTE(review): sampled extract — original source lines are missing between the
 * embedded line numbers; code tokens below are preserved verbatim.
 *
 * Assigns a stack slot or register to every variable (return value, locals,
 * arguments) according to the PPC calling convention, and computes the final
 * frame size in m->stack_offset.
 */
1366 * Set var information according to the calling convention. ppc version.
1367 * The locals var stuff should most likely be split in another method.
1370 mono_arch_allocate_vars (MonoCompile *m)
1372 MonoMethodSignature *sig;
1373 MonoMethodHeader *header;
1375 int i, offset, size, align, curinst;
1376 int frame_reg = ppc_sp;
1378 guint32 locals_stack_size, locals_stack_align;
1380 m->flags |= MONO_CFG_HAS_SPILLUP;
/* Reserve an outgoing-parameter area of at least 8 pointer slots for tracing,
 * vararg methods and managed-to-native wrappers (each case below). */
1382 /* allow room for the vararg method args: void* and long/double */
1383 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1384 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1385 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1386 * call convs needs to be handled this way.
1388 if (m->flags & MONO_CFG_HAS_VARARGS)
1389 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1390 /* gtk-sharp and other broken code will dllimport vararg functions even with
1391 * non-varargs signatures. Since there is little hope people will get this right
1392 * we assume they won't.
1394 if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
1395 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1400 * We use the frame register also for any method that has
1401 * exception clauses. This way, when the handlers are called,
1402 * the code will reference local variables using the frame reg instead of
1403 * the stack pointer: if we had to restore the stack pointer, we'd
1404 * corrupt the method frames that are already on the stack (since
1405 * filters get called before stack unwinding happens) when the filter
1406 * code would call any method (this also applies to finally etc.).
1408 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1409 frame_reg = ppc_r31;
1410 m->frame_reg = frame_reg;
1411 if (frame_reg != ppc_sp) {
1412 m->used_int_regs |= 1 << frame_reg;
1415 sig = mono_method_signature (m->method);
/* Return value placement: structs and integers via r3, floats via f1. */
1419 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1420 m->ret->opcode = OP_REGVAR;
1421 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1423 /* FIXME: handle long values? */
1424 switch (mini_get_underlying_type (sig->ret)->type) {
1425 case MONO_TYPE_VOID:
1429 m->ret->opcode = OP_REGVAR;
1430 m->ret->inst_c0 = m->ret->dreg = ppc_f1;
1433 m->ret->opcode = OP_REGVAR;
1434 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1438 /* local vars are at a positive offset from the stack pointer */
1440 * also note that if the function uses alloca, we use ppc_r31
1441 * to point at the local variables.
1443 offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */
1444 /* align the offset to 16 bytes: not sure this is needed here */
1446 //offset &= ~(16 - 1);
1448 /* add parameter area size for called functions */
1449 offset += m->param_area;
1451 offset &= ~(16 - 1);
1453 /* allow room to save the return value */
1454 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1457 /* the MonoLMF structure is stored just below the stack pointer */
1460 /* this stuff should not be needed on ppc and the new jit,
1461 * because a call on ppc to the handlers doesn't change the
1462 * stack pointer and the jist doesn't manipulate the stack pointer
1463 * for operations involving valuetypes.
1465 /* reserve space to store the esp */
1466 offset += sizeof (gpointer);
1468 /* this is a global constant */
1469 mono_exc_esp_offset = offset;
/* vret_addr: pointer-aligned slot holding the address a struct return is written to. */
1472 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1473 offset += sizeof(gpointer) - 1;
1474 offset &= ~(sizeof(gpointer) - 1);
1476 m->vret_addr->opcode = OP_REGOFFSET;
1477 m->vret_addr->inst_basereg = frame_reg;
1478 m->vret_addr->inst_offset = offset;
1480 if (G_UNLIKELY (m->verbose_level > 1)) {
1481 printf ("vret_addr =");
1482 mono_print_ins (m->vret_addr);
1485 offset += sizeof(gpointer);
/* Lay out locals via the generic stack-slot allocator; offsets[i] == -1 means
 * the variable did not get a stack slot. */
1488 offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align);
1489 if (locals_stack_align) {
1490 offset += (locals_stack_align - 1);
1491 offset &= ~(locals_stack_align - 1);
1493 for (i = m->locals_start; i < m->num_varinfo; i++) {
1494 if (offsets [i] != -1) {
1495 MonoInst *inst = m->varinfo [i];
1496 inst->opcode = OP_REGOFFSET;
1497 inst->inst_basereg = frame_reg;
1498 inst->inst_offset = offset + offsets [i];
1500 g_print ("allocating local %d (%s) to %d\n",
1501 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1505 offset += locals_stack_size;
/* Arguments not kept in registers get pointer-aligned frame slots. */
1509 inst = m->args [curinst];
1510 if (inst->opcode != OP_REGVAR) {
1511 inst->opcode = OP_REGOFFSET;
1512 inst->inst_basereg = frame_reg;
1513 offset += sizeof (gpointer) - 1;
1514 offset &= ~(sizeof (gpointer) - 1);
1515 inst->inst_offset = offset;
1516 offset += sizeof (gpointer);
1521 for (i = 0; i < sig->param_count; ++i) {
1522 inst = m->args [curinst];
1523 if (inst->opcode != OP_REGVAR) {
1524 inst->opcode = OP_REGOFFSET;
1525 inst->inst_basereg = frame_reg;
/* P/Invoke arguments use native sizes; managed ones use managed sizes. */
1527 size = mono_type_native_stack_size (sig->params [i], (guint32*)&align);
1528 inst->backend.is_pinvoke = 1;
1530 size = mono_type_size (sig->params [i], &align);
1532 if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (gpointer))
1533 size = align = sizeof (gpointer);
1535 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1536 * they are saved using std in the prolog.
1538 align = sizeof (gpointer);
1539 offset += align - 1;
1540 offset &= ~(align - 1);
1541 inst->inst_offset = offset;
1547 /* some storage for fp conversions */
1550 m->arch.fp_conv_var_offset = offset;
1553 /* align the offset to 16 bytes */
1555 offset &= ~(16 - 1);
1558 m->stack_offset = offset;
/* Vararg callers: record where the signature cookie lives. */
1560 if (sig->call_convention == MONO_CALL_VARARG) {
1561 CallInfo *cinfo = get_call_info (m->method->signature);
1563 m->sig_cookie = cinfo->sig_cookie.offset;
/*
 * NOTE(review): sampled extract — some original lines are missing; tokens preserved verbatim.
 * Creates the vret_addr variable (hidden struct-return address argument) before
 * register allocation runs.
 */
1570 mono_arch_create_vars (MonoCompile *cfg)
1572 MonoMethodSignature *sig = mono_method_signature (cfg->method);
1574 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1575 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1579 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1580 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Stores the vararg signature cookie (the MonoMethodSignature* of the call) into
 * its stack slot relative to r1, so the callee can decode the trailing varargs.
 */
1584 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1586 int sig_reg = mono_alloc_ireg (cfg);
1588 /* FIXME: Add support for signature tokens to AOT */
1589 cfg->disable_aot = TRUE;
1591 MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature);
1592 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG,
1593 ppc_r1, cinfo->sig_cookie.offset, sig_reg);
/*
 * NOTE(review): sampled extract — original lines (notably else branches) are
 * missing between the embedded line numbers; tokens preserved verbatim.
 *
 * Lowers a call instruction: moves each argument into its assigned register or
 * stack slot per the CallInfo classification computed by get_call_info ().
 */
1597 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1600 MonoMethodSignature *sig;
1604 sig = call->signature;
1605 n = sig->param_count + sig->hasthis;
1607 cinfo = get_call_info (sig);
1609 for (i = 0; i < n; ++i) {
1610 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument (i < hasthis) is typed as a plain pointer. */
1613 if (i >= sig->hasthis)
1614 t = sig->params [i - sig->hasthis];
1616 t = &mono_defaults.int_class->byval_arg;
1617 t = mini_get_underlying_type (t);
/* Vararg calls: emit the signature cookie right before the first vararg. */
1619 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
1620 emit_sig_cookie (cfg, call, cinfo);
1622 in = call->args [i];
1624 if (ainfo->regtype == RegTypeGeneral) {
/* On 32-bit PPC a 64-bit value occupies a register pair (reg, reg + 1). */
1625 #ifndef __mono_ppc64__
1626 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1627 MONO_INST_NEW (cfg, ins, OP_MOVE);
1628 ins->dreg = mono_alloc_ireg (cfg);
1629 ins->sreg1 = MONO_LVREG_LS (in->dreg);
1630 MONO_ADD_INS (cfg->cbb, ins);
1631 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1633 MONO_INST_NEW (cfg, ins, OP_MOVE);
1634 ins->dreg = mono_alloc_ireg (cfg);
1635 ins->sreg1 = MONO_LVREG_MS (in->dreg);
1636 MONO_ADD_INS (cfg->cbb, ins);
1637 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1641 MONO_INST_NEW (cfg, ins, OP_MOVE);
1642 ins->dreg = mono_alloc_ireg (cfg);
1643 ins->sreg1 = in->dreg;
1644 MONO_ADD_INS (cfg->cbb, ins);
1646 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Value types passed by hidden reference: defer to mono_arch_emit_outarg_vt (). */
1648 } else if (ainfo->regtype == RegTypeStructByAddr) {
1649 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1650 ins->opcode = OP_OUTARG_VT;
1651 ins->sreg1 = in->dreg;
1652 ins->klass = in->klass;
1653 ins->inst_p0 = call;
1654 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1655 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1656 MONO_ADD_INS (cfg->cbb, ins);
1657 } else if (ainfo->regtype == RegTypeStructByVal) {
1658 /* this is further handled in mono_arch_emit_outarg_vt () */
1659 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1660 ins->opcode = OP_OUTARG_VT;
1661 ins->sreg1 = in->dreg;
1662 ins->klass = in->klass;
1663 ins->inst_p0 = call;
1664 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1665 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1666 MONO_ADD_INS (cfg->cbb, ins);
1667 } else if (ainfo->regtype == RegTypeFPStructByVal) {
1668 /* this is further handled in mono_arch_emit_outarg_vt () */
1669 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1670 ins->opcode = OP_OUTARG_VT;
1671 ins->sreg1 = in->dreg;
1672 ins->klass = in->klass;
1673 ins->inst_p0 = call;
1674 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1675 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1676 MONO_ADD_INS (cfg->cbb, ins);
1677 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Stack-passed arguments: store relative to r1 with a width-appropriate opcode. */
1678 } else if (ainfo->regtype == RegTypeBase) {
1679 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1681 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1682 if (t->type == MONO_TYPE_R8)
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1689 } else if (ainfo->regtype == RegTypeFP) {
1690 if (t->type == MONO_TYPE_VALUETYPE) {
1691 /* this is further handled in mono_arch_emit_outarg_vt () */
1692 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1693 ins->opcode = OP_OUTARG_VT;
1694 ins->sreg1 = in->dreg;
1695 ins->klass = in->klass;
1696 ins->inst_p0 = call;
1697 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1698 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1699 MONO_ADD_INS (cfg->cbb, ins);
1701 cfg->flags |= MONO_CFG_HAS_FPOUT;
1703 int dreg = mono_alloc_freg (cfg);
/* size == 4 means a single-precision argument: narrow before the move. */
1705 if (ainfo->size == 4) {
1706 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg);
1708 MONO_INST_NEW (cfg, ins, OP_FMOVE);
1710 ins->sreg1 = in->dreg;
1711 MONO_ADD_INS (cfg->cbb, ins);
1714 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
1715 cfg->flags |= MONO_CFG_HAS_FPOUT;
1718 g_assert_not_reached ();
1722 /* Emit the signature cookie in the case that there is no
1723 additional argument */
1724 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1725 emit_sig_cookie (cfg, call, cinfo);
/* Struct return: pass the vret buffer address in the designated register. */
1727 if (cinfo->struct_ret) {
1730 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1731 vtarg->sreg1 = call->vret_var->dreg;
1732 vtarg->dreg = mono_alloc_preg (cfg);
1733 MONO_ADD_INS (cfg->cbb, vtarg);
1735 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE);
1738 call->stack_usage = cinfo->stack_usage;
1739 cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage));
1740 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * NOTE(review): sampled extract — original lines are missing between the
 * embedded line numbers; tokens preserved verbatim.
 *
 * Emits the argument-marshalling code for an OP_OUTARG_VT (value type passed to
 * a call), using the ArgInfo snapshot stashed in ins->inst_p1 by
 * mono_arch_emit_call ().
 */
1748 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1750 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1751 ArgInfo *ainfo = ins->inst_p1;
1752 int ovf_size = ainfo->vtsize;
1753 int doffset = ainfo->offset;
1754 int i, soffset, dreg;
/* Struct split across general-purpose registers (plus possible stack overflow area). */
1756 if (ainfo->regtype == RegTypeStructByVal) {
1763 * Darwin pinvokes needs some special handling for 1
1764 * and 2 byte arguments
1766 g_assert (ins->klass);
1767 if (call->signature->pinvoke)
1768 size = mono_class_native_size (ins->klass, NULL);
1769 if (size == 2 || size == 1) {
1770 int tmpr = mono_alloc_ireg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset);
1774 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset);
1775 dreg = mono_alloc_ireg (cfg);
1776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr);
1777 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE);
1780 for (i = 0; i < ainfo->vtregs; ++i) {
1781 dreg = mono_alloc_ireg (cfg);
/* Big endian: a sub-word struct sits in the high bytes of the register,
 * so shift right to remove the "anti-padding". */
1782 #if G_BYTE_ORDER == G_BIG_ENDIAN
1783 int antipadding = 0;
1786 antipadding = sizeof (gpointer) - ainfo->bytes;
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8);
1792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1794 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1795 soffset += sizeof (gpointer);
/* Whatever did not fit in registers is copied to the outgoing stack area. */
1798 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), SIZEOF_VOID_P);
/* Homogeneous float struct split across FP registers. */
1799 } else if (ainfo->regtype == RegTypeFPStructByVal) {
1801 for (i = 0; i < ainfo->vtregs; ++i) {
1802 int tmpr = mono_alloc_freg (cfg);
1803 if (ainfo->size == 4)
1804 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, soffset);
1806 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, soffset);
1807 dreg = mono_alloc_freg (cfg);
1808 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1809 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg+i, TRUE);
1810 soffset += ainfo->size;
1813 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), SIZEOF_VOID_P);
/* Single float field passed in one FP register. */
1814 } else if (ainfo->regtype == RegTypeFP) {
1815 int tmpr = mono_alloc_freg (cfg);
1816 if (ainfo->size == 4)
1817 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0);
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0);
1820 dreg = mono_alloc_freg (cfg);
1821 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1822 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
/* Pass-by-address: make a local copy and pass its address (register or stack). */
1824 MonoInst *vtcopy = mono_compile_create_var (cfg, &src->klass->byval_arg, OP_LOCAL);
1828 /* FIXME: alignment? */
1829 if (call->signature->pinvoke) {
1830 size = mono_type_native_stack_size (&src->klass->byval_arg, NULL);
1831 vtcopy->backend.is_pinvoke = 1;
1833 size = mini_type_stack_size (&src->klass->byval_arg, NULL);
1836 g_assert (ovf_size > 0);
1838 EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
1839 mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, SIZEOF_VOID_P);
1842 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg);
1844 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Moves a method's return value into the ABI return location:
 * OP_SETLRET for 64-bit pairs on 32-bit PPC, FMOVE for floats, MOVE otherwise.
 */
1849 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1851 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1853 #ifndef __mono_ppc64__
1854 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1857 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1858 ins->sreg1 = MONO_LVREG_LS (val->dreg);
1859 ins->sreg2 = MONO_LVREG_MS (val->dreg);
1860 MONO_ADD_INS (cfg->cbb, ins);
1864 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1865 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1869 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Whether @imm can be encoded as an instruction immediate; body not visible in
 * this extract. */
1872 /* FIXME: this is just a useless hint: fix the interface to include the opcode */
1874 mono_arch_is_inst_imm (gint64 imm)
1879 #endif /* DISABLE_JIT */
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Emits the enter-method tracing call: method pointer in r3, NULL in r4,
 * then the tracing function via the call register.
 */
1882 * Allow tracing to work with this interface (with an optional argument)
1886 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1890 ppc_load_ptr (code, ppc_r3, cfg->method);
1891 ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
1892 ppc_load_func (code, PPC_CALL_REG, func);
1893 ppc_mtlr (code, PPC_CALL_REG);
/*
 * NOTE(review): sampled extract — original lines are missing between the
 * embedded line numbers; tokens preserved verbatim except for the fix below.
 *
 * Emits the leave-method tracing call. The return value is spilled to the
 * frame (mode chosen from the return type), the tracing function is invoked,
 * and the return value is reloaded so the method epilog sees it intact.
 *
 * FIX(review): the string .ctor special case previously read
 *     if (strcmp (".ctor", method->name) && ...)
 * which is true when the name is NOT ".ctor" — the inverse of the intent.
 * String .ctor icalls return the new string despite a void signature, so r3
 * must be saved exactly when the method IS ".ctor" on the string class; other
 * Mono backends use !strcmp here.
 */
1907 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1910 int save_mode = SAVE_NONE;
1912 MonoMethod *method = cfg->method;
1913 int rtype = mini_get_underlying_type (mono_method_signature (method)->ret)->type;
1914 int save_offset = PPC_STACK_PARAM_OFFSET + cfg->param_area;
/* Grow the code buffer if the remaining space cannot hold this sequence. */
1918 offset = code - cfg->native_code;
1919 /* we need about 16 instructions */
1920 if (offset > (cfg->code_size - 16 * 4)) {
1921 cfg->code_size *= 2;
1922 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1923 code = cfg->native_code + offset;
/* Pick the save mode from the return type. */
1927 case MONO_TYPE_VOID:
1928 /* special case string .ctor icall */
1929 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1930 save_mode = SAVE_ONE;
1932 save_mode = SAVE_NONE;
1934 #ifndef __mono_ppc64__
1937 save_mode = SAVE_TWO;
1942 save_mode = SAVE_FP;
1944 case MONO_TYPE_VALUETYPE:
1945 save_mode = SAVE_STRUCT;
1948 save_mode = SAVE_ONE;
/* Spill the return value and set up the tracing call's arguments. */
1952 switch (save_mode) {
1954 ppc_stw (code, ppc_r3, save_offset, cfg->frame_reg);
1955 ppc_stw (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1956 if (enable_arguments) {
1957 ppc_mr (code, ppc_r5, ppc_r4);
1958 ppc_mr (code, ppc_r4, ppc_r3);
1962 ppc_stptr (code, ppc_r3, save_offset, cfg->frame_reg);
1963 if (enable_arguments) {
1964 ppc_mr (code, ppc_r4, ppc_r3);
1968 ppc_stfd (code, ppc_f1, save_offset, cfg->frame_reg);
1969 if (enable_arguments) {
1970 /* FIXME: what reg? */
1971 ppc_fmr (code, ppc_f3, ppc_f1);
1972 /* FIXME: use 8 byte load on PPC64 */
1973 ppc_lwz (code, ppc_r4, save_offset, cfg->frame_reg);
1974 ppc_lwz (code, ppc_r5, save_offset + 4, cfg->frame_reg);
1978 if (enable_arguments) {
1979 /* FIXME: get the actual address */
1980 ppc_mr (code, ppc_r4, ppc_r3);
1981 // FIXME: Support the new v2 ABI!
1989 ppc_load_ptr (code, ppc_r3, cfg->method);
1990 ppc_load_func (code, PPC_CALL_REG, func);
1991 ppc_mtlr (code, PPC_CALL_REG);
/* Reload the spilled return value after the tracing call. */
1994 switch (save_mode) {
1996 ppc_lwz (code, ppc_r3, save_offset, cfg->frame_reg);
1997 ppc_lwz (code, ppc_r4, save_offset + 4, cfg->frame_reg);
2000 ppc_ldptr (code, ppc_r3, save_offset, cfg->frame_reg);
2003 ppc_lfd (code, ppc_f1, save_offset, cfg->frame_reg);
2013 * Conditional branches have a small offset, so if it is likely overflowed,
2014 * we do a branch to the end of the method (uncond branches have much larger
2015 * offsets) where we perform the conditional and jump back unconditionally.
2016 * It's slightly slower, since we add two uncond branches, but it's very simple
2017 * with the current patch implementation and such large methods are likely not
2018 * going to be perf critical anyway.
/* Field of the MonoOvfJump record (declaration header elided in this extract);
 * presumably the exception name used with MONO_PATCH_INFO_EXC_OVF — the macros
 * below assign ovfj->data.exception/ovfj->data.bb. */
2023 const char *exception;
/*
 * Emit a conditional branch to ins->inst_true_bb. A short-range bc is emitted
 * when the estimated displacement fits in the 16-bit BD field (with a 1024-byte
 * safety margin); otherwise a MonoOvfJump record is queued so the conditional
 * is performed via a long-range trampoline patched at the end of the method.
 *
 * FIX(review): the lower-bound range check previously read
 *     ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))
 * i.e. the inner predicate's 0/1 result was fed to the outer ppc_is_imm16,
 * which is trivially a valid imm16 — so the whole clause was always false and
 * large negative displacements escaped the overflow path. Test the
 * displacement itself: !ppc_is_imm16 (br_disp - 1024).
 */
2030 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
2031 if (0 && ins->inst_true_bb->native_offset) { \
2032 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
2034 int br_disp = ins->inst_true_bb->max_offset - offset; \
2035 if (!ppc_is_imm16 (br_disp + 1024) || !ppc_is_imm16 (br_disp - 1024)) { \
2036 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
2037 ovfj->data.bb = ins->inst_true_bb; \
2038 ovfj->ip_offset = 0; \
2039 ovfj->b0_cond = (b0); \
2040 ovfj->b1_cond = (b1); \
2041 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
2044 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2045 ppc_bc (code, (b0), (b1), 0); \
2049 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
2051 /* emit an exception if condition is fail
2053 * We assign the extra code used to throw the implicit exceptions
2054 * to cfg->bb_exit as far as the big branch handling is concerned
2056 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
2058 int br_disp = cfg->bb_exit->max_offset - offset; \
2059 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
2060 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
2061 ovfj->data.exception = (exc_name); \
2062 ovfj->ip_offset = code - cfg->native_code; \
2063 ovfj->b0_cond = (b0); \
2064 ovfj->b1_cond = (b1); \
2065 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
2067 cfg->bb_exit->max_offset += 24; \
2069 mono_add_patch_info (cfg, code - cfg->native_code, \
2070 MONO_PATCH_INFO_EXC, exc_name); \
2071 ppc_bcl (code, (b0), (b1), 0); \
2075 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
2078 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Maps width-specific load/store/shift opcodes to their generic pointer-width
 * form (32-bit or 64-bit depending on target) so the peephole pass can match
 * them uniformly.
 */
2083 normalize_opcode (int opcode)
2086 #ifndef __mono_ilp32__
2087 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE):
2088 return OP_LOAD_MEMBASE;
2089 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX):
2090 return OP_LOAD_MEMINDEX;
2091 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG):
2092 return OP_STORE_MEMBASE_REG;
2093 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM):
2094 return OP_STORE_MEMBASE_IMM;
2095 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX):
2096 return OP_STORE_MEMINDEX;
2098 case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM):
2100 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM):
2101 return OP_SHR_UN_IMM;
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Second peephole pass: strips redundant multiplies, forwards stores into
 * following loads of the same slot, and removes no-op/inverse moves.
 */
2108 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2110 MonoInst *ins, *n, *last_ins = NULL;
2112 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2113 switch (normalize_opcode (ins->opcode)) {
2115 /* remove unnecessary multiplication with 1 */
2116 if (ins->inst_imm == 1) {
2117 if (ins->dreg != ins->sreg1) {
2118 ins->opcode = OP_MOVE;
2120 MONO_DELETE_INS (bb, ins);
/* Multiplication by a power of two becomes a left shift. */
2124 int power2 = mono_is_power_of_two (ins->inst_imm);
2126 ins->opcode = OP_SHL_IMM;
2127 ins->inst_imm = power2;
2131 case OP_LOAD_MEMBASE:
2133 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2134 * OP_LOAD_MEMBASE offset(basereg), reg
2136 if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG &&
2137 ins->inst_basereg == last_ins->inst_destbasereg &&
2138 ins->inst_offset == last_ins->inst_offset) {
2139 if (ins->dreg == last_ins->sreg1) {
2140 MONO_DELETE_INS (bb, ins);
2143 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2144 ins->opcode = OP_MOVE;
2145 ins->sreg1 = last_ins->sreg1;
2149 * Note: reg1 must be different from the basereg in the second load
2150 * OP_LOAD_MEMBASE offset(basereg), reg1
2151 * OP_LOAD_MEMBASE offset(basereg), reg2
2153 * OP_LOAD_MEMBASE offset(basereg), reg1
2154 * OP_MOVE reg1, reg2
2156 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE &&
2157 ins->inst_basereg != last_ins->dreg &&
2158 ins->inst_basereg == last_ins->inst_basereg &&
2159 ins->inst_offset == last_ins->inst_offset) {
2161 if (ins->dreg == last_ins->dreg) {
2162 MONO_DELETE_INS (bb, ins);
2165 ins->opcode = OP_MOVE;
2166 ins->sreg1 = last_ins->dreg;
2169 //g_assert_not_reached ();
2173 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2174 * OP_LOAD_MEMBASE offset(basereg), reg
2176 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2177 * OP_ICONST reg, imm
2179 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM &&
2180 ins->inst_basereg == last_ins->inst_destbasereg &&
2181 ins->inst_offset == last_ins->inst_offset) {
2182 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2183 ins->opcode = OP_ICONST;
2184 ins->inst_c0 = last_ins->inst_imm;
2185 g_assert_not_reached (); // check this rule
/* Narrow loads after a matching narrow store become sign/zero extensions. */
2189 case OP_LOADU1_MEMBASE:
2190 case OP_LOADI1_MEMBASE:
2191 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2192 ins->inst_basereg == last_ins->inst_destbasereg &&
2193 ins->inst_offset == last_ins->inst_offset) {
2194 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2195 ins->sreg1 = last_ins->sreg1;
2198 case OP_LOADU2_MEMBASE:
2199 case OP_LOADI2_MEMBASE:
2200 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2201 ins->inst_basereg == last_ins->inst_destbasereg &&
2202 ins->inst_offset == last_ins->inst_offset) {
2203 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2204 ins->sreg1 = last_ins->sreg1;
2207 #ifdef __mono_ppc64__
2208 case OP_LOADU4_MEMBASE:
2209 case OP_LOADI4_MEMBASE:
2210 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
2211 ins->inst_basereg == last_ins->inst_destbasereg &&
2212 ins->inst_offset == last_ins->inst_offset) {
2213 ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
2214 ins->sreg1 = last_ins->sreg1;
2219 ins->opcode = OP_MOVE;
/* Drop self-moves and a move that exactly undoes the previous move. */
2223 if (ins->dreg == ins->sreg1) {
2224 MONO_DELETE_INS (bb, ins);
2228 * OP_MOVE sreg, dreg
2229 * OP_MOVE dreg, sreg
2231 if (last_ins && last_ins->opcode == OP_MOVE &&
2232 ins->sreg1 == last_ins->dreg &&
2233 ins->dreg == last_ins->sreg1) {
2234 MONO_DELETE_INS (bb, ins);
2242 bb->last_ins = last_ins;
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Decomposes opcodes PPC cannot express directly into simpler sequences,
 * replacing the original instruction with OP_NOP when expanded.
 */
2246 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
2248 switch (ins->opcode) {
/* uint -> double via the classic "magic number" bit trick: build the IEEE-754
 * double 2^52 + value in memory, reload it, then subtract 2^52. */
2249 case OP_ICONV_TO_R_UN: {
2250 // This value is OK as-is for both big and little endian because of how it is stored
2251 static const guint64 adjust_val = 0x4330000000000000ULL;
2252 int msw_reg = mono_alloc_ireg (cfg);
2253 int adj_reg = mono_alloc_freg (cfg);
2254 int tmp_reg = mono_alloc_freg (cfg);
2255 int basereg = ppc_sp;
2257 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
2258 if (!ppc_is_imm16 (offset + 4)) {
2259 basereg = mono_alloc_ireg (cfg);
2260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2262 #if G_BYTE_ORDER == G_BIG_ENDIAN
2263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1);
2266 // For little endian the words are reversed
2267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, msw_reg);
2268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, ins->sreg1);
2270 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val);
2271 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2272 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2273 ins->opcode = OP_NOP;
2276 #ifndef __mono_ppc64__
2277 case OP_ICONV_TO_R4:
2278 case OP_ICONV_TO_R8: {
2279 /* If we have a PPC_FEATURE_64 machine we can avoid
2280 this and use the fcfid instruction. Otherwise
2281 on an old 32-bit chip and we have to do this the
/* Signed variant of the same trick: xor with 0x80000000 biases the value so the
 * magic constant 0x4330000080000000 cancels the bias after the subtract. */
2283 if (!(cpu_hw_caps & PPC_ISA_64)) {
2284 /* FIXME: change precision for CEE_CONV_R4 */
2285 static const guint64 adjust_val = 0x4330000080000000ULL;
2286 int msw_reg = mono_alloc_ireg (cfg);
2287 int xored = mono_alloc_ireg (cfg);
2288 int adj_reg = mono_alloc_freg (cfg);
2289 int tmp_reg = mono_alloc_freg (cfg);
2290 int basereg = ppc_sp;
2292 if (!ppc_is_imm16 (offset + 4)) {
2293 basereg = mono_alloc_ireg (cfg);
2294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2296 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
2297 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2298 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000);
2299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored);
2300 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val);
2301 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2302 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2303 if (ins->opcode == OP_ICONV_TO_R4)
2304 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->dreg);
2305 ins->opcode = OP_NOP;
/* Finiteness check: spill the double, inspect its most-significant word. */
2311 int msw_reg = mono_alloc_ireg (cfg);
2312 int basereg = ppc_sp;
2314 if (!ppc_is_imm16 (offset + 4)) {
2315 basereg = mono_alloc_ireg (cfg);
2316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1);
2319 #if G_BYTE_ORDER == G_BIG_ENDIAN
2320 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset);
2322 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset+4);
2324 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_FINITE, -1, msw_reg);
2325 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
2326 ins->opcode = OP_NOP;
2329 #ifdef __mono_ppc64__
/* 32-bit overflow ops on ppc64: shift operands into the high word so the
 * 64-bit overflow-checking op sees 32-bit overflow, then shift back. */
2331 case OP_IADD_OVF_UN:
2333 int shifted1_reg = mono_alloc_ireg (cfg);
2334 int shifted2_reg = mono_alloc_ireg (cfg);
2335 int result_shifted_reg = mono_alloc_ireg (cfg);
2337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32);
2338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32);
2339 MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg);
2340 if (ins->opcode == OP_IADD_OVF_UN)
2341 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32);
2343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32);
2344 ins->opcode = OP_NOP;
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Decomposes 64-bit opcodes into 32-bit register-pair sequences (low word via
 * the carry-setting op, high word via the carry-consuming overflow op).
 */
2354 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
2356 switch (ins->opcode) {
2358 /* ADC sets the condition code */
2359 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2360 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2363 case OP_LADD_OVF_UN:
2364 /* ADC sets the condition code */
2365 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2366 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2370 /* SBB sets the condition code */
2371 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2372 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2375 case OP_LSUB_OVF_UN:
2376 /* SBB sets the condition code */
2377 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2378 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
/* 64-bit negate: subfic (0 - low, sets carry) then subfze on the high word. */
2382 /* From gcc generated code */
2383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
2384 MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
/* BO/BI operand tables for ppc_bc, indexed by condition code; contents elided
 * in this extract. */
2393 * the branch_b0_table should maintain the order of these
2407 branch_b0_table [] = {
2422 branch_b1_table [] = {
/* Allocate a new instruction and insert it after last_ins in the current bb
 * (used by the lowering pass below; trailing lines elided in this extract). */
2436 #define NEW_INS(cfg,dest,op) do { \
2437 MONO_INST_NEW((cfg), (dest), (op)); \
2438 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
/*
 * NOTE(review): sampled extract — tokens preserved verbatim.
 * Maps an immediate-operand opcode to its register-register (or memindex)
 * equivalent, used when the immediate does not fit the instruction encoding.
 * Falls back to the generic mono_op_imm_to_op () table.
 */
2442 map_to_reg_reg_op (int op)
2451 case OP_COMPARE_IMM:
2453 case OP_ICOMPARE_IMM:
2455 case OP_LCOMPARE_IMM:
/* _MEMBASE loads/stores become _MEMINDEX when the offset moves to a register. */
2471 case OP_LOAD_MEMBASE:
2472 return OP_LOAD_MEMINDEX;
2473 case OP_LOADI4_MEMBASE:
2474 return OP_LOADI4_MEMINDEX;
2475 case OP_LOADU4_MEMBASE:
2476 return OP_LOADU4_MEMINDEX;
2477 case OP_LOADI8_MEMBASE:
2478 return OP_LOADI8_MEMINDEX;
2479 case OP_LOADU1_MEMBASE:
2480 return OP_LOADU1_MEMINDEX;
2481 case OP_LOADI2_MEMBASE:
2482 return OP_LOADI2_MEMINDEX;
2483 case OP_LOADU2_MEMBASE:
2484 return OP_LOADU2_MEMINDEX;
2485 case OP_LOADI1_MEMBASE:
2486 return OP_LOADI1_MEMINDEX;
2487 case OP_LOADR4_MEMBASE:
2488 return OP_LOADR4_MEMINDEX;
2489 case OP_LOADR8_MEMBASE:
2490 return OP_LOADR8_MEMINDEX;
2491 case OP_STOREI1_MEMBASE_REG:
2492 return OP_STOREI1_MEMINDEX;
2493 case OP_STOREI2_MEMBASE_REG:
2494 return OP_STOREI2_MEMINDEX;
2495 case OP_STOREI4_MEMBASE_REG:
2496 return OP_STOREI4_MEMINDEX;
2497 case OP_STOREI8_MEMBASE_REG:
2498 return OP_STOREI8_MEMINDEX;
2499 case OP_STORE_MEMBASE_REG:
2500 return OP_STORE_MEMINDEX;
2501 case OP_STORER4_MEMBASE_REG:
2502 return OP_STORER4_MEMINDEX;
2503 case OP_STORER8_MEMBASE_REG:
2504 return OP_STORER8_MEMINDEX;
/* Immediate stores become register stores once the value is materialized. */
2505 case OP_STORE_MEMBASE_IMM:
2506 return OP_STORE_MEMBASE_REG;
2507 case OP_STOREI1_MEMBASE_IMM:
2508 return OP_STOREI1_MEMBASE_REG;
2509 case OP_STOREI2_MEMBASE_IMM:
2510 return OP_STOREI2_MEMBASE_REG;
2511 case OP_STOREI4_MEMBASE_IMM:
2512 return OP_STOREI4_MEMBASE_REG;
2513 case OP_STOREI8_MEMBASE_IMM:
2514 return OP_STOREI8_MEMBASE_REG;
2516 if (mono_op_imm_to_op (op) == -1)
2517 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op));
2518 return mono_op_imm_to_op (op);
2521 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
/*
 * True when @opcode implies an unsigned comparison, i.e. the generated compare
 * must set unsigned condition flags. Matches three opcode ranges (CEE/int/long
 * unsigned branches), two ranges of unsigned conditional-exception opcodes,
 * and the six unsigned set-condition opcodes.
 */
#define compare_opcode_is_unsigned(opcode) \
	(((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
	 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
	 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
	 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
	 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
	 (opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
	 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
	 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN)
2534 * Remove from the instruction list the instructions that can't be
2535 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Lower the instructions of @bb into forms the PPC code emitter can
 * encode directly.  The dominant pattern: when an *_IMM opcode carries
 * an immediate that does not fit the instruction's 16-bit immediate
 * field, materialize the immediate into a fresh vreg with OP_ICONST and
 * switch the opcode to its register-register form via
 * map_to_reg_reg_op ().  div/rem-by-immediate is always turned into the
 * reg-reg form (PPC has no divide-by-immediate), and rem itself is
 * decomposed since PPC has no remainder instruction.
 */
2539 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2541 MonoInst *ins, *next, *temp, *last_ins = NULL;
2544 MONO_BB_FOR_EACH_INS (bb, ins) {
2546 switch (ins->opcode) {
2547 case OP_IDIV_UN_IMM:
2550 case OP_IREM_UN_IMM:
/* div/rem with immediate divisor: load the divisor into a register
 * and fall back to the plain reg-reg div/rem opcode. */
2551 CASE_PPC64 (OP_LREM_IMM) {
2552 NEW_INS (cfg, temp, OP_ICONST);
2553 temp->inst_c0 = ins->inst_imm;
2554 temp->dreg = mono_alloc_ireg (cfg);
2555 ins->sreg2 = temp->dreg;
2556 if (ins->opcode == OP_IDIV_IMM)
2557 ins->opcode = OP_IDIV;
2558 else if (ins->opcode == OP_IREM_IMM)
2559 ins->opcode = OP_IREM;
2560 else if (ins->opcode == OP_IDIV_UN_IMM)
2561 ins->opcode = OP_IDIV_UN;
2562 else if (ins->opcode == OP_IREM_UN_IMM)
2563 ins->opcode = OP_IREM_UN;
2564 else if (ins->opcode == OP_LREM_IMM)
2565 ins->opcode = OP_LREM;
2567 /* handle rem separately */
2572 CASE_PPC64 (OP_LREM)
2573 CASE_PPC64 (OP_LREM_UN) {
2575 /* we change a rem dest, src1, src2 to
2576 * div temp1, src1, src2
2577 * mul temp2, temp1, src2
2578 * sub dest, src1, temp2
2580 if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) {
2581 NEW_INS (cfg, mul, OP_IMUL);
2582 NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN);
2583 ins->opcode = OP_ISUB;
2585 NEW_INS (cfg, mul, OP_LMUL);
2586 NEW_INS (cfg, temp, ins->opcode == OP_LREM? OP_LDIV: OP_LDIV_UN);
2587 ins->opcode = OP_LSUB;
/* wire up div->mul->sub: temp = s1/s2, mul = temp*s2, ins = s1-mul */
2589 temp->sreg1 = ins->sreg1;
2590 temp->sreg2 = ins->sreg2;
2591 temp->dreg = mono_alloc_ireg (cfg);
2592 mul->sreg1 = temp->dreg;
2593 mul->sreg2 = ins->sreg2;
2594 mul->dreg = mono_alloc_ireg (cfg);
2595 ins->sreg2 = mul->dreg;
/* add/sub with immediate: addi/subfic take a signed 16-bit field
 * (sub is encoded as addi of the negated immediate, hence -inst_imm). */
2599 CASE_PPC64 (OP_LADD_IMM)
2602 if (!ppc_is_imm16 (ins->inst_imm)) {
2603 NEW_INS (cfg, temp, OP_ICONST);
2604 temp->inst_c0 = ins->inst_imm;
2605 temp->dreg = mono_alloc_ireg (cfg);
2606 ins->sreg2 = temp->dreg;
2607 ins->opcode = map_to_reg_reg_op (ins->opcode);
2611 CASE_PPC64 (OP_LSUB_IMM)
2613 if (!ppc_is_imm16 (-ins->inst_imm)) {
2614 NEW_INS (cfg, temp, OP_ICONST);
2615 temp->inst_c0 = ins->inst_imm;
2616 temp->dreg = mono_alloc_ireg (cfg);
2617 ins->sreg2 = temp->dreg;
2618 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* bitwise ops: representable as andi./andis. etc. only when exactly
 * one 16-bit half of the immediate is non-zero (on ppc64 any bit in
 * the upper 32 bits also forces the reg-reg form). */
2630 gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff));
2631 #ifdef __mono_ppc64__
2632 if (ins->inst_imm & 0xffffffff00000000ULL)
2636 NEW_INS (cfg, temp, OP_ICONST);
2637 temp->inst_c0 = ins->inst_imm;
2638 temp->dreg = mono_alloc_ireg (cfg);
2639 ins->sreg2 = temp->dreg;
2640 ins->opcode = map_to_reg_reg_op (ins->opcode);
2649 NEW_INS (cfg, temp, OP_ICONST);
2650 temp->inst_c0 = ins->inst_imm;
2651 temp->dreg = mono_alloc_ireg (cfg);
2652 ins->sreg2 = temp->dreg;
2653 ins->opcode = map_to_reg_reg_op (ins->opcode);
2655 case OP_COMPARE_IMM:
2656 case OP_ICOMPARE_IMM:
2657 CASE_PPC64 (OP_LCOMPARE_IMM)
2659 /* Branch opts can eliminate the branch */
2660 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
2661 ins->opcode = OP_NOP;
/* the following compare consumer decides signedness: cmpli needs an
 * unsigned 16-bit immediate, cmpi a signed one */
2665 if (compare_opcode_is_unsigned (next->opcode)) {
2666 if (!ppc_is_uimm16 (ins->inst_imm)) {
2667 NEW_INS (cfg, temp, OP_ICONST);
2668 temp->inst_c0 = ins->inst_imm;
2669 temp->dreg = mono_alloc_ireg (cfg);
2670 ins->sreg2 = temp->dreg;
2671 ins->opcode = map_to_reg_reg_op (ins->opcode);
2674 if (!ppc_is_imm16 (ins->inst_imm)) {
2675 NEW_INS (cfg, temp, OP_ICONST);
2676 temp->inst_c0 = ins->inst_imm;
2677 temp->dreg = mono_alloc_ireg (cfg);
2678 ins->sreg2 = temp->dreg;
2679 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* multiply by immediate: strength-reduce *1 -> move, *0 -> const 0,
 * *2^k -> shift-left; otherwise mulli needs a 16-bit immediate */
2685 if (ins->inst_imm == 1) {
2686 ins->opcode = OP_MOVE;
2689 if (ins->inst_imm == 0) {
2690 ins->opcode = OP_ICONST;
2694 imm = mono_is_power_of_two (ins->inst_imm);
2696 ins->opcode = OP_SHL_IMM;
2697 ins->inst_imm = imm;
2700 if (!ppc_is_imm16 (ins->inst_imm)) {
2701 NEW_INS (cfg, temp, OP_ICONST);
2702 temp->inst_c0 = ins->inst_imm;
2703 temp->dreg = mono_alloc_ireg (cfg);
2704 ins->sreg2 = temp->dreg;
2705 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* localloc with constant size: always load the size into a register */
2708 case OP_LOCALLOC_IMM:
2709 NEW_INS (cfg, temp, OP_ICONST);
2710 temp->inst_c0 = ins->inst_imm;
2711 temp->dreg = mono_alloc_ireg (cfg);
2712 ins->sreg1 = temp->dreg;
2713 ins->opcode = OP_LOCALLOC;
2715 case OP_LOAD_MEMBASE:
2716 case OP_LOADI4_MEMBASE:
2717 CASE_PPC64 (OP_LOADI8_MEMBASE)
2718 case OP_LOADU4_MEMBASE:
2719 case OP_LOADI2_MEMBASE:
2720 case OP_LOADU2_MEMBASE:
2721 case OP_LOADI1_MEMBASE:
2722 case OP_LOADU1_MEMBASE:
2723 case OP_LOADR4_MEMBASE:
2724 case OP_LOADR8_MEMBASE:
2725 case OP_STORE_MEMBASE_REG:
2726 CASE_PPC64 (OP_STOREI8_MEMBASE_REG)
2727 case OP_STOREI4_MEMBASE_REG:
2728 case OP_STOREI2_MEMBASE_REG:
2729 case OP_STOREI1_MEMBASE_REG:
2730 case OP_STORER4_MEMBASE_REG:
2731 case OP_STORER8_MEMBASE_REG:
2732 /* we can do two things: load the immed in a register
2733 * and use an indexed load, or see if the immed can be
2734 * represented as an ad_imm + a load with a smaller offset
2735 * that fits. We just do the first for now, optimize later.
2737 if (ppc_is_imm16 (ins->inst_offset))
2739 NEW_INS (cfg, temp, OP_ICONST);
2740 temp->inst_c0 = ins->inst_offset;
2741 temp->dreg = mono_alloc_ireg (cfg);
2742 ins->sreg2 = temp->dreg;
2743 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* store-immediate: first load the value into a register, then loop
 * again so the (possibly large) offset is handled by the case above */
2745 case OP_STORE_MEMBASE_IMM:
2746 case OP_STOREI1_MEMBASE_IMM:
2747 case OP_STOREI2_MEMBASE_IMM:
2748 case OP_STOREI4_MEMBASE_IMM:
2749 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM)
2750 NEW_INS (cfg, temp, OP_ICONST);
2751 temp->inst_c0 = ins->inst_imm;
2752 temp->dreg = mono_alloc_ireg (cfg);
2753 ins->sreg1 = temp->dreg;
2754 ins->opcode = map_to_reg_reg_op (ins->opcode);
2756 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* float constants: outside AOT, turn R4CONST/R8CONST into a load from
 * the constant's address (inst_p0) materialized into a base register */
2759 if (cfg->compile_aot) {
2760 /* Keep these in the aot case */
2763 NEW_INS (cfg, temp, OP_ICONST);
2764 temp->inst_c0 = (gulong)ins->inst_p0;
2765 temp->dreg = mono_alloc_ireg (cfg);
2766 ins->inst_basereg = temp->dreg;
2767 ins->inst_offset = 0;
2768 ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
2770 /* make it handle the possibly big ins->inst_offset
2771 * later optimize to use lis + load_membase
2777 bb->last_ins = last_ins;
2778 bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 * Emit code converting the FP register @sreg to an integer of @size
 * bytes in GPR @dreg.  The FP value is rounded toward zero into ppc_f0
 * (fctidz on ppc64, fctiwz on ppc32), stored to the per-method
 * fp-conversion stack slot (cfg->arch.fp_conv_var_offset), then
 * reloaded into @dreg; finally the result is truncated/sign- or
 * zero-extended according to @size and @is_signed.
 * NOTE(review): `sub_offset` is declared in elided lines — presumably
 * it selects the word of the 8-byte slot holding the low 32 bits.
 */
2782 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2784 long offset = cfg->arch.fp_conv_var_offset;
2786 /* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
2787 #ifdef __mono_ppc64__
2789 ppc_fctidz (code, ppc_f0, sreg);
2794 ppc_fctiwz (code, ppc_f0, sreg);
/* round-trip through memory: store f0 to the slot, reload into dreg
 * (indirect addressing via dreg when the offset exceeds imm16) */
2797 if (ppc_is_imm16 (offset + sub_offset)) {
2798 ppc_stfd (code, ppc_f0, offset, cfg->frame_reg);
2800 ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg);
2802 ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg);
2804 ppc_load (code, dreg, offset);
2805 ppc_add (code, dreg, dreg, cfg->frame_reg);
2806 ppc_stfd (code, ppc_f0, 0, dreg);
2808 ppc_ldr (code, dreg, sub_offset, dreg);
2810 ppc_lwz (code, dreg, sub_offset, dreg);
/* narrow/extend the loaded integer to the requested width */
2814 ppc_andid (code, dreg, dreg, 0xff);
2816 ppc_andid (code, dreg, dreg, 0xffff);
2817 #ifdef __mono_ppc64__
2819 ppc_clrldi (code, dreg, dreg, 32);
2823 ppc_extsb (code, dreg, dreg);
2825 ppc_extsh (code, dreg, dreg);
2826 #ifdef __mono_ppc64__
2828 ppc_extsw (code, dreg, dreg);
2836 const guchar *target;
2841 #define is_call_imm(diff) ((glong)(diff) >= -33554432 && (glong)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_domain_code_foreach callback (ppc32 only — asserts on ppc64).
 * Scans the thunk area of one code chunk for either an existing thunk
 * that already loads pdata->target (reuse it) or a free 16-byte slot
 * (emit a new lis/ori/mtctr/bcctr thunk there), then patches the call
 * at pdata->code to branch to that thunk.  Chunks the caller's branch
 * cannot reach with a 26-bit displacement are skipped (is_call_imm
 * checks on both ends of the thunk range).
 */
2844 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2845 #ifdef __mono_ppc64__
2846 g_assert_not_reached ();
2848 PatchData *pdata = (PatchData*)user_data;
2849 guchar *code = data;
2850 guint32 *thunks = data;
2851 guint32 *endthunks = (guint32*)(code + bsize);
2855 int difflow, diffhigh;
2857 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2858 difflow = (char*)pdata->code - (char*)thunks;
2859 diffhigh = (char*)pdata->code - (char*)endthunks;
2860 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
/* build the lis/ori template for the target so existing thunks can be
 * matched by comparing their first two instruction words */
2863 templ = (guchar*)load;
2864 ppc_load_sequence (templ, ppc_r0, pdata->target);
2866 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2867 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2868 while (thunks < endthunks) {
2869 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2870 if ((thunks [0] == load [0]) && (thunks [1] == load [1])) {
/* matching thunk found: reuse it */
2871 ppc_patch (pdata->code, (guchar*)thunks);
2874 static int num_thunks = 0;
2876 if ((num_thunks % 20) == 0)
2877 g_print ("num_thunks lookup: %d\n", num_thunks);
2880 } else if ((thunks [0] == 0) && (thunks [1] == 0)) {
2881 /* found a free slot instead: emit thunk */
2882 code = (guchar*)thunks;
2883 ppc_lis (code, ppc_r0, (gulong)(pdata->target) >> 16);
2884 ppc_ori (code, ppc_r0, ppc_r0, (gulong)(pdata->target) & 0xffff);
2885 ppc_mtctr (code, ppc_r0);
2886 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
2887 mono_arch_flush_icache ((guchar*)thunks, 16);
2889 ppc_patch (pdata->code, (guchar*)thunks);
2892 static int num_thunks = 0;
2894 if ((num_thunks % 20) == 0)
2895 g_print ("num_thunks: %d\n", num_thunks);
2899 /* skip 16 bytes, the size of the thunk */
2903 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Redirect the branch at @code to @target via a thunk when the target
 * is out of direct-branch range.  Walks the domain's code chunks with
 * search_thunk_slot under the domain lock; the second foreach pass
 * (visible below) retries taking the first available slot.  Aborts
 * (g_assert) if no thunk could be found or created.
 */
2910 handle_thunk (int absolute, guchar *code, const guchar *target) {
2911 MonoDomain *domain = mono_domain_get ();
2915 pdata.target = target;
2916 pdata.absolute = absolute;
2919 mono_domain_lock (domain);
2920 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2923 /* this uses the first available slot */
2925 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2927 mono_domain_unlock (domain);
2929 if (pdata.found != 1)
2930 g_print ("thunk failed for %p from %p\n", target, code);
2931 g_assert (pdata.found == 1);
/*
 * patch_ins:
 * Overwrite the 4-byte instruction at @code with @ins and flush the
 * icache for that word so the new instruction is seen by fetch.
 */
2935 patch_ins (guint8 *code, guint32 ins)
2937 *(guint32*)code = ins;
2938 mono_arch_flush_icache (code, 4);
/*
 * ppc_patch_full:
 * Patch the branch/load sequence at @code so control transfers to
 * @target.  Handles, by primary opcode of the instruction at @code:
 * unconditional branches (opcode 18: rewrite the 26-bit displacement,
 * falling back to an absolute branch or a thunk via handle_thunk),
 * conditional branches (rewrite the 16-bit field), and the full
 * register-load call sequences (lis/ori[... on ppc64 also
 * sldi/oris/ori]/mtlr/blrl), which are rewritten in place.
 * @is_fd: on ppc64, whether @target is a function descriptor that must
 * be resolved through mono_get_addr_from_ftnptr.
 * NOTE(review): in-place rewriting of the call sequence is explicitly
 * not thread safe (see FIXMEs below).
 */
2942 ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
2944 guint32 ins = *(guint32*)code;
2945 guint32 prim = ins >> 26;
2948 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2950 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2951 gint diff = target - code;
/* I-form branch: keep the LK bit (ins & 1), splice in the signed
 * 26-bit displacement if it fits */
2954 if (diff <= 33554431){
2955 ins = (18 << 26) | (diff) | (ins & 1);
2956 patch_ins (code, ins);
2960 /* diff between 0 and -33554432 */
2961 if (diff >= -33554432){
2962 ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1);
2963 patch_ins (code, ins);
/* displacement out of range: try an absolute branch (AA bit, | 2)
 * when the target address itself fits 26 bits, else go via a thunk */
2968 if ((glong)target >= 0){
2969 if ((glong)target <= 33554431){
2970 ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2;
2971 patch_ins (code, ins);
2975 if ((glong)target >= -33554432){
2976 ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2;
2977 patch_ins (code, ins);
2982 handle_thunk (TRUE, code, target);
2985 g_assert_not_reached ();
/* B-form (conditional) branch: rewrite the 16-bit displacement/address
 * field, preserving the AA/LK bits (ins & 3); assert on overflow */
2993 guint32 li = (gulong)target;
2994 ins = (ins & 0xffff0000) | (ins & 3);
2995 ovf = li & 0xffff0000;
2996 if (ovf != 0 && ovf != 0xffff0000)
2997 g_assert_not_reached ();
3000 // FIXME: assert the top bits of li are 0
3002 gint diff = target - code;
3003 ins = (ins & 0xffff0000) | (ins & 3);
3004 ovf = diff & 0xffff0000;
3005 if (ovf != 0 && ovf != 0xffff0000)
3006 g_assert_not_reached ();
3010 patch_ins (code, ins);
/* lis (opcode 15) or blrl/blr/bcctrl: patch a register-load call
 * sequence rather than a single branch instruction */
3014 if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
3015 #ifdef __mono_ppc64__
3016 guint32 *seq = (guint32*)code;
3017 guint32 *branch_ins;
3019 /* the trampoline code will try to patch the blrl, blr, bcctr */
3020 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
3022 if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */
3027 if (ppc_is_load_op (seq [5])
3028 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3029 /* With function descs we need to do more careful
3031 || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
3034 branch_ins = seq + 8;
3036 branch_ins = seq + 6;
3039 seq = (guint32*)code;
3040 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
3041 g_assert (mono_ppc_is_direct_call_sequence (branch_ins));
3043 if (ppc_is_load_op (seq [5])) {
3044 g_assert (ppc_is_load_op (seq [6]));
3047 guint8 *buf = (guint8*)&seq [5];
3048 ppc_mr (buf, PPC_CALL_REG, ppc_r12);
3053 target = mono_get_addr_from_ftnptr ((gpointer)target);
3056 /* FIXME: make this thread safe */
3057 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3058 /* FIXME: we're assuming we're using r12 here */
3059 ppc_load_ptr_sequence (code, ppc_r12, target);
3061 ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
3063 mono_arch_flush_icache ((guint8*)seq, 28);
3066 /* the trampoline code will try to patch the blrl, blr, bcctr */
3067 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
3070 /* this is the lis/ori/mtlr/blrl sequence */
3071 seq = (guint32*)code;
3072 g_assert ((seq [0] >> 26) == 15);
3073 g_assert ((seq [1] >> 26) == 24);
3074 g_assert ((seq [2] >> 26) == 31);
3075 g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
3076 /* FIXME: make this thread safe */
3077 ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
3078 ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
3079 mono_arch_flush_icache (code - 8, 8);
3082 g_assert_not_reached ();
3084 // g_print ("patched with 0x%08x\n", ins);
/*
 * ppc_patch:
 * Convenience wrapper: patch @code to branch to @target, treating the
 * target as a plain address (is_fd == FALSE, not a function descriptor).
 */
3088 ppc_patch (guchar *code, const guchar *target)
3090 ppc_patch_full (code, target, FALSE);
/*
 * mono_ppc_patch:
 * Public (cross-file) entry point forwarding to ppc_patch.
 */
3094 mono_ppc_patch (guchar *code, const guchar *target)
3096 ppc_patch (code, target);
/*
 * emit_move_return_value:
 * After a call instruction, emit the code moving the ABI return value
 * into ins->dreg.  Visible case: FP calls return in ppc_f1, so an fmr
 * is emitted only when dreg differs (other opcode cases are handled in
 * elided parts of this switch).
 */
3100 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3102 switch (ins->opcode) {
3105 case OP_FCALL_MEMBASE:
3106 if (ins->dreg != ppc_f1)
3107 ppc_fmr (code, ins->dreg, ppc_f1);
/*
 * ins_native_length:
 * Maximum native code length for @ins, read from the MONO_INST_LEN
 * column of the machine-description spec table (cpu-ppc*.h).
 */
3115 ins_native_length (MonoCompile *cfg, MonoInst *ins)
3117 return ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
/*
 * emit_reserve_param_area:
 * Grow the stack by cfg->param_area (rounded up to
 * MONO_ARCH_FRAME_ALIGNMENT), keeping the ABI back-chain intact:
 * reload the saved SP word through r0 and store it with update at the
 * new stack pointer (indexed form via r12 when -size exceeds imm16).
 */
3121 emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
3123 long size = cfg->param_area;
/* round up to the frame alignment */
3125 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3126 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3131 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3132 if (ppc_is_imm16 (-size)) {
3133 ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
3135 ppc_load (code, ppc_r12, -size);
3136 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/*
 * emit_unreserve_param_area:
 * Mirror of emit_reserve_param_area: shrink the stack by the same
 * aligned cfg->param_area amount, preserving the back-chain word via
 * r0 (indexed form via r12 when size exceeds imm16).
 */
3145 emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
3147 long size = cfg->param_area;
/* must match the rounding done when reserving */
3148 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3149 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3153 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3154 if (ppc_is_imm16 (size)) {
3155 ppc_stptr_update (code, ppc_r0, size, ppc_sp);
3157 ppc_load (code, ppc_r12, size);
3158 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
3164 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3168 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3170 MonoInst *ins, *next;
3173 guint8 *code = cfg->native_code + cfg->code_len;
3174 MonoInst *last_ins = NULL;
3175 guint last_offset = 0;
3179 /* we don't align basic blocks of loops on ppc */
3181 if (cfg->verbose_level > 2)
3182 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3184 cpos = bb->max_offset;
3186 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3187 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3188 //g_assert (!mono_compile_aot);
3191 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3192 /* this is not thread save, but good enough */
3193 /* fixme: howto handle overflows? */
3194 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3197 MONO_BB_FOR_EACH_INS (bb, ins) {
3198 offset = code - cfg->native_code;
3200 max_len = ins_native_length (cfg, ins);
3202 if (offset > (cfg->code_size - max_len - 16)) {
3203 cfg->code_size *= 2;
3204 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3205 code = cfg->native_code + offset;
3207 // if (ins->cil_code)
3208 // g_print ("cil code\n");
3209 mono_debug_record_line_number (cfg, ins, offset);
3211 switch (normalize_opcode (ins->opcode)) {
3212 case OP_RELAXED_NOP:
3215 case OP_DUMMY_STORE:
3216 case OP_NOT_REACHED:
3219 case OP_IL_SEQ_POINT:
3220 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3222 case OP_SEQ_POINT: {
3225 if (cfg->compile_aot)
3229 * Read from the single stepping trigger page. This will cause a
3230 * SIGSEGV when single stepping is enabled.
3231 * We do this _before_ the breakpoint, so single stepping after
3232 * a breakpoint is hit will step to the next IL offset.
3234 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3235 ppc_load (code, ppc_r12, (gsize)ss_trigger_page);
3236 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
3239 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3242 * A placeholder for a possible breakpoint inserted by
3243 * mono_arch_set_breakpoint ().
3245 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
3250 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3251 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3252 ppc_mr (code, ppc_r4, ppc_r0);
3255 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3256 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3257 ppc_mr (code, ppc_r4, ppc_r0);
3259 case OP_MEMORY_BARRIER:
3262 case OP_STOREI1_MEMBASE_REG:
3263 if (ppc_is_imm16 (ins->inst_offset)) {
3264 ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3266 if (ppc_is_imm32 (ins->inst_offset)) {
3267 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3268 ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11);
3270 ppc_load (code, ppc_r0, ins->inst_offset);
3271 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3275 case OP_STOREI2_MEMBASE_REG:
3276 if (ppc_is_imm16 (ins->inst_offset)) {
3277 ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3279 if (ppc_is_imm32 (ins->inst_offset)) {
3280 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3281 ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11);
3283 ppc_load (code, ppc_r0, ins->inst_offset);
3284 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3288 case OP_STORE_MEMBASE_REG:
3289 if (ppc_is_imm16 (ins->inst_offset)) {
3290 ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3292 if (ppc_is_imm32 (ins->inst_offset)) {
3293 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3294 ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11);
3296 ppc_load (code, ppc_r0, ins->inst_offset);
3297 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3301 #ifdef __mono_ilp32__
3302 case OP_STOREI8_MEMBASE_REG:
3303 if (ppc_is_imm16 (ins->inst_offset)) {
3304 ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3306 ppc_load (code, ppc_r0, ins->inst_offset);
3307 ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3311 case OP_STOREI1_MEMINDEX:
3312 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3314 case OP_STOREI2_MEMINDEX:
3315 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3317 case OP_STORE_MEMINDEX:
3318 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3321 g_assert_not_reached ();
3323 case OP_LOAD_MEMBASE:
3324 if (ppc_is_imm16 (ins->inst_offset)) {
3325 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3327 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3328 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3329 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg);
3331 ppc_load (code, ppc_r0, ins->inst_offset);
3332 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3336 case OP_LOADI4_MEMBASE:
3337 #ifdef __mono_ppc64__
3338 if (ppc_is_imm16 (ins->inst_offset)) {
3339 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3341 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3342 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3343 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg);
3345 ppc_load (code, ppc_r0, ins->inst_offset);
3346 ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3351 case OP_LOADU4_MEMBASE:
3352 if (ppc_is_imm16 (ins->inst_offset)) {
3353 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3355 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3356 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3357 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg);
3359 ppc_load (code, ppc_r0, ins->inst_offset);
3360 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3364 case OP_LOADI1_MEMBASE:
3365 case OP_LOADU1_MEMBASE:
3366 if (ppc_is_imm16 (ins->inst_offset)) {
3367 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3369 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3370 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3371 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg);
3373 ppc_load (code, ppc_r0, ins->inst_offset);
3374 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3377 if (ins->opcode == OP_LOADI1_MEMBASE)
3378 ppc_extsb (code, ins->dreg, ins->dreg);
3380 case OP_LOADU2_MEMBASE:
3381 if (ppc_is_imm16 (ins->inst_offset)) {
3382 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3384 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3385 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3386 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg);
3388 ppc_load (code, ppc_r0, ins->inst_offset);
3389 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3393 case OP_LOADI2_MEMBASE:
3394 if (ppc_is_imm16 (ins->inst_offset)) {
3395 ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3397 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3398 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3399 ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg);
3401 ppc_load (code, ppc_r0, ins->inst_offset);
3402 ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3406 #ifdef __mono_ilp32__
3407 case OP_LOADI8_MEMBASE:
3408 if (ppc_is_imm16 (ins->inst_offset)) {
3409 ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3411 ppc_load (code, ppc_r0, ins->inst_offset);
3412 ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3416 case OP_LOAD_MEMINDEX:
3417 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3419 case OP_LOADI4_MEMINDEX:
3420 #ifdef __mono_ppc64__
3421 ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3424 case OP_LOADU4_MEMINDEX:
3425 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3427 case OP_LOADU2_MEMINDEX:
3428 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3430 case OP_LOADI2_MEMINDEX:
3431 ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3433 case OP_LOADU1_MEMINDEX:
3434 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3436 case OP_LOADI1_MEMINDEX:
3437 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3438 ppc_extsb (code, ins->dreg, ins->dreg);
3440 case OP_ICONV_TO_I1:
3441 CASE_PPC64 (OP_LCONV_TO_I1)
3442 ppc_extsb (code, ins->dreg, ins->sreg1);
3444 case OP_ICONV_TO_I2:
3445 CASE_PPC64 (OP_LCONV_TO_I2)
3446 ppc_extsh (code, ins->dreg, ins->sreg1);
3448 case OP_ICONV_TO_U1:
3449 CASE_PPC64 (OP_LCONV_TO_U1)
3450 ppc_clrlwi (code, ins->dreg, ins->sreg1, 24);
3452 case OP_ICONV_TO_U2:
3453 CASE_PPC64 (OP_LCONV_TO_U2)
3454 ppc_clrlwi (code, ins->dreg, ins->sreg1, 16);
3458 CASE_PPC64 (OP_LCOMPARE)
3459 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1;
3461 if (next && compare_opcode_is_unsigned (next->opcode))
3462 ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2);
3464 ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2);
3466 case OP_COMPARE_IMM:
3467 case OP_ICOMPARE_IMM:
3468 CASE_PPC64 (OP_LCOMPARE_IMM)
3469 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1;
3471 if (next && compare_opcode_is_unsigned (next->opcode)) {
3472 if (ppc_is_uimm16 (ins->inst_imm)) {
3473 ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3475 g_assert_not_reached ();
3478 if (ppc_is_imm16 (ins->inst_imm)) {
3479 ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3481 g_assert_not_reached ();
3487 * gdb does not like encountering a trap in the debugged code. So
3488 * instead of emitting a trap, we emit a call a C function and place a
3492 ppc_mr (code, ppc_r3, ins->sreg1);
3493 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3494 (gpointer)"mono_break");
3495 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3496 ppc_load_func (code, PPC_CALL_REG, 0);
3497 ppc_mtlr (code, PPC_CALL_REG);
3505 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3508 CASE_PPC64 (OP_LADD)
3509 ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2);
3513 ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2);
3516 if (ppc_is_imm16 (ins->inst_imm)) {
3517 ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3519 g_assert_not_reached ();
3524 CASE_PPC64 (OP_LADD_IMM)
3525 if (ppc_is_imm16 (ins->inst_imm)) {
3526 ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm);
3528 g_assert_not_reached ();
3532 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3534 ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2);
3535 ppc_mfspr (code, ppc_r0, ppc_xer);
3536 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3537 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3539 case OP_IADD_OVF_UN:
3540 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3542 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3543 ppc_mfspr (code, ppc_r0, ppc_xer);
3544 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3545 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3548 CASE_PPC64 (OP_LSUB_OVF)
3549 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3551 ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1);
3552 ppc_mfspr (code, ppc_r0, ppc_xer);
3553 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3554 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3556 case OP_ISUB_OVF_UN:
3557 CASE_PPC64 (OP_LSUB_OVF_UN)
3558 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3560 ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
3561 ppc_mfspr (code, ppc_r0, ppc_xer);
3562 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3563 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3565 case OP_ADD_OVF_CARRY:
3566 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3568 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3569 ppc_mfspr (code, ppc_r0, ppc_xer);
3570 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3571 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3573 case OP_ADD_OVF_UN_CARRY:
3574 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3576 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3577 ppc_mfspr (code, ppc_r0, ppc_xer);
3578 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3579 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3581 case OP_SUB_OVF_CARRY:
3582 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3584 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3585 ppc_mfspr (code, ppc_r0, ppc_xer);
3586 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3587 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3589 case OP_SUB_OVF_UN_CARRY:
3590 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3592 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3593 ppc_mfspr (code, ppc_r0, ppc_xer);
3594 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3595 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3599 ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1);
3602 CASE_PPC64 (OP_LSUB)
3603 ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
3607 ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
3611 CASE_PPC64 (OP_LSUB_IMM)
3612 // we add the negated value
3613 if (ppc_is_imm16 (-ins->inst_imm))
3614 ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
3616 g_assert_not_reached ();
3620 g_assert (ppc_is_imm16 (ins->inst_imm));
3621 ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3624 ppc_subfze (code, ins->dreg, ins->sreg1);
3627 CASE_PPC64 (OP_LAND)
3628 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3629 ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2);
3633 CASE_PPC64 (OP_LAND_IMM)
3634 if (!(ins->inst_imm & 0xffff0000)) {
3635 ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm);
3636 } else if (!(ins->inst_imm & 0xffff)) {
3637 ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16));
3639 g_assert_not_reached ();
3643 CASE_PPC64 (OP_LDIV) {
3644 guint8 *divisor_is_m1;
3645 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3647 ppc_compare_reg_imm (code, 0, ins->sreg2, -1);
3648 divisor_is_m1 = code;
3649 ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
3650 ppc_lis (code, ppc_r0, 0x8000);
3651 #ifdef __mono_ppc64__
3652 if (ins->opcode == OP_LDIV)
3653 ppc_sldi (code, ppc_r0, ppc_r0, 32);
3655 ppc_compare (code, 0, ins->sreg1, ppc_r0);
3656 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3657 ppc_patch (divisor_is_m1, code);
3658 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3660 if (ins->opcode == OP_IDIV)
3661 ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2);
3662 #ifdef __mono_ppc64__
3664 ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2);
3666 ppc_mfspr (code, ppc_r0, ppc_xer);
3667 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3668 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3672 CASE_PPC64 (OP_LDIV_UN)
3673 if (ins->opcode == OP_IDIV_UN)
3674 ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2);
3675 #ifdef __mono_ppc64__
3677 ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2);
3679 ppc_mfspr (code, ppc_r0, ppc_xer);
3680 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
/* NOTE(review): partial view — interior of the opcode switch in
 * mono_arch_output_basic_block(). Case labels, braces and break
 * statements between the visible lines are elided in this extract. */
/* division path: the preceding compare set EQ when the divisor was 0 */
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
g_assert_not_reached ();
/* bitwise OR, register-register */
ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2);
CASE_PPC64 (OP_LOR_IMM)
/* OR with immediate: low 16 bits via ori, high 16 via oris; a value
 * needing both halves cannot be encoded in one instruction */
if (!(ins->inst_imm & 0xffff0000)) {
    ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
} else if (!(ins->inst_imm & 0xffff)) {
    /* NOTE(review): operand order here (dreg, sreg1) is swapped relative
     * to the ppc_ori call above and to the xori/xoris calls below —
     * verify against the ppc_oris macro signature in ppc-codegen.h */
    ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16));
g_assert_not_reached ();
CASE_PPC64 (OP_LXOR)
ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
CASE_PPC64 (OP_LXOR_IMM)
/* XOR with immediate: same half-word split as OR above */
if (!(ins->inst_imm & 0xffff0000)) {
    ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm);
} else if (!(ins->inst_imm & 0xffff)) {
    ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16));
g_assert_not_reached ();
CASE_PPC64 (OP_LSHL)
ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2);
CASE_PPC64 (OP_LSHL_IMM)
ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
/* logical right shift by 0 would be a no-op: just move the register */
if (MASK_SHIFT_IMM (ins->inst_imm))
    ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
ppc_mr (code, ins->dreg, ins->sreg1);
ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
CASE_PPC64 (OP_LNOT)
ppc_not (code, ins->dreg, ins->sreg1);
CASE_PPC64 (OP_LNEG)
ppc_neg (code, ins->dreg, ins->sreg1);
CASE_PPC64 (OP_LMUL)
ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
CASE_PPC64 (OP_LMUL_IMM)
/* mulli only takes a 16-bit signed immediate; larger values must have
 * been lowered earlier */
if (ppc_is_imm16 (ins->inst_imm)) {
    ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm);
g_assert_not_reached ();
CASE_PPC64 (OP_LMUL_OVF)
/* we cannot use mcrxr, since it's not implemented on some processors
 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */
if (ins->opcode == OP_IMUL_OVF)
    ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2);
#ifdef __mono_ppc64__
    ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2);
/* read XER and test the OV bit explicitly (bit 1<<14 after andis shift) */
ppc_mfspr (code, ppc_r0, ppc_xer);
ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
case OP_IMUL_OVF_UN:
CASE_PPC64 (OP_LMUL_OVF_UN)
/* we first multiply to get the high word and compare to 0
 * to set the flags, then the result is discarded and then
 * we multiply to get the lower bits result */
if (ins->opcode == OP_IMUL_OVF_UN)
    ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2);
#ifdef __mono_ppc64__
    ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2);
/* non-zero high word means the unsigned product overflowed */
ppc_cmpi (code, 0, 0, ppc_r0, 0);
EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException");
ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
/* load of an integer constant (inst_c0 / inst_l for the long form) */
ppc_load (code, ins->dreg, ins->inst_c0);
ppc_load (code, ins->dreg, ins->inst_l);
case OP_LOAD_GOTADDR:
    /* The PLT implementation depends on this */
    g_assert (ins->dreg == ppc_r30);
    code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
// FIXME: Fix max instruction length
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
/* arch_emit_got_access () patches this */
ppc_load32 (code, ppc_r0, 0);
ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
ppc_load_sequence (code, ins->dreg, 0);
CASE_PPC32 (OP_ICONV_TO_I4)
CASE_PPC32 (OP_ICONV_TO_U4)
/* register move cases: avoid emitting a no-op mr */
if (ins->dreg != ins->sreg1)
    ppc_mr (code, ins->dreg, ins->sreg1);
/* swap two argument registers into r3/r4, using r0 as scratch when
 * sreg1 already occupies r3 (elided lines update 'saved' in between) */
int saved = ins->sreg1;
if (ins->sreg1 == ppc_r3) {
    ppc_mr (code, ppc_r0, ins->sreg1);
if (ins->sreg2 != ppc_r3)
    ppc_mr (code, ppc_r3, ins->sreg2);
if (saved != ppc_r4)
    ppc_mr (code, ppc_r4, saved);
if (ins->dreg != ins->sreg1)
    ppc_fmr (code, ins->dreg, ins->sreg1);
case OP_MOVE_F_TO_I4:
    /* bounce through the red zone below sp to move bits FPR -> GPR */
    ppc_stfs (code, ins->sreg1, -4, ppc_r1);
    ppc_ldptr (code, ins->dreg, -4, ppc_r1);
case OP_MOVE_I4_TO_F:
    /* bounce through the red zone below sp to move bits GPR -> FPR */
    ppc_stw (code, ins->sreg1, -4, ppc_r1);
    ppc_lfs (code, ins->dreg, -4, ppc_r1);
case OP_FCONV_TO_R4:
    /* round the double down to single precision */
    ppc_frsp (code, ins->dreg, ins->sreg1);
/* NOTE(review): partial view — OP_JMP (tail call) emission inside
 * mono_arch_output_basic_block(); some lines are elided here. */
MonoCallInst *call = (MonoCallInst*)ins;
/* Keep in sync with mono_arch_emit_epilog */
g_assert (!cfg->method->save_lmf);
/* Note: we can use ppc_r12 here because it is dead anyway:
 * we're leaving the method. */
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
    /* reload the saved return address into LR before jumping */
    long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
    if (ppc_is_imm16 (ret_offset)) {
        ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
    /* offset too large for a 16-bit displacement: go indexed via r12 */
    ppc_load (code, ppc_r12, ret_offset);
    ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
ppc_mtlr (code, ppc_r0);
/* point r12 at the caller's frame (sp + our frame size) */
if (ppc_is_imm16 (cfg->stack_usage)) {
    ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
/* cfg->stack_usage is an int, so we can use
 * an addis/addi sequence here even in 64-bit. */
ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
if (!cfg->method->save_lmf) {
    /* restore the callee-saved registers we spilled in the prolog */
    for (i = 31; i >= 13; --i) {
        if (cfg->used_int_regs & (1 << i)) {
            pos += sizeof (gpointer);
            ppc_ldptr (code, i, -pos, ppc_r12);
/* FIXME restore from MonoLMF: though this can't happen yet */
/* Copy arguments on the stack to our argument area */
if (call->stack_usage) {
    code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
    /* r12 was clobbered */
    g_assert (cfg->frame_reg == ppc_sp);
    /* recompute the caller-frame pointer after the memcpy clobber */
    if (ppc_is_imm16 (cfg->stack_usage)) {
        ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
    /* cfg->stack_usage is an int, so we can use
     * an addis/addi sequence here even in 64-bit. */
    ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
    ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
/* pop our frame: sp now points at the caller's frame */
ppc_mr (code, ppc_sp, ppc_r12);
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
if (cfg->compile_aot) {
    /* arch_emit_got_access () patches this */
    ppc_load32 (code, ppc_r0, 0);
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
    /* load the target through its function descriptor (entry at offset 0) */
    ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0);
    ppc_ldptr (code, ppc_r0, 0, ppc_r12);
    ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
/* branch through CTR to the tail-call target */
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* NOTE(review): partial view — null check, sig-cookie and call opcodes
 * inside mono_arch_output_basic_block(); some lines are elided. */
/* ensure ins->sreg1 is not NULL */
ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
/* store the address of the varargs signature cookie area through sreg1 */
long cookie_offset = cfg->sig_cookie + cfg->stack_usage;
if (ppc_is_imm16 (cookie_offset)) {
    ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset);
ppc_load (code, ppc_r0, cookie_offset);
ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
ppc_stptr (code, ppc_r0, 0, ins->sreg1);
/* direct call: record a METHOD or ABS patch for the target */
call = (MonoCallInst*)ins;
if (ins->flags & MONO_INST_HAS_METHOD)
    mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
    /* indirect call: the 0 placeholder is patched with the real target */
    ppc_load_func (code, PPC_CALL_REG, 0);
    ppc_mtlr (code, PPC_CALL_REG);
/* FIXME: this should be handled somewhere else in the new jit */
code = emit_move_return_value (cfg, ins, code);
case OP_VOIDCALL_REG:
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
    /* sreg1 is a function descriptor: entry point at 0, TOC at 8 */
    ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
    /* FIXME: if we know that this is a method, we
       can omit this load */
    ppc_ldptr (code, ppc_r2, 8, ins->sreg1);
    ppc_mtlr (code, ppc_r0);
#if (_CALL_ELF == 2)
    if (ins->flags & MONO_INST_HAS_METHOD) {
        // Not a global entry point
        // Need to set up r12 with function entry address for global entry point
        if (ppc_r12 != ins->sreg1) {
            ppc_mr(code,ppc_r12,ins->sreg1);
ppc_mtlr (code, ins->sreg1);
/* FIXME: this should be handled somewhere else in the new jit */
code = emit_move_return_value (cfg, ins, code);
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
    if (cfg->compile_aot && ins->sreg1 == ppc_r12) {
        /* The trampolines clobber this */
        ppc_mr (code, ppc_r29, ins->sreg1);
        ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
    ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1);
    ppc_mtlr (code, ppc_r0);
    /* FIXME: this should be handled somewhere else in the new jit */
    code = emit_move_return_value (cfg, ins, code);
/* NOTE(review): partial view — OP_LOCALLOC emission; some lines elided. */
guint8 * zero_loop_jump, * zero_loop_start;
/* keep alignment */
int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
int area_offset = alloca_waste;
/* round the requested size (sreg1) up, including slack for alignment */
ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31);
/* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4);
/* use ctr to store the number of words to 0 if needed */
if (ins->flags & MONO_INST_INIT) {
    /* we zero 4 bytes at a time:
     * we add 7 instead of 3 so that we set the counter to
     * at least 1, otherwise the bdnz instruction will make
     * it negative and iterate billions of times. */
    ppc_addi (code, ppc_r0, ins->sreg1, 7);
    ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2);
    ppc_mtctr (code, ppc_r0);
/* extend the frame downward while keeping the back-chain word intact */
ppc_ldptr (code, ppc_r0, 0, ppc_sp);
ppc_neg (code, ppc_r12, ppc_r12);
ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/* FIXME: make this loop work in 8 byte
   increments on PPC64 */
if (ins->flags & MONO_INST_INIT) {
    /* adjust the dest reg by -4 so we can use stwu */
    /* we actually adjust -8 because we let the loop
       (continuation of this comment elided in this view) */
    ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
    ppc_li (code, ppc_r12, 0);
    /* bdnz loop storing zero words with pre-increment (stwu) */
    zero_loop_start = code;
    ppc_stwu (code, ppc_r12, 4, ins->dreg);
    zero_loop_jump = code;
    ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
    ppc_patch (zero_loop_jump, zero_loop_start);
/* result: pointer just above the reserved param/linkage area */
ppc_addi (code, ins->dreg, ppc_sp, area_offset);
/* NOTE(review): partial view — throw/rethrow and exception-handler
 * opcodes; case labels and braces between lines are elided. */
/* OP_THROW: exception object goes in r3, then call the throw helper */
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
    (gpointer)"mono_arch_throw_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
    ppc_load_func (code, PPC_CALL_REG, 0);
    ppc_mtlr (code, PPC_CALL_REG);
/* OP_RETHROW: same shape with the rethrow helper */
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
    (gpointer)"mono_arch_rethrow_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
    ppc_load_func (code, PPC_CALL_REG, 0);
    ppc_mtlr (code, PPC_CALL_REG);
case OP_START_HANDLER: {
    /* save LR (the handler return address) into the spvar slot */
    MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
    g_assert (spvar->inst_basereg != ppc_sp);
    code = emit_reserve_param_area (cfg, code);
    ppc_mflr (code, ppc_r0);
    if (ppc_is_imm16 (spvar->inst_offset)) {
        ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
    ppc_load (code, ppc_r12, spvar->inst_offset);
    ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg);
case OP_ENDFILTER: {
    /* filter result must be returned in r3; restore LR from spvar */
    MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
    g_assert (spvar->inst_basereg != ppc_sp);
    code = emit_unreserve_param_area (cfg, code);
    if (ins->sreg1 != ppc_r3)
        ppc_mr (code, ppc_r3, ins->sreg1);
    if (ppc_is_imm16 (spvar->inst_offset)) {
        ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
    ppc_load (code, ppc_r12, spvar->inst_offset);
    ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12);
    ppc_mtlr (code, ppc_r0);
case OP_ENDFINALLY: {
    /* restore LR saved by OP_START_HANDLER and return to the caller */
    MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
    g_assert (spvar->inst_basereg != ppc_sp);
    code = emit_unreserve_param_area (cfg, code);
    ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
    ppc_mtlr (code, ppc_r0);
case OP_CALL_HANDLER:
    mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
    mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
/* OP_LABEL (presumably): record the native offset of this point */
ins->inst_c0 = code - cfg->native_code;
/* if (ins->inst_target_bb->native_offset) { ...
   //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
   } else — dead commented-out short-branch path, kept from original */
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
/* indirect branch through CTR (OP_BR_REG, presumably) */
ppc_mtctr (code, ins->sreg1);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* NOTE(review): partial view — set-condition (materialize a 0/1 from the
 * condition register) and conditional-exception opcodes; elided lines. */
CASE_PPC64 (OP_LCEQ)
/* dreg = (flags EQ) ? 1 : 0 — skip the second li when EQ is false */
ppc_li (code, ins->dreg, 0);
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
ppc_li (code, ins->dreg, 1);
CASE_PPC64 (OP_LCLT)
CASE_PPC64 (OP_LCLT_UN)
/* dreg = (flags LT) ? 1 : 0 */
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
ppc_li (code, ins->dreg, 0);
CASE_PPC64 (OP_LCGT)
CASE_PPC64 (OP_LCGT_UN)
/* dreg = (flags GT) ? 1 : 0 */
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
ppc_li (code, ins->dreg, 0);
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_GT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_GE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
    /* throw the exception named by inst_p1 when the condition holds;
     * the condition index is derived from the opcode offset */
    EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
case OP_COND_EXC_IEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_ILE_UN:
    /* 32-bit variants of the above */
    EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
/* NOTE(review): partial view — integer branches, FP constant loading,
 * R4/R8 loads/stores and float->int conversions; elided lines. */
EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
/* floating point opcodes */
g_assert (cfg->compile_aot);
/* FIXME: Optimize this */
/* inline double constant: the 8 bytes are embedded in the code stream
 * and loaded PC-relative via mflr (branch-and-link lines elided) */
ppc_mflr (code, ppc_r12);
*(double*)code = *(double*)ins->inst_p0;
ppc_lfd (code, ins->dreg, 8, ppc_r12);
g_assert_not_reached ();
case OP_STORER8_MEMBASE_REG:
    /* three tiers by offset size: imm16 direct, imm32 addis+stfd,
     * otherwise fully materialized offset with stfdx */
    if (ppc_is_imm16 (ins->inst_offset)) {
        ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
    if (ppc_is_imm32 (ins->inst_offset)) {
        ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
        ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11);
    ppc_load (code, ppc_r0, ins->inst_offset);
    ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
case OP_LOADR8_MEMBASE:
    if (ppc_is_imm16 (ins->inst_offset)) {
        ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
    if (ppc_is_imm32 (ins->inst_offset)) {
        /* NOTE(review): the large-offset paths of this *load* use
         * ins->inst_destbasereg while the imm16 path uses
         * ins->inst_basereg — verify these alias for load opcodes,
         * otherwise the big-offset paths use the wrong base register */
        ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
        ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11);
    ppc_load (code, ppc_r0, ins->inst_offset);
    ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
case OP_STORER4_MEMBASE_REG:
    /* round to single before storing 4 bytes */
    ppc_frsp (code, ins->sreg1, ins->sreg1);
    if (ppc_is_imm16 (ins->inst_offset)) {
        ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
    if (ppc_is_imm32 (ins->inst_offset)) {
        ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
        ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11);
    ppc_load (code, ppc_r0, ins->inst_offset);
    ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
case OP_LOADR4_MEMBASE:
    if (ppc_is_imm16 (ins->inst_offset)) {
        ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
    if (ppc_is_imm32 (ins->inst_offset)) {
        /* NOTE(review): same inst_destbasereg-vs-inst_basereg question
         * as OP_LOADR8_MEMBASE above */
        ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
        ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11);
    ppc_load (code, ppc_r0, ins->inst_offset);
    ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
case OP_LOADR4_MEMINDEX:
    ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
case OP_LOADR8_MEMINDEX:
    ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
case OP_STORER4_MEMINDEX:
    ppc_frsp (code, ins->sreg1, ins->sreg1);
    ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
case OP_STORER8_MEMINDEX:
    ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
case CEE_CONV_R4: /* FIXME: change precision */
    g_assert_not_reached ();
case OP_FCONV_TO_I1:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
case OP_FCONV_TO_U1:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
case OP_FCONV_TO_I2:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
case OP_FCONV_TO_U2:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
case OP_FCONV_TO_I4:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
case OP_FCONV_TO_U4:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
case OP_LCONV_TO_R_UN:
    /* Implemented as helper calls */
    g_assert_not_reached ();
case OP_LCONV_TO_OVF_I4_2:
case OP_LCONV_TO_OVF_I: {
#ifdef __mono_ppc64__
    /* 32-bit path: sreg1 = low word, sreg2 = high word. The pair fits
     * in an int32 iff the high word is the sign-extension of the low
     * word's sign (0 for non-negative, -1 for negative). */
    guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
    // Check if its negative
    ppc_cmpi (code, 0, 0, ins->sreg1, 0);
    negative_branch = code;
    ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
    // Its positive msword == 0
    ppc_cmpi (code, 0, 0, ins->sreg2, 0);
    msword_positive_branch = code;
    ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
    /* fall through to the overflow exception */
    ovf_ex_target = code;
    EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
    /* negative case: high word must be exactly -1 */
    ppc_patch (negative_branch, code);
    ppc_cmpi (code, 0, 0, ins->sreg2, -1);
    msword_negative_branch = code;
    ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
    ppc_patch (msword_negative_branch, ovf_ex_target);
    /* in range: the low word is the result */
    ppc_patch (msword_positive_branch, code);
    if (ins->dreg != ins->sreg1)
        ppc_mr (code, ins->dreg, ins->sreg1);
/* NOTE(review): partial view — FP arithmetic, FP compares and FP
 * branches; case labels between the lines are elided. */
ppc_fsqrtd (code, ins->dreg, ins->sreg1);
ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_fneg (code, ins->dreg, ins->sreg1);
g_assert_not_reached ();
/* unordered compare: sets CR0 including the SO (unordered) bit */
ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
/* FCEQ: dreg = (EQ) ? 1 : 0 after an ordered compare */
ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
ppc_li (code, ins->dreg, 0);
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
ppc_li (code, ins->dreg, 1);
/* FCLT */
ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
ppc_li (code, ins->dreg, 0);
/* FCLT_UN: unordered (SO set) also yields 1 */
ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
ppc_li (code, ins->dreg, 0);
/* FCGT */
ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
ppc_li (code, ins->dreg, 0);
/* FCGT_UN: unordered also yields 1 */
ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
ppc_li (code, ins->dreg, 0);
/* FP branches: the *_UN forms branch when unordered, the ordered forms
 * skip the branch when SO is set */
EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ);
EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ);
EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ);
EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ);
EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ);
g_assert_not_reached ();
case OP_CHECK_FINITE: {
    /* sreg1 holds the high word of the double: clear the sign bit,
     * then bias so the exponent-all-ones (inf/NaN) case flips a
     * testable bit; throw ArithmeticException for non-finite values */
    ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31);
    ppc_addis (code, ins->sreg1, ins->sreg1, -32752);
    ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
    EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
/* OP_AOTCONST (presumably): placeholder constant patched later */
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0);
#ifdef __mono_ppc64__
ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL);
ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL);
#ifdef __mono_ppc64__
case OP_ICONV_TO_I4:
    /* sign-extend the low 32 bits */
    ppc_extsw (code, ins->dreg, ins->sreg1);
case OP_ICONV_TO_U4:
    /* zero the upper 32 bits */
    ppc_clrldi (code, ins->dreg, ins->sreg1, 32);
case OP_ICONV_TO_R4:
case OP_ICONV_TO_R8:
case OP_LCONV_TO_R4:
case OP_LCONV_TO_R8: {
    /* 64-bit int -> float: sign-extend 32-bit sources first, move the
     * bits into an FPR (direct move when available, else via memory),
     * then convert with fcfid */
    if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) {
        ppc_extsw (code, ppc_r0, ins->sreg1);
    if (cpu_hw_caps & PPC_MOVE_FPR_GPR) {
        ppc_mffgpr (code, ins->dreg, tmp);
    ppc_str (code, tmp, -8, ppc_r1);
    ppc_lfd (code, ins->dreg, -8, ppc_r1);
    ppc_fcfid (code, ins->dreg, ins->dreg);
    if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4)
        ppc_frsp (code, ins->dreg, ins->dreg);
/* 64-bit shifts */
ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2);
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
   (closing of this comment elided in this view) */
ppc_mfspr (code, ppc_r0, ppc_xer);
ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
case OP_COND_EXC_OV:
    ppc_mfspr (code, ppc_r0, ppc_xer);
    ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */
    EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
/* 64-bit conditional branches */
EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ);
case OP_FCONV_TO_I8:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
case OP_FCONV_TO_U8:
    code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
case OP_STOREI4_MEMBASE_REG:
    if (ppc_is_imm16 (ins->inst_offset)) {
        ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
    ppc_load (code, ppc_r0, ins->inst_offset);
    ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
case OP_STOREI4_MEMINDEX:
    /* base/index order swapped vs the stwx above — harmless since the
     * effective address is rA+rB either way */
    ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg);
/* OP_ISHR_IMM */
ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
case OP_ISHR_UN_IMM:
    /* srwi with shift 0 is a no-op: emit a plain move instead */
    if (ins->inst_imm & 0x1f)
        ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
ppc_mr (code, ins->dreg, ins->sreg1);
case OP_ICONV_TO_R4:
case OP_ICONV_TO_R8: {
    /* 32-bit int -> float, ppc32 with 64-bit FP ISA: build the
     * sign-extended 64-bit value in memory, load it and fcfid */
    if (cpu_hw_caps & PPC_ISA_64) {
        ppc_srawi(code, ppc_r0, ins->sreg1, 31);
        ppc_stw (code, ppc_r0, -8, ppc_r1);
        ppc_stw (code, ins->sreg1, -4, ppc_r1);
        ppc_lfd (code, ins->dreg, -8, ppc_r1);
        ppc_fcfid (code, ins->dreg, ins->dreg);
        if (ins->opcode == OP_ICONV_TO_R4)
            ppc_frsp (code, ins->dreg, ins->dreg);
case OP_ATOMIC_ADD_I4:
CASE_PPC64 (OP_ATOMIC_ADD_I8) {
    /* lwarx/stwcx. (or ldarx/stdcx.) reservation loop: retry the
     * load-add-store until the conditional store succeeds */
    int location = ins->inst_basereg;
    int addend = ins->sreg2;
    guint8 *loop, *branch;
    g_assert (ins->inst_offset == 0);
    if (ins->opcode == OP_ATOMIC_ADD_I4)
        ppc_lwarx (code, ppc_r0, 0, location);
#ifdef __mono_ppc64__
    ppc_ldarx (code, ppc_r0, 0, location);
    ppc_add (code, ppc_r0, ppc_r0, addend);
    if (ins->opcode == OP_ATOMIC_ADD_I4)
        ppc_stwcxd (code, ppc_r0, 0, location);
#ifdef __mono_ppc64__
    ppc_stdcxd (code, ppc_r0, 0, location);
    /* stwcx./stdcx. clears EQ when the reservation was lost: loop */
    ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
    ppc_patch (branch, loop);
    /* result: the new value */
    ppc_mr (code, ins->dreg, ppc_r0);
case OP_ATOMIC_CAS_I4:
CASE_PPC64 (OP_ATOMIC_CAS_I8) {
    /* compare-and-swap via reservation: load-reserve, bail out if the
     * loaded value differs from the comparand, else store-conditional
     * and retry on lost reservation */
    int location = ins->sreg1;
    int value = ins->sreg2;
    int comparand = ins->sreg3;
    guint8 *start, *not_equal, *lost_reservation;
    if (ins->opcode == OP_ATOMIC_CAS_I4)
        ppc_lwarx (code, ppc_r0, 0, location);
#ifdef __mono_ppc64__
    ppc_ldarx (code, ppc_r0, 0, location);
    /* 32-bit CAS uses a word compare, 64-bit a doubleword compare */
    ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 0 : 1, ppc_r0, comparand);
    ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
    if (ins->opcode == OP_ATOMIC_CAS_I4)
        ppc_stwcxd (code, value, 0, location);
#ifdef __mono_ppc64__
    ppc_stdcxd (code, value, 0, location);
    lost_reservation = code;
    ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
    ppc_patch (lost_reservation, start);
    ppc_patch (not_equal, code);
    /* result: the value observed at the location */
    ppc_mr (code, ins->dreg, ppc_r0);
case OP_GC_SAFE_POINT:
    /* default: unhandled opcode is a JIT bug */
    g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
    g_assert_not_reached ();
/* sanity check: the emitted code must not exceed the per-opcode
 * length estimate used for buffer sizing */
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
    g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
        mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset));
    g_assert_not_reached ();
last_offset = offset;
cfg->code_len = code - cfg->native_code;
4647 #endif /* !DISABLE_JIT */
/* Register arch-specific low-level icalls with the runtime. */
mono_arch_register_lowlevel_calls (void)
/* The signature doesn't matter */
mono_register_jit_icall (mono_ppc_throw_exception, "mono_ppc_throw_exception", mono_create_icall_signature ("void"), TRUE);
4656 #ifdef __mono_ppc64__
4657 #ifdef _LITTLE_ENDIAN
/* patch_load_sequence(ip, val): rewrite the immediate halves of a
 * previously emitted register-load instruction sequence in place.
 * ppc64 uses a 4-instruction (4 x 16-bit immediate) sequence; the
 * guint16 indices select the immediate field of each instruction and
 * differ between little- and big-endian instruction images.
 * NOTE(review): the closing "} while (0)" lines of these macros are
 * elided in this extract. */
#define patch_load_sequence(ip,val) do {\
guint16 *__load = (guint16*)(ip); \
g_assert (sizeof (val) == sizeof (gsize)); \
__load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
__load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
__load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
__load [8] = ((guint64)(gsize)(val)) & 0xffff; \
#elif defined _BIG_ENDIAN
#define patch_load_sequence(ip,val) do {\
guint16 *__load = (guint16*)(ip); \
g_assert (sizeof (val) == sizeof (gsize)); \
__load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
__load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
__load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
__load [9] = ((guint64)(gsize)(val)) & 0xffff; \
#error huh? No endianess defined by compiler
/* ppc32: a lis/ori pair — patch the two 16-bit immediates */
#define patch_load_sequence(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
__lis_ori [3] = ((gulong)(val)) & 0xffff; \
/* Resolve and apply all JIT patch entries for a freshly generated
 * method body. NOTE(review): partial view — braces, break statements
 * and some case bodies are elided in this extract. */
mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
MonoJumpInfo *patch_info;
gboolean compile_aot = !run_cctors;
for (patch_info = ji; patch_info; patch_info = patch_info->next) {
    /* ip: the code location to patch; target: the resolved destination */
    unsigned char *ip = patch_info->ip.i + code;
    unsigned char *target;
    gboolean is_fd = FALSE;
    target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
    return_if_nok (error);
    /* AOT path (presumably guarded by compile_aot on an elided line) */
    switch (patch_info->type) {
    case MONO_PATCH_INFO_BB:
    case MONO_PATCH_INFO_LABEL:
        /* No need to patch these */
    switch (patch_info->type) {
    case MONO_PATCH_INFO_IP:
        /* store the current instruction address itself */
        patch_load_sequence (ip, ip);
    case MONO_PATCH_INFO_METHOD_REL:
        g_assert_not_reached ();
        *((gpointer *)(ip)) = code + patch_info->data.offset;
    case MONO_PATCH_INFO_SWITCH: {
        /* jump table: point the load at the table, then convert each
         * entry from a code-relative offset to an absolute address */
        gpointer *table = (gpointer *)patch_info->data.table->table;
        patch_load_sequence (ip, table);
        for (i = 0; i < patch_info->data.table->table_size; i++) {
            table [i] = (glong)patch_info->data.table->table [i] + code;
        /* we put into the table the absolute address, no need for ppc_patch in this case */
    case MONO_PATCH_INFO_METHODCONST:
    case MONO_PATCH_INFO_CLASS:
    case MONO_PATCH_INFO_IMAGE:
    case MONO_PATCH_INFO_FIELD:
    case MONO_PATCH_INFO_VTABLE:
    case MONO_PATCH_INFO_IID:
    case MONO_PATCH_INFO_SFLDA:
    case MONO_PATCH_INFO_LDSTR:
    case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
    case MONO_PATCH_INFO_LDTOKEN:
        /* from OP_AOTCONST : lis + ori */
        patch_load_sequence (ip, target);
    case MONO_PATCH_INFO_R4:
    case MONO_PATCH_INFO_R8:
        g_assert_not_reached ();
        *((gconstpointer *)(ip + 2)) = patch_info->data.target;
    case MONO_PATCH_INFO_EXC_NAME:
        g_assert_not_reached ();
        *((gconstpointer *)(ip + 1)) = patch_info->data.name;
    case MONO_PATCH_INFO_NONE:
    case MONO_PATCH_INFO_BB_OVF:
    case MONO_PATCH_INFO_EXC_OVF:
        /* everything is dealt with at epilog output time */
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
    case MONO_PATCH_INFO_INTERNAL_METHOD:
    case MONO_PATCH_INFO_ABS:
    case MONO_PATCH_INFO_RGCTX_FETCH:
    case MONO_PATCH_INFO_JIT_ICALL_ADDR:
        /* default: patch the branch/call site, honoring function
         * descriptors when is_fd was set (elided lines set it) */
        ppc_patch_full (ip, target, is_fd);
/*
 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
 * the instruction offset immediate for all the registers.
 * NOTE(review): partial view — the opening/closing braces, the
 * save_lmf branch structure and the trailing "return code;" are elided.
 */
save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset)
/* non-LMF path: store only the callee-saved GPRs actually used,
 * emitting matching unwind info for each */
for (i = 13; i <= 31; i++) {
    if (used_int_regs & (1 << i)) {
        ppc_str (code, i, pos, base_reg);
        mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset);
        pos += sizeof (mgreg_t);
/* pos is the start of the MonoLMF structure */
/* LMF path: store all of r13-r31 into MonoLMF.iregs ... */
int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs);
for (i = 13; i <= 31; i++) {
    ppc_str (code, i, offset, base_reg);
    mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset);
    offset += sizeof (mgreg_t);
/* ... and f14-f31 into MonoLMF.fregs */
offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs);
for (i = 14; i < 32; i++) {
    ppc_stfd (code, i, offset, base_reg);
    offset += sizeof (gdouble);
4811 * Stack frame layout:
4813 * ------------------- sp
4814 * MonoLMF structure or saved registers
4815 * -------------------
4817 * -------------------
4819 * -------------------
4820 * optional 8 bytes for tracing
4821 * -------------------
4822 * param area size is cfg->param_area
4823 * -------------------
4824 * linkage area size is PPC_STACK_PARAM_OFFSET
4825 * ------------------- sp
/* Emit the method prolog: save LR and callee-saved registers (or the
 * MonoLMF), allocate and align the stack frame, set up the frame
 * register, and spill register-passed arguments to their home slots.
 * NOTE(review): partial view — this function runs past the end of the
 * extract and many interior lines (braces, else branches, some
 * declarations) are elided. */
mono_arch_emit_prolog (MonoCompile *cfg)
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
long alloc_size, pos, max_offset, cfa_offset;
int tailcall_struct_index;
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
sig = mono_method_signature (method);
/* initial code buffer estimate, grown on demand elsewhere */
cfg->code_size = 512 + sig->param_count * 32;
code = cfg->native_code = g_malloc (cfg->code_size);
/* We currently emit unwind info for aot, but don't use it */
mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0);
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
    /* save the return address in the caller's frame */
    ppc_mflr (code, ppc_r0);
    ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp);
    mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET);
alloc_size = cfg->stack_offset;
/* pos: bytes needed for the register-save area (or a full MonoLMF) */
if (!method->save_lmf) {
    for (i = 31; i >= 13; --i) {
        if (cfg->used_int_regs & (1 << i)) {
            pos += sizeof (mgreg_t);
pos += sizeof (MonoLMF);
// align to MONO_ARCH_FRAME_ALIGNMENT bytes
if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
    alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
    alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
cfg->stack_usage = alloc_size;
g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0);
if (ppc_is_imm16 (-alloc_size)) {
    /* small frame: single store-with-update pushes the frame */
    ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp);
    cfa_offset = alloc_size;
    mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
    code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
/* large frame: remember the save area in r12, push via indexed update */
ppc_addi (code, ppc_r12, ppc_sp, -pos);
ppc_load (code, ppc_r0, -alloc_size);
ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
cfa_offset = alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset);
if (cfg->frame_reg != ppc_sp) {
    ppc_mr (code, cfg->frame_reg, ppc_sp);
    mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
/* store runtime generic context */
if (cfg->rgctx_var) {
    g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
        (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31));
    ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg);
/* compute max_offset in order to use short forward jumps
 * we always do it on ppc because the immediate displacement
 * for jumps is too small */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
    bb->max_offset = max_offset;
    if (cfg->prof_options & MONO_PROFILE_COVERAGE)
    MONO_BB_FOR_EACH_INS (bb, ins)
        max_offset += ins_native_length (cfg, ins);
/* load arguments allocated to register from the stack */
cinfo = get_call_info (sig);
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
    /* spill the hidden valuetype-return address argument */
    ArgInfo *ainfo = &cinfo->ret;
    inst = cfg->vret_addr;
    if (ppc_is_imm16 (inst->inst_offset)) {
        ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
    ppc_load (code, ppc_r12, inst->inst_offset);
    ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
tailcall_struct_index = 0;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
    ArgInfo *ainfo = cinfo->args + i;
    inst = cfg->args [pos];
    if (cfg->verbose_level > 2)
        g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
    if (inst->opcode == OP_REGVAR) {
        /* argument lives in a register: move it there directly */
        if (ainfo->regtype == RegTypeGeneral)
            ppc_mr (code, inst->dreg, ainfo->reg);
        else if (ainfo->regtype == RegTypeFP)
            ppc_fmr (code, inst->dreg, ainfo->reg);
        else if (ainfo->regtype == RegTypeBase) {
            /* stack-passed argument: read it from the caller's frame
             * through the back-chain pointer */
            ppc_ldr (code, ppc_r12, 0, ppc_sp);
            ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12);
        g_assert_not_reached ();
        if (cfg->verbose_level > 2)
            g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
    /* the argument should be put on the stack: FIXME handle size != word */
    if (ainfo->regtype == RegTypeGeneral) {
        /* spill a register argument to its stack home, sized store;
         * each size has imm16 / imm32 / fully-indexed offset tiers */
        switch (ainfo->size) {
        if (ppc_is_imm16 (inst->inst_offset)) {
            ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
        if (ppc_is_imm32 (inst->inst_offset)) {
            ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
            ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12);
        ppc_load (code, ppc_r12, inst->inst_offset);
        ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
        if (ppc_is_imm16 (inst->inst_offset)) {
            ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
        if (ppc_is_imm32 (inst->inst_offset)) {
            ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
            ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12);
        ppc_load (code, ppc_r12, inst->inst_offset);
        ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
#ifdef __mono_ppc64__
        if (ppc_is_imm16 (inst->inst_offset)) {
            ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
        if (ppc_is_imm32 (inst->inst_offset)) {
            ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
            ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12);
        ppc_load (code, ppc_r12, inst->inst_offset);
5007 ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
5012 if (ppc_is_imm16 (inst->inst_offset)) {
5013 ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5015 ppc_load (code, ppc_r12, inst->inst_offset);
5016 ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
5021 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5022 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5023 ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
5025 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5026 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
5027 ppc_stw (code, ainfo->reg, 0, ppc_r12);
5028 ppc_stw (code, ainfo->reg + 1, 4, ppc_r12);
5033 if (ppc_is_imm16 (inst->inst_offset)) {
5034 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5036 if (ppc_is_imm32 (inst->inst_offset)) {
5037 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5038 ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12);
5040 ppc_load (code, ppc_r12, inst->inst_offset);
5041 ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12);
5046 } else if (ainfo->regtype == RegTypeBase) {
5047 g_assert (ppc_is_imm16 (ainfo->offset));
5048 /* load the previous stack pointer in r12 */
5049 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5050 ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12);
5051 switch (ainfo->size) {
5053 if (ppc_is_imm16 (inst->inst_offset)) {
5054 ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5056 if (ppc_is_imm32 (inst->inst_offset)) {
5057 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5058 ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12);
5060 ppc_load (code, ppc_r12, inst->inst_offset);
5061 ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5066 if (ppc_is_imm16 (inst->inst_offset)) {
5067 ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5069 if (ppc_is_imm32 (inst->inst_offset)) {
5070 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5071 ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12);
5073 ppc_load (code, ppc_r12, inst->inst_offset);
5074 ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5078 #ifdef __mono_ppc64__
5080 if (ppc_is_imm16 (inst->inst_offset)) {
5081 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5083 if (ppc_is_imm32 (inst->inst_offset)) {
5084 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5085 ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12);
5087 ppc_load (code, ppc_r12, inst->inst_offset);
5088 ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5093 if (ppc_is_imm16 (inst->inst_offset)) {
5094 ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5096 ppc_load (code, ppc_r12, inst->inst_offset);
5097 ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg);
5102 g_assert (ppc_is_imm16 (ainfo->offset + 4));
5103 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5104 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5105 ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12);
5106 ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
5108 /* use r11 to load the 2nd half of the long before we clobber r12. */
5109 ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12);
5110 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5111 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
5112 ppc_stw (code, ppc_r0, 0, ppc_r12);
5113 ppc_stw (code, ppc_r11, 4, ppc_r12);
5118 if (ppc_is_imm16 (inst->inst_offset)) {
5119 ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5121 if (ppc_is_imm32 (inst->inst_offset)) {
5122 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5123 ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12);
5125 ppc_load (code, ppc_r12, inst->inst_offset);
5126 ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12);
5131 } else if (ainfo->regtype == RegTypeFP) {
5132 g_assert (ppc_is_imm16 (inst->inst_offset));
5133 if (ainfo->size == 8)
5134 ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5135 else if (ainfo->size == 4)
5136 ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5138 g_assert_not_reached ();
5139 } else if (ainfo->regtype == RegTypeFPStructByVal) {
5140 int doffset = inst->inst_offset;
5144 g_assert (ppc_is_imm16 (inst->inst_offset));
5145 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5146 /* FIXME: what if there is no class? */
5147 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5148 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5149 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5150 if (ainfo->size == 4) {
5151 ppc_stfs (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5153 ppc_stfd (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5155 soffset += ainfo->size;
5156 doffset += ainfo->size;
5158 } else if (ainfo->regtype == RegTypeStructByVal) {
5159 int doffset = inst->inst_offset;
5163 g_assert (ppc_is_imm16 (inst->inst_offset));
5164 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5165 /* FIXME: what if there is no class? */
5166 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5167 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5168 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5171 * Darwin handles 1 and 2 byte
5172 * structs specially by
5173 * loading h/b into the arg
5174 * register. Only done for
5178 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5180 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5184 #ifdef __mono_ppc64__
5186 g_assert (cur_reg == 0);
5187 #if G_BYTE_ORDER == G_BIG_ENDIAN
5188 ppc_sldi (code, ppc_r0, ainfo->reg,
5189 (sizeof (gpointer) - ainfo->bytes) * 8);
5190 ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg);
5192 if (mono_class_native_size (inst->klass, NULL) == 1) {
5193 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5194 } else if (mono_class_native_size (inst->klass, NULL) == 2) {
5195 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5196 } else if (mono_class_native_size (inst->klass, NULL) == 4) { // WDS -- maybe <=4?
5197 ppc_stw (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5199 ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); // WDS -- Better way?
5205 ppc_stptr (code, ainfo->reg + cur_reg, doffset,
5206 inst->inst_basereg);
5209 soffset += sizeof (gpointer);
5210 doffset += sizeof (gpointer);
5212 if (ainfo->vtsize) {
5213 /* FIXME: we need to do the shifting here, too */
5216 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5217 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5218 if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
5219 code = emit_memcpy (code, size - soffset,
5220 inst->inst_basereg, doffset,
5221 ppc_r12, ainfo->offset + soffset);
5223 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
5224 inst->inst_basereg, doffset,
5225 ppc_r12, ainfo->offset + soffset);
5228 } else if (ainfo->regtype == RegTypeStructByAddr) {
5229 /* if it was originally a RegTypeBase */
5230 if (ainfo->offset) {
5231 /* load the previous stack pointer in r12 */
5232 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5233 ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12);
5235 ppc_mr (code, ppc_r12, ainfo->reg);
5238 if (cfg->tailcall_valuetype_addrs) {
5239 MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
5241 g_assert (ppc_is_imm16 (addr->inst_offset));
5242 ppc_stptr (code, ppc_r12, addr->inst_offset, addr->inst_basereg);
5244 tailcall_struct_index++;
5247 g_assert (ppc_is_imm16 (inst->inst_offset));
5248 code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0);
5249 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5251 g_assert_not_reached ();
5256 if (method->save_lmf) {
5257 if (cfg->compile_aot) {
5258 /* Compute the got address which is needed by the PLT entry */
5259 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
5261 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5262 (gpointer)"mono_tls_get_lmf_addr");
5263 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
5264 ppc_load_func (code, PPC_CALL_REG, 0);
5265 ppc_mtlr (code, PPC_CALL_REG);
5270 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5271 /* lmf_offset is the offset from the previous stack pointer,
5272 * alloc_size is the total stack space allocated, so the offset
5273 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5274 * The pointer to the struct is put in ppc_r12 (new_lmf).
5275 * The callee-saved registers are already in the MonoLMF structure
5277 ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset);
5278 /* ppc_r3 is the result from mono_get_lmf_addr () */
5279 ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5280 /* new_lmf->previous_lmf = *lmf_addr */
5281 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5282 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
5283 /* *(lmf_addr) = r12 */
5284 ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5285 /* save method info */
5286 if (cfg->compile_aot)
5288 ppc_load (code, ppc_r0, 0);
5290 ppc_load_ptr (code, ppc_r0, method);
5291 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12);
5292 ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12);
5293 /* save the current IP */
5294 if (cfg->compile_aot) {
5296 ppc_mflr (code, ppc_r0);
5298 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
5299 #ifdef __mono_ppc64__
5300 ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL);
5302 ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
5305 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12);
5309 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5311 cfg->code_len = code - cfg->native_code;
5312 g_assert (cfg->code_len <= cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: tear down the LMF (if the method saved one),
 * reload the return address into LR, restore the callee-saved registers and
 * pop the stack frame.  NOTE(review): this excerpt is sampled — the return
 * type, locals and several else/brace lines fall outside the visible text.
 */
5319 mono_arch_emit_epilog (MonoCompile *cfg)
5321 MonoMethod *method = cfg->method;
/* Worst-case epilogue size estimate, used to grow the buffer below. */
5323 int max_epilog_size = 16 + 20*4;
5326 if (cfg->method->save_lmf)
5327 max_epilog_size += 128;
5329 if (mono_jit_trace_calls != NULL)
5330 max_epilog_size += 50;
/* Double the native code buffer until the epilogue is guaranteed to fit. */
5332 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5333 cfg->code_size *= 2;
5334 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5335 cfg->stat_code_reallocs++;
5339 * Keep in sync with OP_JMP
5341 code = cfg->native_code + cfg->code_len;
5343 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5344 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* LMF teardown: unlink this frame's MonoLMF from the per-thread LMF list. */
5348 if (method->save_lmf) {
5350 pos += sizeof (MonoLMF);
5352 /* save the frame reg in r8 */
5353 ppc_mr (code, ppc_r8, cfg->frame_reg);
/* r12 = address of the MonoLMF inside this frame. */
5354 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset);
5355 /* r5 = previous_lmf */
5356 ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
5358 ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5359 /* *(lmf_addr) = previous_lmf */
5360 ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
5361 /* FIXME: speedup: there is no actual need to restore the registers if
5362 * we didn't actually change them (idea from Zoltan).
/* Reload r13..r31 in one go from the LMF's saved-register area. */
5365 ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12);
5367 /*for (i = 14; i < 32; i++) {
5368 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5370 g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
5371 /* use the saved copy of the frame reg in r8 */
5372 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5373 ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8);
5374 ppc_mtlr (code, ppc_r0);
5376 ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage);
/* Non-LMF path: reload LR from the caller's frame, then restore registers. */
5378 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5379 long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
5380 if (ppc_is_imm16 (return_offset)) {
5381 ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
5383 ppc_load (code, ppc_r12, return_offset);
5384 ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
5386 ppc_mtlr (code, ppc_r0);
5388 if (ppc_is_imm16 (cfg->stack_usage)) {
/* First pass: compute the offset of the lowest saved register. */
5389 int offset = cfg->stack_usage;
5390 for (i = 13; i <= 31; i++) {
5391 if (cfg->used_int_regs & (1 << i))
5392 offset -= sizeof (mgreg_t);
5394 if (cfg->frame_reg != ppc_sp)
5395 ppc_mr (code, ppc_r12, cfg->frame_reg);
5396 /* note r31 (possibly the frame register) is restored last */
5397 for (i = 13; i <= 31; i++) {
5398 if (cfg->used_int_regs & (1 << i)) {
5399 ppc_ldr (code, i, offset, cfg->frame_reg);
5400 offset += sizeof (mgreg_t);
5403 if (cfg->frame_reg != ppc_sp)
5404 ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage);
5406 ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
/* Large-frame path: stack_usage does not fit in a 16-bit displacement. */
5408 ppc_load32 (code, ppc_r12, cfg->stack_usage);
5409 if (cfg->used_int_regs) {
5410 ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12);
5411 for (i = 31; i >= 13; --i) {
5412 if (cfg->used_int_regs & (1 << i)) {
5413 pos += sizeof (mgreg_t);
5414 ppc_ldr (code, i, -pos, ppc_r12);
5417 ppc_mr (code, ppc_sp, ppc_r12);
5419 ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12);
5426 cfg->code_len = code - cfg->native_code;
5428 g_assert (cfg->code_len < cfg->code_size);
5431 #endif /* ifndef DISABLE_JIT */
5433 /* remove once throw_exception_by_name is eliminated */
5435 exception_id_by_name (const char *name)
5437 if (strcmp (name, "IndexOutOfRangeException") == 0)
5438 return MONO_EXC_INDEX_OUT_OF_RANGE;
5439 if (strcmp (name, "OverflowException") == 0)
5440 return MONO_EXC_OVERFLOW;
5441 if (strcmp (name, "ArithmeticException") == 0)
5442 return MONO_EXC_ARITHMETIC;
5443 if (strcmp (name, "DivideByZeroException") == 0)
5444 return MONO_EXC_DIVIDE_BY_ZERO;
5445 if (strcmp (name, "InvalidCastException") == 0)
5446 return MONO_EXC_INVALID_CAST;
5447 if (strcmp (name, "NullReferenceException") == 0)
5448 return MONO_EXC_NULL_REF;
5449 if (strcmp (name, "ArrayTypeMismatchException") == 0)
5450 return MONO_EXC_ARRAY_TYPE_MISMATCH;
5451 if (strcmp (name, "ArgumentException") == 0)
5452 return MONO_EXC_ARGUMENT;
5453 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code that raises exceptions for this method:
 * first size all pending EXC/BB_OVF/EXC_OVF patch infos, grow the code
 * buffer, then materialize a throw sequence per distinct exception and
 * patch the in-line branches to jump to it.  NOTE(review): sampled
 * excerpt — some braces/else lines are not visible here.
 */
5459 mono_arch_emit_exceptions (MonoCompile *cfg)
5461 MonoJumpInfo *patch_info;
5464 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5465 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5466 int max_epilog_size = 50;
5468 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5469 exc_throw_pos [i] = NULL;
5470 exc_throw_found [i] = 0;
5473 /* count the number of exception infos */
5476 * make sure we have enough space for exceptions
/* Sizing pass: each distinct exception needs one throw sequence. */
5478 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5479 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5480 i = exception_id_by_name (patch_info->data.target);
5481 if (!exc_throw_found [i]) {
5482 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5483 exc_throw_found [i] = TRUE;
5485 } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF)
5486 max_epilog_size += 12;
5487 else if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) {
5488 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5489 i = exception_id_by_name (ovfj->data.exception);
5490 if (!exc_throw_found [i]) {
5491 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5492 exc_throw_found [i] = TRUE;
5494 max_epilog_size += 8;
5498 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5499 cfg->code_size *= 2;
5500 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5501 cfg->stat_code_reallocs++;
5504 code = cfg->native_code + cfg->code_len;
5506 /* add code to raise exceptions */
5507 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5508 switch (patch_info->type) {
/* Basic-block overflow: a conditional branch whose target was too far. */
5509 case MONO_PATCH_INFO_BB_OVF: {
5510 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5511 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5512 /* patch the initial jump */
5513 ppc_patch (ip, code);
5514 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2);
5516 ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */
5517 /* jump back to the true target */
5519 ip = ovfj->data.bb->native_offset + cfg->native_code;
5520 ppc_patch (code - 4, ip);
5521 patch_info->type = MONO_PATCH_INFO_NONE;
/* Overflow-exception trampoline: rewrites itself into a plain EXC patch. */
5524 case MONO_PATCH_INFO_EXC_OVF: {
5525 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5526 MonoJumpInfo *newji;
5527 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5528 unsigned char *bcl = code;
5529 /* patch the initial jump: we arrived here with a call */
5530 ppc_patch (ip, code);
5531 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0);
5533 ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */
5534 /* patch the conditional jump to the right handler */
5535 /* make it processed next */
5536 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
5537 newji->type = MONO_PATCH_INFO_EXC;
5538 newji->ip.i = bcl - cfg->native_code;
5539 newji->data.target = ovfj->data.exception;
5540 newji->next = patch_info->next;
5541 patch_info->next = newji;
5542 patch_info->type = MONO_PATCH_INFO_NONE;
5545 case MONO_PATCH_INFO_EXC: {
5546 MonoClass *exc_class;
5548 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5549 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted throw sequence when it is still in branch range. */
5550 if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) {
5551 ppc_patch (ip, exc_throw_pos [i]);
5552 patch_info->type = MONO_PATCH_INFO_NONE;
5555 exc_throw_pos [i] = code;
5558 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5560 ppc_patch (ip, code);
5561 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5562 ppc_load (code, ppc_r3, exc_class->type_token);
5563 /* we got here from a conditional call, so the calling ip is set in lr */
5564 ppc_mflr (code, ppc_r4);
/* Retarget this patch at the throw-helper call emitted just below. */
5565 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5566 patch_info->data.name = "mono_arch_throw_corlib_exception";
5567 patch_info->ip.i = code - cfg->native_code;
5568 if (FORCE_INDIR_CALL || cfg->method->dynamic) {
5569 ppc_load_func (code, PPC_CALL_REG, 0);
5570 ppc_mtctr (code, PPC_CALL_REG);
5571 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5583 cfg->code_len = code - cfg->native_code;
5585 g_assert (cfg->code_len <= cfg->code_size);
/*
 * try_offset_access:
 *
 *   Debug/probing helper: reads r2 (presumably the TLS base on this ABI —
 * TODO confirm) and dereferences a two-level table at a fixed offset (284)
 * from it, comparing the slot selected by IDX against VALUE.  The bit math
 * splits idx into a 32-entry-per-row table index.  NOTE(review): the return
 * statements of this function are not visible in this excerpt.
 */
5591 try_offset_access (void *value, guint32 idx)
5593 register void* me __asm__ ("r2");
5594 void ***p = (void***)((char*)me + 284);
5595 int idx1 = idx / 32;
5596 int idx2 = idx % 32;
5599 if (value != p[idx1][idx2])
5606 mono_arch_finish_init (void)
5611 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5615 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5617 #define LOADSTORE_SIZE 4
5618 #define JUMP_IMM_SIZE 12
5619 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
5620 #define ENABLE_WRONG_METHOD_CHECK 0
5623 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the native IMT/virtual-generic thunk that dispatches an
 * interface call: a sequence of compare-and-branch chunks, one per
 * IMT entry, keyed on MONO_ARCH_IMT_REG.  Two passes: first compute
 * the total code size, then emit.  NOTE(review): sampled excerpt —
 * several else/#endif lines are not visible here.
 */
5626 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5627 gpointer fail_tramp)
5631 guint8 *code, *start;
/* Pass 1: size each chunk so the code buffer can be reserved up front. */
5633 for (i = 0; i < count; ++i) {
5634 MonoIMTCheckItem *item = imt_entries [i];
5635 if (item->is_equals) {
5636 if (item->check_target_idx) {
5637 if (!item->compare_done)
5638 item->chunk_size += CMP_SIZE;
5639 if (item->has_target_code)
5640 item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE;
5642 item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE;
5645 item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2;
5646 if (!item->has_target_code)
5647 item->chunk_size += LOADSTORE_SIZE;
5649 item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE;
5650 #if ENABLE_WRONG_METHOD_CHECK
5651 item->chunk_size += CMP_SIZE + BR_SIZE + 4;
/* Range-check node (not is_equals): compare + conditional branch only. */
5656 item->chunk_size += CMP_SIZE + BR_SIZE;
5657 imt_entries [item->check_target_idx]->compare_done = TRUE;
5659 size += item->chunk_size;
5661 /* the initial load of the vtable address */
5662 size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE;
/* fail_tramp != NULL means a virtual-generic thunk in the method pool. */
5664 code = mono_method_alloc_generic_virtual_trampoline (domain, size);
5666 code = mono_domain_code_reserve (domain, size);
5671 * We need to save and restore r12 because it might be
5672 * used by the caller as the vtable register, so
5673 * clobbering it will trip up the magic trampoline.
5675 * FIXME: Get rid of this by making sure that r12 is
5676 * not used as the vtable register in interface calls.
5678 ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5679 ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0])));
/* Pass 2: emit the compare/branch chunks. */
5681 for (i = 0; i < count; ++i) {
5682 MonoIMTCheckItem *item = imt_entries [i];
5683 item->code_target = code;
5684 if (item->is_equals) {
5685 if (item->check_target_idx) {
5686 if (!item->compare_done) {
5687 ppc_load (code, ppc_r0, (gsize)item->key);
5688 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5690 item->jmp_code = code;
5691 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5692 if (item->has_target_code) {
5693 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5695 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
5696 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
/* Indirect jump to the resolved target through CTR. */
5698 ppc_mtctr (code, ppc_r0);
5699 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* Leaf entry with a fail path (fail_tramp case): branch to fail_tramp on miss. */
5702 ppc_load (code, ppc_r0, (gulong)item->key);
5703 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5704 item->jmp_code = code;
5705 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5706 if (item->has_target_code) {
5707 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5710 ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot]));
5711 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
5713 ppc_mtctr (code, ppc_r0);
5714 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5715 ppc_patch (item->jmp_code, code);
5716 ppc_load_ptr (code, ppc_r0, fail_tramp);
5717 ppc_mtctr (code, ppc_r0);
5718 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5719 item->jmp_code = NULL;
5721 /* enable the commented code to assert on wrong method */
5722 #if ENABLE_WRONG_METHOD_CHECK
5723 ppc_load (code, ppc_r0, (guint32)item->key);
5724 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5725 item->jmp_code = code;
5726 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5728 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
5729 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5730 ppc_mtctr (code, ppc_r0);
5731 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5732 #if ENABLE_WRONG_METHOD_CHECK
5733 ppc_patch (item->jmp_code, code);
5735 item->jmp_code = NULL;
/* Range-check node: unsigned compare, branch-if-less to the subtree. */
5740 ppc_load (code, ppc_r0, (gulong)item->key);
5741 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5742 item->jmp_code = code;
5743 ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0);
5746 /* patch the branches to get to the target items */
5747 for (i = 0; i < count; ++i) {
5748 MonoIMTCheckItem *item = imt_entries [i];
5749 if (item->jmp_code) {
5750 if (item->check_target_idx) {
5751 ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5757 mono_stats.imt_trampolines_size += code - start;
5758 g_assert (code - start <= size);
5759 mono_arch_flush_icache (start, size);
5761 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the interface MonoMethod from the saved register state of an
 * IMT call site: it was passed in MONO_ARCH_IMT_REG.
 */
5767 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5769 mgreg_t *r = (mgreg_t*)regs;
5771 return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG];
/*
 * mono_arch_find_static_call_vtable:
 *
 *   Recover the MonoVTable passed in MONO_ARCH_RGCTX_REG from the saved
 * register state of a static rgctx call site.
 */
5775 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5777 mgreg_t *r = (mgreg_t*)regs;
5779 return (MonoVTable*)(gsize) r [MONO_ARCH_RGCTX_REG];
/*
 * mono_arch_get_cie_program:
 *
 *   Build the unwind ops shared by all methods (CIE): CFA = r1 + 0.
 * NOTE(review): the declaration of `l` and the return statement are not
 * visible in this excerpt.
 */
5783 mono_arch_get_cie_program (void)
5787 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ppc_r1, 0);
5793 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5800 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the value of integer register REG from CTX.  The stack pointer
 * is fetched through MONO_CONTEXT_GET_SP; other registers come from the
 * regs array.  NOTE(review): the condition selecting between the two
 * returns (presumably `reg == ppc_sp`) is not visible in this excerpt.
 */
5806 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5809 return (mgreg_t)MONO_CONTEXT_GET_SP (ctx);
5811 return ctx->regs [reg];
5815 mono_arch_get_patch_offset (guint8 *code)
5821 * mono_aot_emit_load_got_addr:
5823 * Emit code to load the got address.
5824 * On PPC, the result is placed into r30.
/* Works both at JIT time (cfg != NULL: record a patch on cfg) and at
 * AOT-compile time (cfg == NULL: prepend to the *ji patch list). */
5827 mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
5830 ppc_mflr (code, ppc_r30);
5832 mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5834 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5835 /* arch_emit_got_address () patches this */
5836 #if defined(TARGET_POWERPC64)
/* Placeholder GOT offset; the AOT compiler rewrites the load32 immediate. */
5842 ppc_load32 (code, ppc_r0, 0);
5843 ppc_add (code, ppc_r30, ppc_r30, ppc_r0);
5850 * mono_ppc_emit_load_aotconst:
5852 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5853 * TARGET from the mscorlib GOT in full-aot code.
5854 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
/* (Result register named on the line above is outside this excerpt —
 * the code below leaves the loaded constant in r12.) */
5858 mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
5860 /* Load the mscorlib got address */
5861 ppc_ldptr (code, ppc_r12, sizeof (gpointer), ppc_r30);
5862 *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
5863 /* arch_emit_got_access () patches this */
/* Placeholder slot offset; patched by the AOT compiler. */
5864 ppc_load32 (code, ppc_r0, 0);
5865 ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0);
5870 /* Soft Debug support */
5871 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5878 * mono_arch_set_breakpoint:
5880 * See mini-amd64.c for docs.
/* Overwrite the reserved sequence-point slot at IP with a load from the
 * bp_trigger_page; when breakpoints are armed the load faults and the
 * signal handler reports a breakpoint event. */
5883 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5886 guint8 *orig_code = code;
5888 ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page);
5889 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
/* The emitted sequence must exactly fill the reserved slot. */
5891 g_assert (code - orig_code == BREAKPOINT_SIZE);
5893 mono_arch_flush_icache (orig_code, code - orig_code);
5897 * mono_arch_clear_breakpoint:
5899 * See mini-amd64.c for docs.
/* Replace the breakpoint load sequence at IP with no-ops (one word per
 * iteration) and flush the icache so the CPU sees the new code. */
5902 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5907 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
5910 mono_arch_flush_icache (ip, code - ip);
5914 * mono_arch_is_breakpoint_event:
5916 * See mini-amd64.c for docs.
/* Decide whether a SIGSEGV was caused by a read of the breakpoint
 * trigger page (faulting address within a small window of the page). */
5919 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5921 siginfo_t* sinfo = (siginfo_t*) info;
5922 /* Sometimes the address is off by 4 */
5923 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5930 * mono_arch_skip_breakpoint:
5932 * See mini-amd64.c for docs.
/* Resume execution past the faulting trigger-page load (one 4-byte insn). */
5935 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
5937 /* skip the ldptr */
5938 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5946 * mono_arch_start_single_stepping:
5948 * See mini-amd64.c for docs.
/* Revoke all access to the single-step trigger page so that the loads
 * emitted at sequence points fault, producing single-step events. */
5951 mono_arch_start_single_stepping (void)
5953 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5957 * mono_arch_stop_single_stepping:
5959 * See mini-amd64.c for docs.
/* Make the single-step trigger page readable again so sequence-point
 * loads succeed silently. */
5962 mono_arch_stop_single_stepping (void)
5964 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5968 * mono_arch_is_single_step_event:
5970 * See mini-amd64.c for docs.
/* Decide whether a fault was caused by a read of the single-step
 * trigger page (faulting address within a small window of the page). */
5973 mono_arch_is_single_step_event (void *info, void *sigctx)
5975 siginfo_t* sinfo = (siginfo_t*) info;
5976 /* Sometimes the address is off by 4 */
5977 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5984 * mono_arch_skip_single_step:
5986 * See mini-amd64.c for docs.
/* Resume execution past the faulting trigger-page load (one 4-byte insn). */
5989 mono_arch_skip_single_step (MonoContext *ctx)
5991 /* skip the ldptr */
5992 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5996 * mono_arch_create_seq_point_info:
5998 * See mini-amd64.c for docs.
6001 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
/*
 * mono_arch_init_lmf_ext:
 *
 *   Initialize an extended LMF frame: link it to PREV_LMF, tag the link
 * pointer and point ebp at the frame itself.
 */
6008 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
6010 ext->lmf.previous_lmf = prev_lmf;
6011 /* Mark that this is a MonoLMFExt */
/* Bit 2 in the previous_lmf pointer distinguishes MonoLMFExt from MonoLMF
 * when the unwinder walks the LMF chain. */
6012 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
6013 ext->lmf.ebp = (gssize)ext;
/*
 * mono_arch_opcode_supported:
 *
 *   Report whether this backend implements OPCODE natively.  The 32-bit
 * atomic add/cas ops are always available; the 64-bit variants only on
 * ppc64.  NOTE(review): the return statements / default case fall outside
 * this excerpt.
 */
6019 mono_arch_opcode_supported (int opcode)
6022 case OP_ATOMIC_ADD_I4:
6023 case OP_ATOMIC_CAS_I4:
6024 #ifdef TARGET_POWERPC64
6025 case OP_ATOMIC_ADD_I8:
6026 case OP_ATOMIC_CAS_I8:
6036 // FIXME: To get the test case finally_block_ending_in_dead_bb to work properly we need to define the following
6037 // (in mini-ppc.h) and then implement the fuction mono_arch_create_handler_block_trampoline.
6038 // #define MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD 1
6041 mono_arch_create_handler_block_trampoline (void)