3 * PowerPC backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
8 * Andreas Faerber <andreas.faerber@web.de>
10 * (C) 2003 Ximian, Inc.
11 * (C) 2007-2008 Andreas Faerber
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-proclib.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
22 #include <mono/utils/unlocked.h>
25 #ifdef TARGET_POWERPC64
26 #include "cpu-ppc64.h"
33 #include <sys/sysctl.h>
39 #define FORCE_INDIR_CALL 1
50 /* cpu_hw_caps contains the flags defined below */
51 static int cpu_hw_caps = 0;
52 static int cachelinesize = 0;
53 static int cachelineinc = 0;
55 PPC_ICACHE_SNOOP = 1 << 0,
56 PPC_MULTIPLE_LS_UNITS = 1 << 1,
57 PPC_SMP_CAPABLE = 1 << 2,
60 PPC_MOVE_FPR_GPR = 1 << 5,
64 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
66 /* This mutex protects architecture specific caches */
67 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
68 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
69 static mono_mutex_t mini_arch_mutex;
71 int mono_exc_esp_offset = 0;
74 * The code generated for sequence points reads from this location, which is
75 * made read-only when single stepping is enabled.
77 static gpointer ss_trigger_page;
79 /* Enabled breakpoints read from this trigger page */
80 static gpointer bp_trigger_page;
82 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
84 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
85 inst->type = STACK_R8; \
87 inst->inst_p0 = (void*)(addr); \
88 mono_bblock_add_inst (cfg->cbb, inst); \
/*
 * mono_arch_regname:
 * Maps an integer register number to its printable name; r1 is shown
 * as "sp" (the PPC stack pointer).
 * NOTE(review): this excerpt is a non-contiguous sample — the tail of the
 * name table (r30/r31) and the return statements are not visible here.
 */
92 mono_arch_regname (int reg) {
93 static const char rnames[][4] = {
94 "r0", "sp", "r2", "r3", "r4",
95 "r5", "r6", "r7", "r8", "r9",
96 "r10", "r11", "r12", "r13", "r14",
97 "r15", "r16", "r17", "r18", "r19",
98 "r20", "r21", "r22", "r23", "r24",
99 "r25", "r26", "r27", "r28", "r29",
/* bounds-check before indexing the 32-entry table */
102 if (reg >= 0 && reg < 32)
/*
 * mono_arch_fregname:
 * Maps a floating-point register number to its printable name ("f0".."f31").
 * NOTE(review): sampled excerpt — the end of the table and the return
 * statements are not visible here.
 */
108 mono_arch_fregname (int reg) {
109 static const char rnames[][4] = {
110 "f0", "f1", "f2", "f3", "f4",
111 "f5", "f6", "f7", "f8", "f9",
112 "f10", "f11", "f12", "f13", "f14",
113 "f15", "f16", "f17", "f18", "f19",
114 "f20", "f21", "f22", "f23", "f24",
115 "f25", "f26", "f27", "f28", "f29",
/* bounds-check before indexing the 32-entry table */
118 if (reg >= 0 && reg < 32)
/*
 * emit_memcpy:
 * Emits PPC machine code at `code` that copies `size` bytes from
 * sreg+soffset to dreg+doffset. Large copies (> 5 pointers) use a
 * CTR-counted loop over ppc_r11/ppc_r12; the remainder is copied with
 * progressively smaller unrolled loads/stores (pointer, word, half, byte).
 * Returns the advanced code pointer (return path not visible in this
 * sampled excerpt).
 */
123 /* this function overwrites r0, r11, r12 */
125 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
127 /* unrolled, use the counter in big */
128 if (size > sizeof (gpointer) * 5) {
129 long shifted = size / SIZEOF_VOID_P;
130 guint8 *copy_loop_start, *copy_loop_jump;
/* load the pointer-sized word count into CTR for the bdnz loop */
132 ppc_load (code, ppc_r0, shifted);
133 ppc_mtctr (code, ppc_r0);
134 //g_assert (sreg == ppc_r12);
/* pre-bias the cursors so the update-form loads/stores advance first */
135 ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (gpointer)));
136 ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (gpointer)));
137 copy_loop_start = code;
138 ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
139 ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
140 copy_loop_jump = code;
/* branch back while CTR != 0, then patch the branch target */
141 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
142 ppc_patch (copy_loop_jump, copy_loop_start);
143 size -= shifted * sizeof (gpointer);
144 doffset = soffset = 0;
147 #ifdef __mono_ppc64__
148 /* the hardware has multiple load/store units and the move is long
149 enough to use more than one register, then use load/load/store/store
150 to execute 2 instructions per cycle. */
151 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
153 ppc_ldptr (code, ppc_r0, soffset, sreg);
154 ppc_ldptr (code, ppc_r11, soffset+8, sreg);
155 ppc_stptr (code, ppc_r0, doffset, dreg);
156 ppc_stptr (code, ppc_r11, doffset+8, dreg);
/* single-register pointer-sized copy fallback */
163 ppc_ldr (code, ppc_r0, soffset, sreg);
164 ppc_str (code, ppc_r0, doffset, dreg);
/* 32-bit word copies, paired when multiple LS units are available */
170 if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
172 ppc_lwz (code, ppc_r0, soffset, sreg);
173 ppc_lwz (code, ppc_r11, soffset+4, sreg);
174 ppc_stw (code, ppc_r0, doffset, dreg);
175 ppc_stw (code, ppc_r11, doffset+4, dreg);
183 ppc_lwz (code, ppc_r0, soffset, sreg);
184 ppc_stw (code, ppc_r0, doffset, dreg);
/* halfword remainder */
190 ppc_lhz (code, ppc_r0, soffset, sreg);
191 ppc_sth (code, ppc_r0, doffset, dreg);
/* byte remainder */
197 ppc_lbz (code, ppc_r0, soffset, sreg);
198 ppc_stb (code, ppc_r0, doffset, dreg);
207 * mono_arch_get_argument_info:
208 * @csig: a method signature
209 * @param_count: the number of parameters to consider
210 * @arg_info: an array to store the result infos
212 * Gathers information on parameters such as size, alignment and
213 * padding. arg_info should be large enough to hold param_count + 1 entries.
215 * Returns the size of the activation frame.
218 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
220 #ifdef __mono_ppc64__
224 int k, frame_size = 0;
225 int size, align, pad;
/* struct returns are passed via a hidden pointer, reserving one slot */
228 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
229 frame_size += sizeof (gpointer);
233 arg_info [0].offset = offset;
/* 'this' pointer slot — presumably guarded by csig->hasthis (guard line
 * not visible in this sampled excerpt) */
236 frame_size += sizeof (gpointer);
240 arg_info [0].size = frame_size;
242 for (k = 0; k < param_count; k++) {
/* native vs. managed stack size — selection condition not visible here */
245 size = mono_type_native_stack_size (csig->params [k], (guint32*)&align);
247 size = mini_type_stack_size (csig->params [k], &align);
249 /* ignore alignment for now */
/* pad frame_size up to `align` (align is assumed to be a power of two) */
252 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
253 arg_info [k].pad = pad;
255 arg_info [k + 1].pad = 0;
256 arg_info [k + 1].size = size;
258 arg_info [k + 1].offset = offset;
/* final frame alignment to MONO_ARCH_FRAME_ALIGNMENT */
262 align = MONO_ARCH_FRAME_ALIGNMENT;
263 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
264 arg_info [k].pad = pad;
270 #ifdef __mono_ppc64__
/*
 * is_load_sequence:
 * Returns TRUE when the five instructions at `seq` form the ppc64
 * load-immediate-64 pattern lis/ori/sldi/oris/ori (opcodes checked by
 * primary opcode number; sldi is an extended form of opcode 30).
 */
272 is_load_sequence (guint32 *seq)
274 return ppc_opcode (seq [0]) == 15 && /* lis */
275 ppc_opcode (seq [1]) == 24 && /* ori */
276 ppc_opcode (seq [2]) == 30 && /* sldi */
277 ppc_opcode (seq [3]) == 25 && /* oris */
278 ppc_opcode (seq [4]) == 24; /* ori */
281 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
282 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
286 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
/*
 * mono_ppc_is_direct_call_sequence:
 * Recognizes a thunk-less direct call emitted by this backend by walking
 * backwards from the branch instruction. On ppc64 it also accepts the
 * function-descriptor variant that reloads the TOC register (r2).
 */
288 /* code must point to the blrl */
290 mono_ppc_is_direct_call_sequence (guint32 *code)
292 #ifdef __mono_ppc64__
/* blrl, blr or bctr — the only branch encodings this walker expects */
293 g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420);
295 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
296 if (ppc_opcode (code [-1]) == 31) { /* mtlr */
297 if (ppc_is_load_op (code [-2]) && ppc_is_load_op (code [-3])) { /* ld/ld */
298 if (!is_load_sequence (&code [-8]))
300 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
301 return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == sizeof (gpointer)) ||
302 (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == sizeof (gpointer));
304 if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */
305 return is_load_sequence (&code [-8]);
307 return is_load_sequence (&code [-6]);
/* 32-bit path: only blrl is valid here */
311 g_assert(*code == 0x4e800021);
313 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
314 return ppc_opcode (code [-1]) == 31 &&
315 ppc_opcode (code [-2]) == 24 &&
316 ppc_opcode (code [-3]) == 15;
320 #define MAX_ARCH_DELEGATE_PARAMS 7
/*
 * get_delegate_invoke_impl:
 * Generates the native trampoline used to invoke a delegate. The
 * has_target variant replaces 'this' (r3) with delegate->target and
 * tail-branches to delegate->method_ptr via CTR; the no-target variant
 * slides the register arguments down by one instead. Fills *info with a
 * MonoTrampInfo describing the generated code.
 * NOTE(review): sampled excerpt — the has_target/else branch structure and
 * the final return are not fully visible.
 */
323 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count, gboolean aot)
325 guint8 *code, *start;
328 int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE;
330 start = code = mono_global_codeman_reserve (size);
332 code = mono_ppc_create_pre_code_ftnptr (code);
334 /* Replace the this argument with the target */
335 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
336 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
337 /* it's a function descriptor */
338 /* Can't use ldptr as it doesn't work with r0 */
339 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
/* indirect jump through CTR to the delegate's method */
341 ppc_mtctr (code, ppc_r0);
342 ppc_ldptr (code, ppc_r3, MONO_STRUCT_OFFSET (MonoDelegate, target), ppc_r3);
343 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
345 g_assert ((code - start) <= size);
347 mono_arch_flush_icache (start, size);
/* no-target variant: one extra mr per parameter to shift args down */
351 size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE;
352 start = code = mono_global_codeman_reserve (size);
354 code = mono_ppc_create_pre_code_ftnptr (code);
356 ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
357 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
358 /* it's a function descriptor */
359 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
361 ppc_mtctr (code, ppc_r0);
362 /* slide down the arguments */
363 for (i = 0; i < param_count; ++i) {
364 ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
366 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
368 g_assert ((code - start) <= size);
370 mono_arch_flush_icache (start, size);
374 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
376 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
377 *info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
/*
 * mono_arch_get_delegate_invoke_impls:
 * Builds (for AOT) the full set of delegate-invoke trampolines: one
 * has-target variant plus one no-target variant per supported parameter
 * count (0..MAX_ARCH_DELEGATE_PARAMS). Returns them as a GSList of
 * MonoTrampInfo (return statement not visible in this sampled excerpt).
 */
385 mono_arch_get_delegate_invoke_impls (void)
391 get_delegate_invoke_impl (&info, TRUE, 0, TRUE);
392 res = g_slist_prepend (res, info);
394 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
395 get_delegate_invoke_impl (&info, FALSE, i, TRUE);
396 res = g_slist_prepend (res, info);
/*
 * mono_arch_get_delegate_invoke_impl:
 * Returns (and caches) the delegate invoke trampoline matching `sig`.
 * Uses AOT-provided trampolines when available, otherwise JITs one via
 * get_delegate_invoke_impl. Returns NULL for unsupported signatures
 * (struct returns, too many or non-regsize parameters).
 */
403 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
405 guint8 *code, *start;
407 /* FIXME: Support more cases */
408 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target path: a single shared trampoline, cached in `cached` */
412 static guint8* cached = NULL;
418 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
421 start = get_delegate_invoke_impl (&info, TRUE, 0, FALSE);
422 mono_tramp_info_register (info, NULL);
/* publish the cached pointer only after the code is fully written */
424 mono_memory_barrier ();
/* no-target path: one cache slot per parameter count */
428 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
431 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
433 for (i = 0; i < sig->param_count; ++i)
434 if (!mono_is_regsize_var (sig->params [i]))
438 code = cache [sig->param_count];
443 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
444 start = mono_aot_get_trampoline (name);
448 start = get_delegate_invoke_impl (&info, FALSE, sig->param_count, FALSE);
449 mono_tramp_info_register (info, NULL);
452 mono_memory_barrier ();
454 cache [sig->param_count] = start;
460 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/*
 * mono_arch_get_this_arg_from_call:
 * Extracts the 'this' argument from a saved register dump: on PPC the
 * first argument is always passed in r3.
 */
466 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
468 mgreg_t *r = (mgreg_t*)regs;
470 return (gpointer)(gsize)r [ppc_r3];
478 #define MAX_AUX_ENTRIES 128
480 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
481 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
483 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
485 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
486 #define ISA_64 0x40000000
488 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
489 #define ISA_MOVE_FPR_GPR 0x00000200
491 * Initialize the cpu to execute managed code.
494 mono_arch_cpu_init (void)
499 * Initialize architecture specific code.
/*
 * mono_arch_init:
 * One-time architecture initialization: detects the data-cache line size
 * (sysctl on Apple, /proc/self/auxv AT_DCACHEBSIZE on Linux), fills
 * cpu_hw_caps from the hwcap probes, creates the arch mutex, and maps the
 * single-step/breakpoint trigger pages (bp page is made inaccessible so
 * reads fault).
 */
502 mono_arch_init (void)
504 #if defined(MONO_CROSS_COMPILE)
505 #elif defined(__APPLE__)
507 size_t len = sizeof (cachelinesize);
510 mib [1] = HW_CACHELINE;
512 if (sysctl (mib, 2, &cachelinesize, &len, NULL, 0) == -1) {
516 cachelineinc = cachelinesize;
518 #elif defined(__linux__)
519 AuxVec vec [MAX_AUX_ENTRIES];
520 int i, vec_entries = 0;
521 /* sadly this will work only with 2.6 kernels... */
522 FILE* f = fopen ("/proc/self/auxv", "rb");
525 vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f);
529 for (i = 0; i < vec_entries; i++) {
530 int type = vec [i].type;
532 if (type == 19) { /* AT_DCACHEBSIZE */
533 cachelinesize = vec [i].value;
537 #elif defined(G_COMPILER_CODEWARRIOR)
541 //#error Need a way to get cache line size
/* translate hwcap probe results into cpu_hw_caps flag bits */
544 if (mono_hwcap_ppc_has_icache_snoop)
545 cpu_hw_caps |= PPC_ICACHE_SNOOP;
547 if (mono_hwcap_ppc_is_isa_2x)
548 cpu_hw_caps |= PPC_ISA_2X;
550 if (mono_hwcap_ppc_is_isa_64)
551 cpu_hw_caps |= PPC_ISA_64;
553 if (mono_hwcap_ppc_has_move_fpr_gpr)
554 cpu_hw_caps |= PPC_MOVE_FPR_GPR;
556 if (mono_hwcap_ppc_has_multiple_ls_units)
557 cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS;
563 cachelineinc = cachelinesize;
565 if (mono_cpu_count () > 1)
566 cpu_hw_caps |= PPC_SMP_CAPABLE;
568 mono_os_mutex_init_recursive (&mini_arch_mutex);
/* sequence-point pages: ss page stays readable, bp page faults on read */
570 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
571 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
572 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
574 mono_aot_register_jit_icall ("mono_ppc_throw_exception", mono_ppc_throw_exception);
576 // FIXME: Fix partial sharing for power and remove this
577 mono_set_partial_sharing_supported (FALSE);
581 * Cleanup architecture specific code.
584 mono_arch_cleanup (void)
586 mono_os_mutex_destroy (&mini_arch_mutex);
590 mono_arch_have_fast_tls (void)
596 * This function returns the optimizations supported on this cpu.
599 mono_arch_cpu_optimizations (guint32 *exclude_mask)
603 /* no ppc-specific optimizations yet */
609 * This function test for all SIMD functions supported.
611 * Returns a bitmask corresponding to all supported versions.
615 mono_arch_cpu_enumerate_simd_versions (void)
617 /* SIMD is currently unimplemented */
621 #ifdef __mono_ppc64__
622 #define CASE_PPC32(c)
623 #define CASE_PPC64(c) case c:
625 #define CASE_PPC32(c) case c:
626 #define CASE_PPC64(c)
/*
 * is_regsize_var:
 * Returns whether a variable of type `t` fits in a single integer
 * register on this target (I8/U8 only count on ppc64, via CASE_PPC64).
 * NOTE(review): sampled excerpt — several case labels and the return
 * statements are not visible here.
 */
630 is_regsize_var (MonoType *t) {
633 t = mini_get_underlying_type (t);
637 CASE_PPC64 (MONO_TYPE_I8)
638 CASE_PPC64 (MONO_TYPE_U8)
642 case MONO_TYPE_FNPTR:
644 case MONO_TYPE_OBJECT:
645 case MONO_TYPE_STRING:
646 case MONO_TYPE_CLASS:
647 case MONO_TYPE_SZARRAY:
648 case MONO_TYPE_ARRAY:
650 case MONO_TYPE_GENERICINST:
/* a generic instantiation is regsize only if it is a reference type */
651 if (!mono_type_generic_inst_is_valuetype (t))
654 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collects the method variables eligible for global register allocation:
 * live (first use strictly before last use), non-volatile, non-indirect
 * locals/arguments of register size. Returns them sorted by live range
 * (return statement not visible in this sampled excerpt).
 */
662 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
667 for (i = 0; i < cfg->num_varinfo; i++) {
668 MonoInst *ins = cfg->varinfo [i];
669 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or dead variables are skipped */
672 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
675 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
678 /* we can only allocate 32 bit values */
679 if (is_regsize_var (ins->inst_vtype)) {
680 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
681 g_assert (i == vmv->idx);
682 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
688 #endif /* ifndef DISABLE_JIT */
/*
 * mono_arch_get_global_int_regs:
 * Returns the list of callee-saved integer registers (r14..top) that the
 * register allocator may assign globally. r13 is skipped (reserved by the
 * PPC EABI) and r29 is withheld in AOT mode for the vtable address.
 * NOTE(review): the computation of `top` and the frame_reg branch body are
 * not visible in this sampled excerpt.
 */
691 mono_arch_get_global_int_regs (MonoCompile *cfg)
695 if (cfg->frame_reg != ppc_sp)
697 /* ppc_r13 is used by the system on PPC EABI */
698 for (i = 14; i < top; ++i) {
700 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
701 * since the trampolines can clobber r12.
703 if (!(cfg->compile_aot && i == 29))
704 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
711 * mono_arch_regalloc_cost:
713 * Return the cost, in number of memory references, of the action of
714 * allocating the variable VMV into a register during global register
718 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Makes freshly written machine code visible to the instruction fetch
 * unit: flushes/stores the data cache lines covering [code, code+size)
 * (dcbf on SMP, dcbst on UP), then invalidates the corresponding icache
 * lines (icbi). On icache-snooping CPUs a single icbi suffices; on
 * pre-ISA-2.x chips a sync is issued per icbi.
 */
725 mono_arch_flush_icache (guint8 *code, gint size)
727 #ifdef MONO_CROSS_COMPILE
730 guint8 *endp, *start;
/* round start down to a cache-line boundary */
734 start = (guint8*)((gsize)start & ~(cachelinesize - 1));
735 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
736 #if defined(G_COMPILER_CODEWARRIOR)
737 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
738 for (p = start; p < endp; p += cachelineinc) {
742 for (p = start; p < endp; p += cachelineinc) {
748 for (p = start; p < endp; p += cachelineinc) {
759 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
760 * The sync is required to ensure that the store queue is completely empty.
761 * While the icbi performs no cache operations, icbi/isync is required to
762 * kill local prefetch.
764 if (cpu_hw_caps & PPC_ICACHE_SNOOP) {
766 asm ("icbi 0,%0;" : : "r"(code) : "memory");
770 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
771 if (cpu_hw_caps & PPC_SMP_CAPABLE) {
772 for (p = start; p < endp; p += cachelineinc) {
773 asm ("dcbf 0,%0;" : : "r"(p) : "memory");
776 for (p = start; p < endp; p += cachelineinc) {
777 asm ("dcbst 0,%0;" : : "r"(p) : "memory");
782 for (p = start; p < endp; p += cachelineinc) {
783 /* for ISA2.0+ implementations we should not need any extra sync between the
784 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
785 * So I am not sure which chip had this problem but its not an issue on
786 * of the ISA V2 chips.
788 if (cpu_hw_caps & PPC_ISA_2X)
789 asm ("icbi 0,%0;" : : "r"(p) : "memory");
791 asm ("icbi 0,%0; sync;" : : "r"(p) : "memory");
793 if (!(cpu_hw_caps & PPC_ISA_2X))
801 mono_arch_flush_register_windows (void)
806 #define ALWAYS_ON_STACK(s) s
807 #define FP_ALSO_IN_REG(s) s
809 #ifdef __mono_ppc64__
810 #define ALWAYS_ON_STACK(s) s
811 #define FP_ALSO_IN_REG(s) s
813 #define ALWAYS_ON_STACK(s)
814 #define FP_ALSO_IN_REG(s)
816 #define ALIGN_DOUBLES
825 RegTypeFPStructByVal, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
830 guint32 vtsize; /* in param area */
832 guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */
833 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
834 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */
835 guint8 bytes : 4; /* size in bytes - only valid for
836 RegTypeStructByVal/RegTypeFPStructByVal if the struct fits
837 in one word, otherwise it's 0*/
846 gboolean vtype_retaddr;
854 #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS
856 // Test if a structure is completely composed of either float XOR double fields and has fewer than
857 // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTER members.
858 // If this is true the structure can be returned directly via float registers instead of by a hidden parameter
859 // pointing to where the return value should be stored.
860 // This is as per the ELF ABI v2.
//
// Out-parameters member_cnt/member_size are optional: when NULL the caller
// is redirected to stack locals so mini_type_is_hfa always has somewhere
// to write (NULL guards not visible in this sampled excerpt).
863 is_float_struct_returnable_via_regs (MonoType *type, int* member_cnt, int* member_size)
865 int local_member_cnt, local_member_size;
867 member_cnt = &local_member_cnt;
870 member_size = &local_member_size;
/* HFA = homogeneous float aggregate, per the type checker */
873 gboolean is_all_floats = mini_type_is_hfa(type, member_cnt, member_size);
874 return is_all_floats && (*member_cnt <= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS);
878 #define is_float_struct_returnable_via_regs(a,b,c) (FALSE)
882 #if PPC_RETURN_SMALL_STRUCTS_IN_REGS
884 // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is
885 // completely composed of fields all of basic types.
886 // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter
887 // pointing to where the return value should be stored.
888 // This is as per the ELF ABI v2.
//
// Recurses into nested struct fields; static fields are ignored.
891 is_struct_returnable_via_regs (MonoClass *klass, gboolean is_pinvoke)
893 gboolean has_a_field = FALSE;
896 gpointer iter = NULL;
/* native vs. managed size — selection condition not visible in this
 * sampled excerpt */
899 size = mono_type_native_stack_size (&klass->byval_arg, 0);
901 size = mini_type_stack_size (&klass->byval_arg, 0);
904 if (size > PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS)
906 while ((f = mono_class_get_fields (klass, &iter))) {
907 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
908 // TBD: Is there a better way to check for the basic types?
909 if (f->type->byref) {
911 } else if ((f->type->type >= MONO_TYPE_BOOLEAN) && (f->type->type <= MONO_TYPE_R8)) {
913 } else if (MONO_TYPE_ISSTRUCT (f->type)) {
914 MonoClass *klass = mono_class_from_mono_type (f->type);
915 if (is_struct_returnable_via_regs(klass, is_pinvoke)) {
930 #define is_struct_returnable_via_regs(a,b) (FALSE)
/*
 * add_general:
 * Assigns the next integer-register or stack slot to `ainfo` for one
 * argument, advancing *gr (next free GPR) and/or *stack_size. When the
 * GPRs (r3..r3+PPC_NUM_REG_ARGS-1) are exhausted the argument goes to the
 * caller's stack frame at PPC_STACK_PARAM_OFFSET. The `simple` flag
 * distinguishes single-slot values from 8-byte values on 32-bit, which
 * need a register pair (the !simple branch structure is only partially
 * visible in this sampled excerpt).
 */
935 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
937 #ifdef __mono_ppc64__
942 if (*gr >= 3 + PPC_NUM_REG_ARGS) {
943 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
944 ainfo->reg = ppc_sp; /* in the caller */
945 ainfo->regtype = RegTypeBase;
946 *stack_size += sizeof (gpointer);
948 ALWAYS_ON_STACK (*stack_size += sizeof (gpointer));
/* 8-byte value on 32-bit: needs two consecutive registers */
952 if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) {
954 //*stack_size += (*stack_size % 8);
956 ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size;
957 ainfo->reg = ppc_sp; /* in the caller */
958 ainfo->regtype = RegTypeBase;
965 ALWAYS_ON_STACK (*stack_size += 8);
973 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
/*
 * has_only_a_r48_field:
 * Returns whether `klass` contains exactly one instance field and that
 * field is an R4 (float) or R8 (double) — such structs can be passed in a
 * floating-point register. Static fields are ignored.
 * NOTE(review): the single-field accounting around `have_field` and the
 * return are not visible in this sampled excerpt.
 */
975 has_only_a_r48_field (MonoClass *klass)
979 gboolean have_field = FALSE;
981 while ((f = mono_class_get_fields (klass, &iter))) {
982 if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
985 if (!f->type->byref && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8))
/*
 * get_call_info:
 * Computes the PPC calling-convention layout for `sig`: for every
 * argument (and the return value) it decides between general registers,
 * FP registers, by-value struct passing, by-address struct passing, or
 * caller-stack slots, and accumulates the total stack usage. The caller
 * owns the returned g_malloc0'd CallInfo.
 * NOTE(review): this is a sampled excerpt — many interior lines (braces,
 * case labels, else arms) are missing; comments below only describe what
 * is visible.
 */
996 get_call_info (MonoMethodSignature *sig)
998 guint i, fr, gr, pstart;
999 int n = sig->hasthis + sig->param_count;
1000 MonoType *simpletype;
1001 guint32 stack_size = 0;
/* CallInfo has a flexible tail of ArgInfo, one per this+params */
1002 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
1003 gboolean is_pinvoke = sig->pinvoke;
1005 fr = PPC_FIRST_FPARG_REG;
1006 gr = PPC_FIRST_ARG_REG;
1008 /* FIXME: handle returning a struct */
1009 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1010 cinfo->vtype_retaddr = TRUE;
1016 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1017 * the first argument, allowing 'this' to be always passed in the first arg reg.
1018 * Also do this if the first argument is a reference type, since virtual calls
1019 * are sometimes made using calli without sig->hasthis set, like in the delegate
1022 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
1024 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1027 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* hidden vret pointer goes in the second slot in this layout */
1031 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1032 cinfo->struct_ret = cinfo->ret.reg;
1033 cinfo->vret_arg_index = 1;
1037 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
/* default layout: vret pointer first */
1041 if (cinfo->vtype_retaddr) {
1042 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1043 cinfo->struct_ret = cinfo->ret.reg;
1047 DEBUG(printf("params: %d\n", sig->param_count));
1048 for (i = pstart; i < sig->param_count; ++i) {
1049 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1050 /* Prevent implicit arguments and sig_cookie from
1051 being passed in registers */
1052 gr = PPC_LAST_ARG_REG + 1;
1053 /* FIXME: don't we have to set fr, too? */
1054 /* Emit the signature cookie just before the implicit arguments */
1055 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1057 DEBUG(printf("param %d: ", i));
1058 if (sig->params [i]->byref) {
1059 DEBUG(printf("byref\n"));
1060 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1064 simpletype = mini_get_underlying_type (sig->params [i]);
1065 switch (simpletype->type) {
1066 case MONO_TYPE_BOOLEAN:
1069 cinfo->args [n].size = 1;
1070 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1073 case MONO_TYPE_CHAR:
1076 cinfo->args [n].size = 2;
1077 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1082 cinfo->args [n].size = 4;
1083 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1089 case MONO_TYPE_FNPTR:
1090 case MONO_TYPE_CLASS:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_STRING:
1093 case MONO_TYPE_SZARRAY:
1094 case MONO_TYPE_ARRAY:
1095 cinfo->args [n].size = sizeof (gpointer);
1096 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1099 case MONO_TYPE_GENERICINST:
1100 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1101 cinfo->args [n].size = sizeof (gpointer);
1102 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1107 case MONO_TYPE_VALUETYPE:
1108 case MONO_TYPE_TYPEDBYREF: {
1110 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1111 if (simpletype->type == MONO_TYPE_TYPEDBYREF)
1112 size = sizeof (MonoTypedRef);
1113 else if (is_pinvoke)
1114 size = mono_class_native_size (klass, NULL);
1116 size = mono_class_value_size (klass, NULL);
/* small single-float/double structs go in an FP register on these ABIs */
1118 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
1119 if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) {
1120 cinfo->args [n].size = size;
1122 /* It was 7, now it is 8 in LinuxPPC */
1123 if (fr <= PPC_LAST_FPARG_REG) {
1124 cinfo->args [n].regtype = RegTypeFP;
1125 cinfo->args [n].reg = fr;
1127 FP_ALSO_IN_REG (gr ++);
1128 #if !defined(__mono_ppc64__)
1130 FP_ALSO_IN_REG (gr ++);
1132 ALWAYS_ON_STACK (stack_size += size);
1134 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1135 cinfo->args [n].regtype = RegTypeBase;
1136 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1143 DEBUG(printf ("load %d bytes struct\n",
1144 mono_class_native_size (sig->params [i]->data.klass, NULL)));
1146 #if PPC_PASS_STRUCTS_BY_VALUE
1148 int align_size = size;
1150 int rest = PPC_LAST_ARG_REG - gr + 1;
1153 #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS
/* ELFv2: homogeneous float aggregates can be spread over FP registers */
1156 gboolean is_all_floats = is_float_struct_returnable_via_regs (sig->params [i], &mbr_cnt, &mbr_size);
1158 if (is_all_floats) {
1159 rest = PPC_LAST_FPARG_REG - fr + 1;
1161 // Pass small (<= 8 member) structures entirely made up of either float or double members
1162 // in FR registers. There have to be at least mbr_cnt registers left.
1163 if (is_all_floats &&
1164 (rest >= mbr_cnt)) {
1166 n_in_regs = MIN (rest, nregs);
1167 cinfo->args [n].regtype = RegTypeFPStructByVal;
1168 cinfo->args [n].vtregs = n_in_regs;
1169 cinfo->args [n].size = mbr_size;
1170 cinfo->args [n].vtsize = nregs - n_in_regs;
1171 cinfo->args [n].reg = fr;
/* two floats share one GPR-sized slot; doubles take one each */
1173 if (mbr_size == 4) {
1175 FP_ALSO_IN_REG (gr += (n_in_regs+1)/2);
1178 FP_ALSO_IN_REG (gr += (n_in_regs));
/* generic by-value struct: round up to pointer-sized register slots */
1183 align_size += (sizeof (gpointer) - 1);
1184 align_size &= ~(sizeof (gpointer) - 1);
1185 nregs = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1186 n_in_regs = MIN (rest, nregs);
1190 /* FIXME: check this */
1191 if (size >= 3 && size % 4 != 0)
1194 cinfo->args [n].regtype = RegTypeStructByVal;
1195 cinfo->args [n].vtregs = n_in_regs;
1196 cinfo->args [n].size = n_in_regs;
1197 cinfo->args [n].vtsize = nregs - n_in_regs;
1198 cinfo->args [n].reg = gr;
1202 #ifdef __mono_ppc64__
1203 if (nregs == 1 && is_pinvoke)
1204 cinfo->args [n].bytes = size;
1207 cinfo->args [n].bytes = 0;
1208 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1209 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1210 stack_size += nregs * sizeof (gpointer);
/* non-by-value ABI: pass a pointer to the struct instead */
1213 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1214 cinfo->args [n].regtype = RegTypeStructByAddr;
1215 cinfo->args [n].vtsize = size;
/* 64-bit integer argument */
1222 cinfo->args [n].size = 8;
1223 add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8);
/* R4 (float) argument */
1227 cinfo->args [n].size = 4;
1229 /* It was 7, now it is 8 in LinuxPPC */
1230 if (fr <= PPC_LAST_FPARG_REG
1231 // For non-native vararg calls the parms must go in storage
1232 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1234 cinfo->args [n].regtype = RegTypeFP;
1235 cinfo->args [n].reg = fr;
1237 FP_ALSO_IN_REG (gr ++);
1238 ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER);
1240 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4);
1241 cinfo->args [n].regtype = RegTypeBase;
1242 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1243 stack_size += SIZEOF_REGISTER;
/* R8 (double) argument */
1248 cinfo->args [n].size = 8;
1249 /* It was 7, now it is 8 in LinuxPPC */
1250 if (fr <= PPC_LAST_FPARG_REG
1251 // For non-native vararg calls the parms must go in storage
1252 && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1254 cinfo->args [n].regtype = RegTypeFP;
1255 cinfo->args [n].reg = fr;
1257 FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER);
1258 ALWAYS_ON_STACK (stack_size += 8);
1260 cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size;
1261 cinfo->args [n].regtype = RegTypeBase;
1262 cinfo->args [n].reg = ppc_sp; /* in the caller*/
1268 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* vararg sentinel at the very end of the parameter list */
1273 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1274 /* Prevent implicit arguments and sig_cookie from
1275 being passed in registers */
1276 gr = PPC_LAST_ARG_REG + 1;
1277 /* Emit the signature cookie just before the implicit arguments */
1278 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* return value: r3 for integers/references, f1 for floats */
1282 simpletype = mini_get_underlying_type (sig->ret);
1283 switch (simpletype->type) {
1284 case MONO_TYPE_BOOLEAN:
1289 case MONO_TYPE_CHAR:
1295 case MONO_TYPE_FNPTR:
1296 case MONO_TYPE_CLASS:
1297 case MONO_TYPE_OBJECT:
1298 case MONO_TYPE_SZARRAY:
1299 case MONO_TYPE_ARRAY:
1300 case MONO_TYPE_STRING:
1301 cinfo->ret.reg = ppc_r3;
1305 cinfo->ret.reg = ppc_r3;
1309 cinfo->ret.reg = ppc_f1;
1310 cinfo->ret.regtype = RegTypeFP;
1312 case MONO_TYPE_GENERICINST:
1313 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1314 cinfo->ret.reg = ppc_r3;
1318 case MONO_TYPE_VALUETYPE:
1320 case MONO_TYPE_TYPEDBYREF:
1321 case MONO_TYPE_VOID:
1324 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1328 /* align stack size to 16 */
1329 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1330 stack_size = (stack_size + 15) & ~15;
1332 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decides whether a tail call from caller_sig to callee_sig is legal on
 * this backend: the callee's frame must fit in the caller's, and nothing
 * may be passed by address on the callee's stack (struct return or
 * RegTypeStructByAddr argument), since that storage would be reused.
 * NOTE(review): sampled excerpt — the freeing of c1/c2 and the return are
 * not visible here.
 */
1337 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1343 c1 = get_call_info (caller_sig);
1344 c2 = get_call_info (callee_sig);
1345 res = c1->stack_usage >= c2->stack_usage;
1346 if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret))
1347 /* An address on the callee's stack is passed as the first argument */
1349 for (i = 0; i < c2->nargs; ++i) {
1350 if (c2->args [i].regtype == RegTypeStructByAddr)
1351 /* An address on the callee's stack is passed as the argument */
1356 if (!mono_debug_count ())
1367 * Set var information according to the calling convention. ppc version.
1368 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assign stack offsets / registers to the return value, locals and incoming
 * arguments of the method being compiled, following the PPC calling
 * convention. The frame grows upward (MONO_CFG_HAS_SPILLUP); all locals end
 * up at positive offsets from the chosen frame register.
 * NOTE: this view of the file is missing some interior lines; the structure
 * documented here follows only what is visible.
 */
1371 mono_arch_allocate_vars (MonoCompile *m)
1373 MonoMethodSignature *sig;
1374 MonoMethodHeader *header;
1376 int i, offset, size, align, curinst;
/* default frame register is the stack pointer; switched to r31 below when needed */
1377 int frame_reg = ppc_sp;
1379 guint32 locals_stack_size, locals_stack_align;
1381 m->flags |= MONO_CFG_HAS_SPILLUP;
1383 /* allow room for the vararg method args: void* and long/double */
1384 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1385 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1386 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1387 * call convs needs to be handled this way.
1389 if (m->flags & MONO_CFG_HAS_VARARGS)
1390 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1391 /* gtk-sharp and other broken code will dllimport vararg functions even with
1392 * non-varargs signatures. Since there is little hope people will get this right
1393 * we assume they won't.
1395 if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
1396 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
1401 * We use the frame register also for any method that has
1402 * exception clauses. This way, when the handlers are called,
1403 * the code will reference local variables using the frame reg instead of
1404 * the stack pointer: if we had to restore the stack pointer, we'd
1405 * corrupt the method frames that are already on the stack (since
1406 * filters get called before stack unwinding happens) when the filter
1407 * code would call any method (this also applies to finally etc.).
1409 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1410 frame_reg = ppc_r31;
1411 m->frame_reg = frame_reg;
/* r31 is callee-saved, so the prolog must preserve it when used as frame reg */
1412 if (frame_reg != ppc_sp) {
1413 m->used_int_regs |= 1 << frame_reg;
1416 sig = mono_method_signature (m->method);
/* struct returns come back via a hidden address arg; scalar returns use r3/f1 */
1420 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1421 m->ret->opcode = OP_REGVAR;
1422 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1424 /* FIXME: handle long values? */
1425 switch (mini_get_underlying_type (sig->ret)->type) {
1426 case MONO_TYPE_VOID:
1430 m->ret->opcode = OP_REGVAR;
1431 m->ret->inst_c0 = m->ret->dreg = ppc_f1;
1434 m->ret->opcode = OP_REGVAR;
1435 m->ret->inst_c0 = m->ret->dreg = ppc_r3;
1439 /* local vars are at a positive offset from the stack pointer */
1441 * also note that if the function uses alloca, we use ppc_r31
1442 * to point at the local variables.
1444 offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */
1445 /* align the offset to 16 bytes: not sure this is needed here */
1447 //offset &= ~(16 - 1);
1449 /* add parameter area size for called functions */
1450 offset += m->param_area;
1452 offset &= ~(16 - 1);
1454 /* allow room to save the return value */
1455 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
1458 /* the MonoLMF structure is stored just below the stack pointer */
1461 /* this stuff should not be needed on ppc and the new jit,
1462 * because a call on ppc to the handlers doesn't change the
1463 * stack pointer and the jist doesn't manipulate the stack pointer
1464 * for operations involving valuetypes.
1466 /* reserve space to store the esp */
1467 offset += sizeof (gpointer);
1469 /* this is a global constant */
1470 mono_exc_esp_offset = offset;
/* reserve a pointer-aligned slot holding the address for the struct return */
1473 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1474 offset += sizeof(gpointer) - 1;
1475 offset &= ~(sizeof(gpointer) - 1);
1477 m->vret_addr->opcode = OP_REGOFFSET;
1478 m->vret_addr->inst_basereg = frame_reg;
1479 m->vret_addr->inst_offset = offset;
1481 if (G_UNLIKELY (m->verbose_level > 1)) {
1482 printf ("vret_addr =");
1483 mono_print_ins (m->vret_addr);
1486 offset += sizeof(gpointer);
/* let the generic allocator lay out the locals, then rebase them on 'offset' */
1489 offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align);
1490 if (locals_stack_align) {
1491 offset += (locals_stack_align - 1);
1492 offset &= ~(locals_stack_align - 1);
1494 for (i = m->locals_start; i < m->num_varinfo; i++) {
1495 if (offsets [i] != -1) {
1496 MonoInst *inst = m->varinfo [i];
1497 inst->opcode = OP_REGOFFSET;
1498 inst->inst_basereg = frame_reg;
1499 inst->inst_offset = offset + offsets [i];
1501 g_print ("allocating local %d (%s) to %d\n",
1502 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1506 offset += locals_stack_size;
/* 'this' (and other implicit args) get pointer-sized, pointer-aligned slots */
1510 inst = m->args [curinst];
1511 if (inst->opcode != OP_REGVAR) {
1512 inst->opcode = OP_REGOFFSET;
1513 inst->inst_basereg = frame_reg;
1514 offset += sizeof (gpointer) - 1;
1515 offset &= ~(sizeof (gpointer) - 1);
1516 inst->inst_offset = offset;
1517 offset += sizeof (gpointer);
1522 for (i = 0; i < sig->param_count; ++i) {
1523 inst = m->args [curinst];
1524 if (inst->opcode != OP_REGVAR) {
1525 inst->opcode = OP_REGOFFSET;
1526 inst->inst_basereg = frame_reg;
/* pinvoke arguments use the native size, managed ones the managed size */
1528 size = mono_type_native_stack_size (sig->params [i], (guint32*)&align);
1529 inst->backend.is_pinvoke = 1;
1531 size = mono_type_size (sig->params [i], &align);
1533 if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (gpointer))
1534 size = align = sizeof (gpointer);
1536 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1537 * they are saved using std in the prolog.
1539 align = sizeof (gpointer);
1540 offset += align - 1;
1541 offset &= ~(align - 1);
1542 inst->inst_offset = offset;
1548 /* some storage for fp conversions */
1551 m->arch.fp_conv_var_offset = offset;
1554 /* align the offset to 16 bytes */
1556 offset &= ~(16 - 1);
/* final frame size for this method */
1559 m->stack_offset = offset;
1561 if (sig->call_convention == MONO_CALL_VARARG) {
1562 CallInfo *cinfo = get_call_info (m->method->signature);
1564 m->sig_cookie = cinfo->sig_cookie.offset;
/*
 * mono_arch_create_vars:
 * Create arch-specific compile-time variables. For struct returns a
 * vret_addr OP_ARG variable is created to hold the hidden return-buffer
 * address passed by the caller.
 */
1571 mono_arch_create_vars (MonoCompile *cfg)
1573 MonoMethodSignature *sig = mono_method_signature (cfg->method);
1575 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1576 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1580 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1581 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
/*
 * emit_sig_cookie:
 * Store the signature cookie (the MonoMethodSignature* of the call) at its
 * stack slot (cinfo->sig_cookie.offset off ppc_r1) for vararg calls, so the
 * callee can decode the variable arguments. Forces disable_aot because the
 * raw signature pointer is embedded as an immediate.
 */
1585 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1587 int sig_reg = mono_alloc_ireg (cfg);
1589 /* FIXME: Add support for signature tokens to AOT */
1590 cfg->disable_aot = TRUE;
1592 MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature);
1593 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG,
1594 ppc_r1, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_emit_call:
 * Emit IR that marshals the arguments of CALL into the locations computed by
 * get_call_info(): general registers, FP registers, register pairs for I8 on
 * 32-bit, struct-by-value register groups (handled later by
 * mono_arch_emit_outarg_vt via OP_OUTARG_VT), or stack slots relative to r1.
 * Also emits the vararg signature cookie and wires up the hidden struct
 * return address.
 */
1598 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1601 MonoMethodSignature *sig;
1605 sig = call->signature;
1606 n = sig->param_count + sig->hasthis;
1608 cinfo = get_call_info (sig);
1610 for (i = 0; i < n; ++i) {
1611 ArgInfo *ainfo = cinfo->args + i;
/* args before 'hasthis' is the implicit this pointer (treated as native int) */
1614 if (i >= sig->hasthis)
1615 t = sig->params [i - sig->hasthis];
1617 t = &mono_defaults.int_class->byval_arg;
1618 t = mini_get_underlying_type (t);
/* the sig cookie precedes the first vararg argument */
1620 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
1621 emit_sig_cookie (cfg, call, cinfo);
1623 in = call->args [i];
1625 if (ainfo->regtype == RegTypeGeneral) {
1626 #ifndef __mono_ppc64__
/* 32-bit: an I8/U8 occupies a register pair; LS word goes in reg+1, MS in reg */
1627 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1628 MONO_INST_NEW (cfg, ins, OP_MOVE);
1629 ins->dreg = mono_alloc_ireg (cfg);
1630 ins->sreg1 = MONO_LVREG_LS (in->dreg);
1631 MONO_ADD_INS (cfg->cbb, ins);
1632 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1634 MONO_INST_NEW (cfg, ins, OP_MOVE);
1635 ins->dreg = mono_alloc_ireg (cfg);
1636 ins->sreg1 = MONO_LVREG_MS (in->dreg);
1637 MONO_ADD_INS (cfg->cbb, ins);
1638 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1642 MONO_INST_NEW (cfg, ins, OP_MOVE);
1643 ins->dreg = mono_alloc_ireg (cfg);
1644 ins->sreg1 = in->dreg;
1645 MONO_ADD_INS (cfg->cbb, ins);
1647 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* struct passed by reference: defer to mono_arch_emit_outarg_vt via OP_OUTARG_VT */
1649 } else if (ainfo->regtype == RegTypeStructByAddr) {
1650 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1651 ins->opcode = OP_OUTARG_VT;
1652 ins->sreg1 = in->dreg;
1653 ins->klass = in->klass;
1654 ins->inst_p0 = call;
/* ainfo is copied because cinfo may be freed before the OUTARG_VT is lowered */
1655 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1656 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1657 MONO_ADD_INS (cfg->cbb, ins);
1658 } else if (ainfo->regtype == RegTypeStructByVal) {
1659 /* this is further handled in mono_arch_emit_outarg_vt () */
1660 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1661 ins->opcode = OP_OUTARG_VT;
1662 ins->sreg1 = in->dreg;
1663 ins->klass = in->klass;
1664 ins->inst_p0 = call;
1665 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1666 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1667 MONO_ADD_INS (cfg->cbb, ins);
1668 } else if (ainfo->regtype == RegTypeFPStructByVal) {
1669 /* this is further handled in mono_arch_emit_outarg_vt () */
1670 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1671 ins->opcode = OP_OUTARG_VT;
1672 ins->sreg1 = in->dreg;
1673 ins->klass = in->klass;
1674 ins->inst_p0 = call;
1675 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1676 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1677 MONO_ADD_INS (cfg->cbb, ins);
1678 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* argument goes on the stack (past the register area), offsets relative to r1 */
1679 } else if (ainfo->regtype == RegTypeBase) {
1680 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1682 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1683 if (t->type == MONO_TYPE_R8)
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
1690 } else if (ainfo->regtype == RegTypeFP) {
1691 if (t->type == MONO_TYPE_VALUETYPE) {
1692 /* this is further handled in mono_arch_emit_outarg_vt () */
1693 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1694 ins->opcode = OP_OUTARG_VT;
1695 ins->sreg1 = in->dreg;
1696 ins->klass = in->klass;
1697 ins->inst_p0 = call;
1698 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1699 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1700 MONO_ADD_INS (cfg->cbb, ins);
1702 cfg->flags |= MONO_CFG_HAS_FPOUT;
1704 int dreg = mono_alloc_freg (cfg);
/* single-precision args are narrowed from the R8 virtual register */
1706 if (ainfo->size == 4) {
1707 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg);
1709 MONO_INST_NEW (cfg, ins, OP_FMOVE);
1711 ins->sreg1 = in->dreg;
1712 MONO_ADD_INS (cfg->cbb, ins);
1715 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
1716 cfg->flags |= MONO_CFG_HAS_FPOUT;
1719 g_assert_not_reached ();
1723 /* Emit the signature cookie in the case that there is no
1724 additional argument */
1725 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1726 emit_sig_cookie (cfg, call, cinfo);
/* pass the hidden struct-return address in its register */
1728 if (cinfo->struct_ret) {
1731 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1732 vtarg->sreg1 = call->vret_var->dreg;
1733 vtarg->dreg = mono_alloc_preg (cfg);
1734 MONO_ADD_INS (cfg->cbb, vtarg);
1736 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE);
1739 call->stack_usage = cinfo->stack_usage;
1740 cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage));
1741 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT created by mono_arch_emit_call: copy a value type
 * argument (whose ArgInfo was attached as ins->inst_p1) into integer
 * registers, FP registers, the caller's outgoing stack area (via r1), or a
 * local copy whose address is passed, depending on ainfo->regtype.
 */
1749 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1751 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1752 ArgInfo *ainfo = ins->inst_p1;
/* ovf_size = number of pointer-sized words that overflow onto the stack */
1753 int ovf_size = ainfo->vtsize;
1754 int doffset = ainfo->offset;
1755 int i, soffset, dreg;
1757 if (ainfo->regtype == RegTypeStructByVal) {
1764 * Darwin pinvokes needs some special handling for 1
1765 * and 2 byte arguments
1767 g_assert (ins->klass);
1768 if (call->signature->pinvoke)
1769 size = mono_class_native_size (ins->klass, NULL);
/* tiny structs: load as a sign-extended I1/I2 and pass in a single register */
1770 if (size == 2 || size == 1) {
1771 int tmpr = mono_alloc_ireg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset);
1775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset);
1776 dreg = mono_alloc_ireg (cfg);
1777 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr);
1778 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE);
/* copy one pointer-sized word per register, vtregs words in total */
1781 for (i = 0; i < ainfo->vtregs; ++i) {
1782 dreg = mono_alloc_ireg (cfg);
1783 #if G_BYTE_ORDER == G_BIG_ENDIAN
/* big-endian: a short trailing chunk sits in the high bytes of the word,
 * shift it right so the value is register-justified */
1784 int antipadding = 0;
1787 antipadding = sizeof (gpointer) - ainfo->bytes;
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8);
1793 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1795 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1796 soffset += sizeof (gpointer);
/* whatever did not fit in registers is memcpy'd to the outgoing stack area */
1799 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), SIZEOF_VOID_P);
1800 } else if (ainfo->regtype == RegTypeFPStructByVal) {
/* homogeneous float struct: one FP register per member of ainfo->size bytes */
1802 for (i = 0; i < ainfo->vtregs; ++i) {
1803 int tmpr = mono_alloc_freg (cfg);
1804 if (ainfo->size == 4)
1805 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, soffset);
1807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, soffset);
1808 dreg = mono_alloc_freg (cfg);
1809 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1810 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg+i, TRUE);
1811 soffset += ainfo->size;
1814 mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (gpointer), SIZEOF_VOID_P);
/* struct consisting of a single float/double passed in one FP register */
1815 } else if (ainfo->regtype == RegTypeFP) {
1816 int tmpr = mono_alloc_freg (cfg);
1817 if (ainfo->size == 4)
1818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0);
1821 dreg = mono_alloc_freg (cfg);
1822 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr);
1823 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
/* by-address: make a local copy and pass its address (callee may mutate it) */
1825 MonoInst *vtcopy = mono_compile_create_var (cfg, &src->klass->byval_arg, OP_LOCAL);
1829 /* FIXME: alignment? */
1830 if (call->signature->pinvoke) {
1831 size = mono_type_native_stack_size (&src->klass->byval_arg, NULL);
1832 vtcopy->backend.is_pinvoke = 1;
1834 size = mini_type_stack_size (&src->klass->byval_arg, NULL);
1837 g_assert (ovf_size > 0);
1839 EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
1840 mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, SIZEOF_VOID_P);
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg);
1845 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
/*
 * mono_arch_emit_setret:
 * Move VAL into the method's return location: an OP_SETLRET register pair for
 * I8/U8 on 32-bit, an FMOVE into the FP return register for R4/R8, or a plain
 * MOVE into the integer return register otherwise.
 */
1850 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1852 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1854 #ifndef __mono_ppc64__
/* 32-bit: 64-bit values live in a virtual-register pair (LS/MS halves) */
1855 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1858 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1859 ins->sreg1 = MONO_LVREG_LS (val->dreg);
1860 ins->sreg2 = MONO_LVREG_MS (val->dreg);
1861 MONO_ADD_INS (cfg->cbb, ins);
1865 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1866 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1870 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1873 /* FIXME: this is just a useless hint: fix the interface to include the opcode */
1875 mono_arch_is_inst_imm (gint64 imm)
1880 #endif /* DISABLE_JIT */
1883 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit a call to the tracing function FUNC at method entry:
 * r3 = the MonoMethod being compiled, r4 = NULL (placeholder frame pointer),
 * then branch via the call register. (Tail of the function is outside this
 * view.)
 */
1887 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1891 ppc_load_ptr (code, ppc_r3, cfg->method);
1892 ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
1893 ppc_load_func (code, PPC_CALL_REG, func);
1894 ppc_mtlr (code, PPC_CALL_REG);
/*
 * mono_arch_instrument_epilog_full:
 * Emit a call to the tracing function FUNC before the method returns. The
 * live return value (r3, the r3/r4 pair, or f1, depending on the return
 * type) is spilled to the stack at save_offset, the tracer is invoked with
 * the method in r3 (and, when enable_arguments is set, the return value in
 * the argument registers), and the saved return value is reloaded afterwards.
 */
1908 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1911 int save_mode = SAVE_NONE;
1913 MonoMethod *method = cfg->method;
1914 int rtype = mini_get_underlying_type (mono_method_signature (method)->ret)->type;
1915 int save_offset = PPC_STACK_PARAM_OFFSET + cfg->param_area;
/* grow the native code buffer if the ~16 instructions below might not fit */
1919 offset = code - cfg->native_code;
1920 /* we need about 16 instructions */
1921 if (offset > (cfg->code_size - 16 * 4)) {
1922 cfg->code_size *= 2;
1923 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1924 code = cfg->native_code + offset;
1928 case MONO_TYPE_VOID:
1929 /* special case string .ctor icall */
/* NOTE(review): strcmp() returns 0 on a match, so this condition selects
 * methods whose name is NOT ".ctor" on the string class; other backends
 * (e.g. mini-arm.c) use !strcmp here. Looks inverted — verify before fixing,
 * since the else branch is not visible in this view. */
1930 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1931 save_mode = SAVE_ONE;
1933 save_mode = SAVE_NONE;
1935 #ifndef __mono_ppc64__
1938 save_mode = SAVE_TWO;
1943 save_mode = SAVE_FP;
1945 case MONO_TYPE_VALUETYPE:
1946 save_mode = SAVE_STRUCT;
1949 save_mode = SAVE_ONE;
/* spill the return value before clobbering the argument registers */
1953 switch (save_mode) {
1955 ppc_stw (code, ppc_r3, save_offset, cfg->frame_reg);
1956 ppc_stw (code, ppc_r4, save_offset + 4, cfg->frame_reg);
1957 if (enable_arguments) {
1958 ppc_mr (code, ppc_r5, ppc_r4);
1959 ppc_mr (code, ppc_r4, ppc_r3);
1963 ppc_stptr (code, ppc_r3, save_offset, cfg->frame_reg);
1964 if (enable_arguments) {
1965 ppc_mr (code, ppc_r4, ppc_r3);
1969 ppc_stfd (code, ppc_f1, save_offset, cfg->frame_reg);
1970 if (enable_arguments) {
1971 /* FIXME: what reg? */
1972 ppc_fmr (code, ppc_f3, ppc_f1);
1973 /* FIXME: use 8 byte load on PPC64 */
1974 ppc_lwz (code, ppc_r4, save_offset, cfg->frame_reg);
1975 ppc_lwz (code, ppc_r5, save_offset + 4, cfg->frame_reg);
1979 if (enable_arguments) {
1980 /* FIXME: get the actual address */
1981 ppc_mr (code, ppc_r4, ppc_r3);
1982 // FIXME: Support the new v2 ABI!
/* call the tracer: r3 = method, target in the call register */
1990 ppc_load_ptr (code, ppc_r3, cfg->method);
1991 ppc_load_func (code, PPC_CALL_REG, func);
1992 ppc_mtlr (code, PPC_CALL_REG);
/* restore the spilled return value */
1995 switch (save_mode) {
1997 ppc_lwz (code, ppc_r3, save_offset, cfg->frame_reg);
1998 ppc_lwz (code, ppc_r4, save_offset + 4, cfg->frame_reg);
2001 ppc_ldptr (code, ppc_r3, save_offset, cfg->frame_reg);
2004 ppc_lfd (code, ppc_f1, save_offset, cfg->frame_reg);
2014 * Conditional branches have a small offset, so if it is likely overflowed,
2015 * we do a branch to the end of the method (uncond branches have much larger
2016 * offsets) where we perform the conditional and jump back unconditionally.
2017 * It's slightly slower, since we add two uncond branches, but it's very simple
2018 * with the current patch implementation and such large methods are likely not
2019 * going to be perf critical anyway.
2024 const char *exception;
/*
 * EMIT_COND_BRANCH_FLAGS:
 * Emit a conditional branch (BO field = b0, BI field = b1) to
 * ins->inst_true_bb. If the estimated displacement may not fit in the
 * bc instruction's signed 16-bit field (checked with a +/-1024 safety
 * margin), record a MONO_PATCH_INFO_BB_OVF patch so the branch is routed
 * through a trampoline at the end of the method; otherwise emit a normal
 * patched bc.
 *
 * BUG(review): `ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))` applies the
 * predicate to its own boolean result, which is always a small value, so the
 * second operand of the || never triggers the overflow path. It was almost
 * certainly meant to read `!ppc_is_imm16 (br_disp - 1024)`. The same typo
 * appears in EMIT_COND_SYSTEM_EXCEPTION_FLAGS below. Not fixed here because
 * the macro's closing lines are not visible in this view.
 */
2031 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
2032 if (0 && ins->inst_true_bb->native_offset) { \
2033 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
2035 int br_disp = ins->inst_true_bb->max_offset - offset; \
2036 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
2037 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
2038 ovfj->data.bb = ins->inst_true_bb; \
2039 ovfj->ip_offset = 0; \
2040 ovfj->b0_cond = (b0); \
2041 ovfj->b1_cond = (b1); \
2042 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
2045 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2046 ppc_bc (code, (b0), (b1), 0); \
2050 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
2052 /* emit an exception if condition is fail
2054 * We assign the extra code used to throw the implicit exceptions
2055 * to cfg->bb_exit as far as the big branch handling is concerned
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS:
 * Emit a conditional branch to the implicit-exception throw code (attributed
 * to cfg->bb_exit for branch-distance accounting). If the displacement may
 * overflow bc's 16-bit field, record a MONO_PATCH_INFO_EXC_OVF patch and
 * reserve extra room in bb_exit; otherwise emit a patched bcl with a
 * MONO_PATCH_INFO_EXC patch carrying the exception name.
 *
 * BUG(review): same double-application typo as EMIT_COND_BRANCH_FLAGS above —
 * `ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))` should read
 * `!ppc_is_imm16 (br_disp - 1024)`. Not fixed here; macro tail not visible.
 */
2057 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
2059 int br_disp = cfg->bb_exit->max_offset - offset; \
2060 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
2061 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
2062 ovfj->data.exception = (exc_name); \
2063 ovfj->ip_offset = code - cfg->native_code; \
2064 ovfj->b0_cond = (b0); \
2065 ovfj->b1_cond = (b1); \
2066 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
2068 cfg->bb_exit->max_offset += 24; \
2070 mono_add_patch_info (cfg, code - cfg->native_code, \
2071 MONO_PATCH_INFO_EXC, exc_name); \
2072 ppc_bcl (code, (b0), (b1), 0); \
2076 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
2079 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * normalize_opcode:
 * Map size-specific load/store/shift opcodes to their generic pointer-sized
 * equivalents so the peephole pass can pattern-match 32-bit and 64-bit
 * variants with the same code (MONO_PPC_32_64_CASE expands to the variant
 * matching the build's word size).
 */
2084 normalize_opcode (int opcode)
2087 #ifndef __mono_ilp32__
2088 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE):
2089 return OP_LOAD_MEMBASE;
2090 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX):
2091 return OP_LOAD_MEMINDEX;
2092 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG):
2093 return OP_STORE_MEMBASE_REG;
2094 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM):
2095 return OP_STORE_MEMBASE_IMM;
2096 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX):
2097 return OP_STORE_MEMINDEX;
2099 case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM):
2101 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM):
2102 return OP_SHR_UN_IMM;
/*
 * mono_arch_peephole_pass_2:
 * Local peephole optimizations over one basic block: strength-reduce
 * multiplications by 1 / powers of two, forward stored values into
 * immediately-following loads of the same [basereg+offset] slot, collapse
 * narrowing store/load pairs into register conversions, and remove
 * redundant/inverse moves. last_ins tracks the previous surviving
 * instruction for the pairwise patterns.
 */
2109 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2111 MonoInst *ins, *n, *last_ins = NULL;
2113 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2114 switch (normalize_opcode (ins->opcode)) {
2116 /* remove unnecessary multiplication with 1 */
2117 if (ins->inst_imm == 1) {
2118 if (ins->dreg != ins->sreg1) {
2119 ins->opcode = OP_MOVE;
2121 MONO_DELETE_INS (bb, ins);
/* multiply by 2^n becomes a left shift by n */
2125 int power2 = mono_is_power_of_two (ins->inst_imm);
2127 ins->opcode = OP_SHL_IMM;
2128 ins->inst_imm = power2;
2132 case OP_LOAD_MEMBASE:
2134 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2135 * OP_LOAD_MEMBASE offset(basereg), reg
/* store-to-load forwarding: reuse the just-stored register */
2137 if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG &&
2138 ins->inst_basereg == last_ins->inst_destbasereg &&
2139 ins->inst_offset == last_ins->inst_offset) {
2140 if (ins->dreg == last_ins->sreg1) {
2141 MONO_DELETE_INS (bb, ins);
2144 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2145 ins->opcode = OP_MOVE;
2146 ins->sreg1 = last_ins->sreg1;
2150 * Note: reg1 must be different from the basereg in the second load
2151 * OP_LOAD_MEMBASE offset(basereg), reg1
2152 * OP_LOAD_MEMBASE offset(basereg), reg2
2154 * OP_LOAD_MEMBASE offset(basereg), reg1
2155 * OP_MOVE reg1, reg2
2157 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE &&
2158 ins->inst_basereg != last_ins->dreg &&
2159 ins->inst_basereg == last_ins->inst_basereg &&
2160 ins->inst_offset == last_ins->inst_offset) {
2162 if (ins->dreg == last_ins->dreg) {
2163 MONO_DELETE_INS (bb, ins);
2166 ins->opcode = OP_MOVE;
2167 ins->sreg1 = last_ins->dreg;
2170 //g_assert_not_reached ();
2174 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2175 * OP_LOAD_MEMBASE offset(basereg), reg
2177 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2178 * OP_ICONST reg, imm
2180 } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM &&
2181 ins->inst_basereg == last_ins->inst_destbasereg &&
2182 ins->inst_offset == last_ins->inst_offset) {
2183 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2184 ins->opcode = OP_ICONST;
2185 ins->inst_c0 = last_ins->inst_imm;
/* deliberately left asserting: this rewrite has never been validated */
2186 g_assert_not_reached (); // check this rule
/* narrow store followed by a reload of the same slot becomes a register
 * sign/zero extension of the stored value */
2190 case OP_LOADU1_MEMBASE:
2191 case OP_LOADI1_MEMBASE:
2192 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2193 ins->inst_basereg == last_ins->inst_destbasereg &&
2194 ins->inst_offset == last_ins->inst_offset) {
2195 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2196 ins->sreg1 = last_ins->sreg1;
2199 case OP_LOADU2_MEMBASE:
2200 case OP_LOADI2_MEMBASE:
2201 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2202 ins->inst_basereg == last_ins->inst_destbasereg &&
2203 ins->inst_offset == last_ins->inst_offset) {
2204 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2205 ins->sreg1 = last_ins->sreg1;
2208 #ifdef __mono_ppc64__
2209 case OP_LOADU4_MEMBASE:
2210 case OP_LOADI4_MEMBASE:
2211 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
2212 ins->inst_basereg == last_ins->inst_destbasereg &&
2213 ins->inst_offset == last_ins->inst_offset) {
2214 ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
2215 ins->sreg1 = last_ins->sreg1;
2220 ins->opcode = OP_MOVE;
/* remove no-op moves (dreg == sreg1) */
2224 if (ins->dreg == ins->sreg1) {
2225 MONO_DELETE_INS (bb, ins);
2229 * OP_MOVE sreg, dreg
2230 * OP_MOVE dreg, sreg
2232 if (last_ins && last_ins->opcode == OP_MOVE &&
2233 ins->sreg1 == last_ins->dreg &&
2234 ins->dreg == last_ins->sreg1) {
2235 MONO_DELETE_INS (bb, ins);
2243 bb->last_ins = last_ins;
/*
 * mono_arch_decompose_opts:
 * Rewrite opcodes the PPC backend cannot emit directly into simpler IR
 * sequences. Integer->float conversions use the classic PPC trick of
 * composing an IEEE double {0x43300000, value} in a stack slot at
 * cfg->arch.fp_conv_var_offset and subtracting the matching bias constant.
 * The original instruction is turned into OP_NOP once the replacement
 * sequence has been emitted.
 */
2247 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
2249 switch (ins->opcode) {
2250 case OP_ICONV_TO_R_UN: {
2251 // This value is OK as-is for both big and little endian because of how it is stored
2252 static const guint64 adjust_val = 0x4330000000000000ULL;
2253 int msw_reg = mono_alloc_ireg (cfg);
2254 int adj_reg = mono_alloc_freg (cfg);
2255 int tmp_reg = mono_alloc_freg (cfg);
2256 int basereg = ppc_sp;
2258 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
/* if the slot is too far for a 16-bit displacement, materialize the address */
2259 if (!ppc_is_imm16 (offset + 4)) {
2260 basereg = mono_alloc_ireg (cfg);
2261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2263 #if G_BYTE_ORDER == G_BIG_ENDIAN
2264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1);
2267 // For little endian the words are reversed
2268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, msw_reg);
2269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, ins->sreg1);
/* result = (double){0x43300000, u32} - 0x1.0p52 */
2271 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val);
2272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2273 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2274 ins->opcode = OP_NOP;
2277 #ifndef __mono_ppc64__
2278 case OP_ICONV_TO_R4:
2279 case OP_ICONV_TO_R8: {
2280 /* If we have a PPC_FEATURE_64 machine we can avoid
2281 this and use the fcfid instruction. Otherwise
2282 on an old 32-bit chip and we have to do this the
2284 if (!(cpu_hw_caps & PPC_ISA_64)) {
2285 /* FIXME: change precision for CEE_CONV_R4 */
/* signed variant: XOR with 0x80000000 biases the value, matched by the
 * 0x80000000 in the low word of the adjust constant */
2286 static const guint64 adjust_val = 0x4330000080000000ULL;
2287 int msw_reg = mono_alloc_ireg (cfg);
2288 int xored = mono_alloc_ireg (cfg);
2289 int adj_reg = mono_alloc_freg (cfg);
2290 int tmp_reg = mono_alloc_freg (cfg);
2291 int basereg = ppc_sp;
2293 if (!ppc_is_imm16 (offset + 4)) {
2294 basereg = mono_alloc_ireg (cfg);
2295 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2297 MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
2298 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg);
2299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000);
2300 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored);
2301 MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val);
2302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset);
2303 MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
2304 if (ins->opcode == OP_ICONV_TO_R4)
2305 MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->sreg1);
2306 ins->opcode = OP_NOP;
/* finiteness check: spill the double and inspect the exponent bits in the
 * most-significant word (word order depends on endianness) */
2312 int msw_reg = mono_alloc_ireg (cfg);
2313 int basereg = ppc_sp;
2315 if (!ppc_is_imm16 (offset + 4)) {
2316 basereg = mono_alloc_ireg (cfg);
2317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset);
2319 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1);
2320 #if G_BYTE_ORDER == G_BIG_ENDIAN
2321 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset);
2323 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset+4);
2325 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_FINITE, -1, msw_reg);
2326 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
2327 ins->opcode = OP_NOP;
2330 #ifdef __mono_ppc64__
/* 64-bit: do a 32-bit overflow add by shifting both operands into the high
 * word, adding with the 64-bit overflow-checking opcode, then shifting the
 * result back (arithmetic for signed, logical for unsigned) */
2332 case OP_IADD_OVF_UN:
2334 int shifted1_reg = mono_alloc_ireg (cfg);
2335 int shifted2_reg = mono_alloc_ireg (cfg);
2336 int result_shifted_reg = mono_alloc_ireg (cfg);
2338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32);
2339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32);
2340 MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg);
2341 if (ins->opcode == OP_IADD_OVF_UN)
2342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32);
2344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32);
2345 ins->opcode = OP_NOP;
/*
 * mono_arch_decompose_long_opts:
 * 32-bit only: split 64-bit overflow-checked add/sub/neg into operations on
 * the LS/MS virtual-register halves, using carry-propagating opcodes
 * (ADDCC/SUBCC set the carry, the *_CARRY forms consume it and check for
 * overflow).
 */
2355 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
2357 switch (ins->opcode) {
2359 /* ADC sets the condition code */
2360 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2361 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2364 case OP_LADD_OVF_UN:
2365 /* ADC sets the condition code */
2366 MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2367 MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2371 /* SBB sets the condition code */
2372 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2373 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
2376 case OP_LSUB_OVF_UN:
2377 /* SBB sets the condition code */
2378 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2));
2379 MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2));
/* 64-bit negate via subfic (0 - LS, sets carry) + subfze (0 - MS - carry) */
2383 /* From gcc generated code */
2384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
2385 MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1));
2394 * the branch_b0_table should maintain the order of these
2408 branch_b0_table [] = {
2423 branch_b1_table [] = {
2437 #define NEW_INS(cfg,dest,op) do { \
2438 MONO_INST_NEW((cfg), (dest), (op)); \
2439 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map an opcode with an immediate (or membase) operand to its register-
 * register (or memindex) counterpart, used by the lowering pass when an
 * immediate does not fit the instruction encoding and must be loaded into a
 * register first. *_MEMBASE_IMM stores map to *_MEMBASE_REG (the immediate is
 * materialized separately). Falls back to mono_op_imm_to_op() and aborts via
 * g_error() if no mapping exists.
 */
2443 map_to_reg_reg_op (int op)
2452 case OP_COMPARE_IMM:
2454 case OP_ICOMPARE_IMM:
2456 case OP_LCOMPARE_IMM:
2472 case OP_LOAD_MEMBASE:
2473 return OP_LOAD_MEMINDEX;
2474 case OP_LOADI4_MEMBASE:
2475 return OP_LOADI4_MEMINDEX;
2476 case OP_LOADU4_MEMBASE:
2477 return OP_LOADU4_MEMINDEX;
2478 case OP_LOADI8_MEMBASE:
2479 return OP_LOADI8_MEMINDEX;
2480 case OP_LOADU1_MEMBASE:
2481 return OP_LOADU1_MEMINDEX;
2482 case OP_LOADI2_MEMBASE:
2483 return OP_LOADI2_MEMINDEX;
2484 case OP_LOADU2_MEMBASE:
2485 return OP_LOADU2_MEMINDEX;
2486 case OP_LOADI1_MEMBASE:
2487 return OP_LOADI1_MEMINDEX;
2488 case OP_LOADR4_MEMBASE:
2489 return OP_LOADR4_MEMINDEX;
2490 case OP_LOADR8_MEMBASE:
2491 return OP_LOADR8_MEMINDEX;
2492 case OP_STOREI1_MEMBASE_REG:
2493 return OP_STOREI1_MEMINDEX;
2494 case OP_STOREI2_MEMBASE_REG:
2495 return OP_STOREI2_MEMINDEX;
2496 case OP_STOREI4_MEMBASE_REG:
2497 return OP_STOREI4_MEMINDEX;
2498 case OP_STOREI8_MEMBASE_REG:
2499 return OP_STOREI8_MEMINDEX;
2500 case OP_STORE_MEMBASE_REG:
2501 return OP_STORE_MEMINDEX;
2502 case OP_STORER4_MEMBASE_REG:
2503 return OP_STORER4_MEMINDEX;
2504 case OP_STORER8_MEMBASE_REG:
2505 return OP_STORER8_MEMINDEX;
2506 case OP_STORE_MEMBASE_IMM:
2507 return OP_STORE_MEMBASE_REG;
2508 case OP_STOREI1_MEMBASE_IMM:
2509 return OP_STOREI1_MEMBASE_REG;
2510 case OP_STOREI2_MEMBASE_IMM:
2511 return OP_STOREI2_MEMBASE_REG;
2512 case OP_STOREI4_MEMBASE_IMM:
2513 return OP_STOREI4_MEMBASE_REG;
2514 case OP_STOREI8_MEMBASE_IMM:
2515 return OP_STOREI8_MEMBASE_REG;
/* NOTE(review): mono_op_imm_to_op() is called twice (check + return);
 * caching the result in a local would avoid the duplicate lookup */
2517 if (mono_op_imm_to_op (op) == -1)
2518 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op));
2519 return mono_op_imm_to_op (op);
2522 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
/*
 * compare_opcode_is_unsigned:
 * True when OPCODE is one of the unsigned comparison/branch/conditional-
 * exception/set opcodes, so the lowering pass can choose cmpl (logical,
 * unsigned) over cmp (signed). Relies on the *_UN opcodes occupying
 * contiguous ranges in the opcode enumeration.
 */
2524 #define compare_opcode_is_unsigned(opcode) \
2525 (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
2526 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
2527 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
2528 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
2529 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
2530 ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
2531 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
2532 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
2535 * Remove from the instruction list the instructions that can't be
2536 * represented with very simple instructions with no register
/*
 * Arch lowering pass for PPC: rewrites IR instructions whose immediates do not
 * fit the 16-bit signed/unsigned fields of PPC instructions into an OP_ICONST
 * load into a fresh vreg followed by the register-register form of the opcode
 * (via map_to_reg_reg_op). Also decomposes rem into div/mul/sub, lowers
 * LOCALLOC_IMM, and turns R4/R8 constants into memory loads.
 * NOTE(review): this listing appears to have lines elided (braces, breaks,
 * some case labels are not visible) — comments below are hedged accordingly.
 */
2540 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2542 MonoInst *ins, *next, *temp, *last_ins = NULL;
2545 MONO_BB_FOR_EACH_INS (bb, ins) {
2547 switch (ins->opcode) {
2548 case OP_IDIV_UN_IMM:
2551 case OP_IREM_UN_IMM:
2552 CASE_PPC64 (OP_LREM_IMM) {
/* div/rem with immediate divisor: PPC has no divide-immediate, so
 * materialize the divisor in a register and switch to the reg-reg opcode. */
2553 NEW_INS (cfg, temp, OP_ICONST);
2554 temp->inst_c0 = ins->inst_imm;
2555 temp->dreg = mono_alloc_ireg (cfg);
2556 ins->sreg2 = temp->dreg;
2557 if (ins->opcode == OP_IDIV_IMM)
2558 ins->opcode = OP_IDIV;
2559 else if (ins->opcode == OP_IREM_IMM)
2560 ins->opcode = OP_IREM;
2561 else if (ins->opcode == OP_IDIV_UN_IMM)
2562 ins->opcode = OP_IDIV_UN;
2563 else if (ins->opcode == OP_IREM_UN_IMM)
2564 ins->opcode = OP_IREM_UN;
2565 else if (ins->opcode == OP_LREM_IMM)
2566 ins->opcode = OP_LREM;
2568 /* handle rem separately */
2573 CASE_PPC64 (OP_LREM)
2574 CASE_PPC64 (OP_LREM_UN) {
2576 /* we change a rem dest, src1, src2 to
2577 * div temp1, src1, src2
2578 * mul temp2, temp1, src2
2579 * sub dest, src1, temp2
/* PPC has no remainder instruction, so rem is decomposed as above;
 * NEW_INS inserts the div and mul before the (rewritten) sub. */
2581 if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) {
2582 NEW_INS (cfg, mul, OP_IMUL);
2583 NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN);
2584 ins->opcode = OP_ISUB;
2586 NEW_INS (cfg, mul, OP_LMUL);
2587 NEW_INS (cfg, temp, ins->opcode == OP_LREM? OP_LDIV: OP_LDIV_UN);
2588 ins->opcode = OP_LSUB;
2590 temp->sreg1 = ins->sreg1;
2591 temp->sreg2 = ins->sreg2;
2592 temp->dreg = mono_alloc_ireg (cfg);
2593 mul->sreg1 = temp->dreg;
2594 mul->sreg2 = ins->sreg2;
2595 mul->dreg = mono_alloc_ireg (cfg);
2596 ins->sreg2 = mul->dreg;
/* add-immediate: addi only takes a signed 16-bit immediate. */
2600 CASE_PPC64 (OP_LADD_IMM)
2603 if (!ppc_is_imm16 (ins->inst_imm)) {
2604 NEW_INS (cfg, temp, OP_ICONST);
2605 temp->inst_c0 = ins->inst_imm;
2606 temp->dreg = mono_alloc_ireg (cfg);
2607 ins->sreg2 = temp->dreg;
2608 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* sub-immediate is emitted as addi with the negated value (see codegen),
 * hence the -inst_imm range check here. */
2612 CASE_PPC64 (OP_LSUB_IMM)
2614 if (!ppc_is_imm16 (-ins->inst_imm)) {
2615 NEW_INS (cfg, temp, OP_ICONST);
2616 temp->inst_c0 = ins->inst_imm;
2617 temp->dreg = mono_alloc_ireg (cfg);
2618 ins->sreg2 = temp->dreg;
2619 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* logical immediates: andi./ori/xori handle either the low or the high
 * 16 bits, but not both set at once — presumably this is that check. */
2631 gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff));
2632 #ifdef __mono_ppc64__
/* on ppc64 any immediate with bits above the low 32 also needs a register */
2633 if (ins->inst_imm & 0xffffffff00000000ULL)
2637 NEW_INS (cfg, temp, OP_ICONST);
2638 temp->inst_c0 = ins->inst_imm;
2639 temp->dreg = mono_alloc_ireg (cfg);
2640 ins->sreg2 = temp->dreg;
2641 ins->opcode = map_to_reg_reg_op (ins->opcode);
2650 NEW_INS (cfg, temp, OP_ICONST);
2651 temp->inst_c0 = ins->inst_imm;
2652 temp->dreg = mono_alloc_ireg (cfg);
2653 ins->sreg2 = temp->dreg;
2654 ins->opcode = map_to_reg_reg_op (ins->opcode);
2656 case OP_COMPARE_IMM:
2657 case OP_ICOMPARE_IMM:
2658 CASE_PPC64 (OP_LCOMPARE_IMM)
2660 /* Branch opts can eliminate the branch */
2661 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
2662 ins->opcode = OP_NOP;
/* signedness of the immediate range depends on the consumer: cmpli takes
 * an unsigned 16-bit field, cmpi a signed one. */
2666 if (compare_opcode_is_unsigned (next->opcode)) {
2667 if (!ppc_is_uimm16 (ins->inst_imm)) {
2668 NEW_INS (cfg, temp, OP_ICONST);
2669 temp->inst_c0 = ins->inst_imm;
2670 temp->dreg = mono_alloc_ireg (cfg);
2671 ins->sreg2 = temp->dreg;
2672 ins->opcode = map_to_reg_reg_op (ins->opcode);
2675 if (!ppc_is_imm16 (ins->inst_imm)) {
2676 NEW_INS (cfg, temp, OP_ICONST);
2677 temp->inst_c0 = ins->inst_imm;
2678 temp->dreg = mono_alloc_ireg (cfg);
2679 ins->sreg2 = temp->dreg;
2680 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* multiply-immediate strength reduction: *1 -> move, *0 -> const 0,
 * power of two -> shift-left-immediate. */
2686 if (ins->inst_imm == 1) {
2687 ins->opcode = OP_MOVE;
2690 if (ins->inst_imm == 0) {
2691 ins->opcode = OP_ICONST;
2695 imm = mono_is_power_of_two (ins->inst_imm);
2697 ins->opcode = OP_SHL_IMM;
2698 ins->inst_imm = imm;
2701 if (!ppc_is_imm16 (ins->inst_imm)) {
2702 NEW_INS (cfg, temp, OP_ICONST);
2703 temp->inst_c0 = ins->inst_imm;
2704 temp->dreg = mono_alloc_ireg (cfg);
2705 ins->sreg2 = temp->dreg;
2706 ins->opcode = map_to_reg_reg_op (ins->opcode);
2709 case OP_LOCALLOC_IMM:
/* no immediate form of localloc: load the size into a vreg first */
2710 NEW_INS (cfg, temp, OP_ICONST);
2711 temp->inst_c0 = ins->inst_imm;
2712 temp->dreg = mono_alloc_ireg (cfg);
2713 ins->sreg1 = temp->dreg;
2714 ins->opcode = OP_LOCALLOC;
2716 case OP_LOAD_MEMBASE:
2717 case OP_LOADI4_MEMBASE:
2718 CASE_PPC64 (OP_LOADI8_MEMBASE)
2719 case OP_LOADU4_MEMBASE:
2720 case OP_LOADI2_MEMBASE:
2721 case OP_LOADU2_MEMBASE:
2722 case OP_LOADI1_MEMBASE:
2723 case OP_LOADU1_MEMBASE:
2724 case OP_LOADR4_MEMBASE:
2725 case OP_LOADR8_MEMBASE:
2726 case OP_STORE_MEMBASE_REG:
2727 CASE_PPC64 (OP_STOREI8_MEMBASE_REG)
2728 case OP_STOREI4_MEMBASE_REG:
2729 case OP_STOREI2_MEMBASE_REG:
2730 case OP_STOREI1_MEMBASE_REG:
2731 case OP_STORER4_MEMBASE_REG:
2732 case OP_STORER8_MEMBASE_REG:
2733 /* we can do two things: load the immed in a register
2734 * and use an indexed load, or see if the immed can be
2735 * represented as an ad_imm + a load with a smaller offset
2736 * that fits. We just do the first for now, optimize later.
2738 if (ppc_is_imm16 (ins->inst_offset))
2740 NEW_INS (cfg, temp, OP_ICONST);
2741 temp->inst_c0 = ins->inst_offset;
2742 temp->dreg = mono_alloc_ireg (cfg);
2743 ins->sreg2 = temp->dreg;
2744 ins->opcode = map_to_reg_reg_op (ins->opcode);
2746 case OP_STORE_MEMBASE_IMM:
2747 case OP_STOREI1_MEMBASE_IMM:
2748 case OP_STOREI2_MEMBASE_IMM:
2749 case OP_STOREI4_MEMBASE_IMM:
2750 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM)
/* store-immediate: first turn the stored value into a register ... */
2751 NEW_INS (cfg, temp, OP_ICONST);
2752 temp->inst_c0 = ins->inst_imm;
2753 temp->dreg = mono_alloc_ireg (cfg);
2754 ins->sreg1 = temp->dreg;
2755 ins->opcode = map_to_reg_reg_op (ins->opcode);
2757 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* R4CONST/R8CONST: float constants are loaded from memory; rewrite the
 * const into a LOADR4/LOADR8_MEMBASE from the constant's address. */
2760 if (cfg->compile_aot) {
2761 /* Keep these in the aot case */
2764 NEW_INS (cfg, temp, OP_ICONST);
2765 temp->inst_c0 = (gulong)ins->inst_p0;
2766 temp->dreg = mono_alloc_ireg (cfg);
2767 ins->inst_basereg = temp->dreg;
2768 ins->inst_offset = 0;
2769 ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
2771 /* make it handle the possibly big ins->inst_offset
2772 * later optimize to use lis + load_membase
2778 bb->last_ins = last_ins;
2779 bb->max_vreg = cfg->next_vreg;
/*
 * Emit code converting the FP register `sreg` to an integer in GPR `dreg`,
 * truncated toward zero (fctidz/fctiwz), then narrowed/extended to `size`
 * bytes with the given signedness. The conversion goes through a stack slot
 * (cfg->arch.fp_conv_var_offset) because PPC (pre-mfvsrd) has no direct
 * FPR->GPR move. ppc_f0 is used as scratch.
 * NOTE(review): some lines are elided in this listing (e.g. the declaration
 * of sub_offset and several #else/#endif) — hedge accordingly.
 */
2783 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2785 long offset = cfg->arch.fp_conv_var_offset;
2787 /* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
2788 #ifdef __mono_ppc64__
2790 ppc_fctidz (code, ppc_f0, sreg);
2795 ppc_fctiwz (code, ppc_f0, sreg);
/* store the converted value to the frame, then reload it into dreg;
 * sub_offset presumably selects the word within the 8-byte slot. */
2798 if (ppc_is_imm16 (offset + sub_offset)) {
2799 ppc_stfd (code, ppc_f0, offset, cfg->frame_reg);
2801 ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg);
2803 ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg);
/* offset does not fit in 16 bits: compute the address in dreg explicitly */
2805 ppc_load (code, dreg, offset);
2806 ppc_add (code, dreg, dreg, cfg->frame_reg);
2807 ppc_stfd (code, ppc_f0, 0, dreg);
2809 ppc_ldr (code, dreg, sub_offset, dreg);
2811 ppc_lwz (code, dreg, sub_offset, dreg);
/* unsigned narrowing: mask to 1/2 bytes (andi. clobbers cr0) or clear the
 * high 32 bits on ppc64 */
2815 ppc_andid (code, dreg, dreg, 0xff);
2817 ppc_andid (code, dreg, dreg, 0xffff);
2818 #ifdef __mono_ppc64__
2820 ppc_clrldi (code, dreg, dreg, 32);
/* signed narrowing: sign-extend byte/half (and word on ppc64) */
2824 ppc_extsb (code, dreg, dreg);
2826 ppc_extsh (code, dreg, dreg);
2827 #ifdef __mono_ppc64__
2829 ppc_extsw (code, dreg, dreg);
2837 const guchar *target;
/* True when `diff` fits the signed 26-bit displacement of a PPC relative
 * branch (I-form LI field, +/-32MB). */
2842 #define is_call_imm(diff) ((glong)(diff) >= -33554432 && (glong)(diff) <= 33554431)
/*
 * Callback for mono_domain_code_foreach: scan one code chunk for a branch
 * thunk slot for pdata (see handle_thunk). Either reuses an existing thunk
 * that already loads pdata->target, or claims a free (zeroed) 16-byte slot
 * and emits lis/ori/mtctr/bcctr there, then patches pdata->code to branch to
 * the thunk. ppc32-only: asserts on ppc64.
 * NOTE(review): several lines are elided here (declarations of `load`, the
 * early-return on out-of-range chunks, pdata->found updates) — hedged.
 */
2845 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2846 #ifdef __mono_ppc64__
2847 g_assert_not_reached ();
2849 PatchData *pdata = (PatchData*)user_data;
2850 guchar *code = data;
2851 guint32 *thunks = data;
2852 guint32 *endthunks = (guint32*)(code + bsize);
2856 int difflow, diffhigh;
2858 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2859 difflow = (char*)pdata->code - (char*)thunks;
2860 diffhigh = (char*)pdata->code - (char*)endthunks;
2861 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
/* build the 2-instruction load-immediate template we search for */
2864 templ = (guchar*)load;
2865 ppc_load_sequence (templ, ppc_r0, pdata->target);
2867 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 presumably means "take the first chunk regardless"; otherwise
 * only use the chunk that actually contains pdata->code */
2868 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2869 while (thunks < endthunks) {
2870 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2871 if ((thunks [0] == load [0]) && (thunks [1] == load [1])) {
/* an existing thunk for this target: just redirect the call site */
2872 ppc_patch (pdata->code, (guchar*)thunks);
2875 static int num_thunks = 0;
2877 if ((num_thunks % 20) == 0)
2878 g_print ("num_thunks lookup: %d\n", num_thunks);
2881 } else if ((thunks [0] == 0) && (thunks [1] == 0)) {
2882 /* found a free slot instead: emit thunk */
2883 code = (guchar*)thunks;
2884 ppc_lis (code, ppc_r0, (gulong)(pdata->target) >> 16);
2885 ppc_ori (code, ppc_r0, ppc_r0, (gulong)(pdata->target) & 0xffff);
2886 ppc_mtctr (code, ppc_r0);
2887 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
2888 mono_arch_flush_icache ((guchar*)thunks, 16);
2890 ppc_patch (pdata->code, (guchar*)thunks);
2893 static int num_thunks = 0;
2895 if ((num_thunks % 20) == 0)
2896 g_print ("num_thunks: %d\n", num_thunks);
2900 /* skip 16 bytes, the size of the thunk */
2904 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * Patch the branch at `code` to reach `target` via a thunk when the target is
 * out of direct-branch range. Walks the domain's code chunks twice under the
 * domain lock: first preferring a slot near the call site, then (per the
 * comment below) falling back to the first available slot. Asserts if no
 * thunk slot could be found (pdata.found != 1).
 * NOTE(review): the PatchData declaration/initialization lines are partially
 * elided in this listing.
 */
2911 handle_thunk (int absolute, guchar *code, const guchar *target) {
2912 MonoDomain *domain = mono_domain_get ();
2916 pdata.target = target;
2917 pdata.absolute = absolute;
2920 mono_domain_lock (domain);
2921 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2924 /* this uses the first available slot */
2926 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2928 mono_domain_unlock (domain);
2930 if (pdata.found != 1)
2931 g_print ("thunk failed for %p from %p\n", target, code);
2932 g_assert (pdata.found == 1);
/* Overwrite the 4-byte instruction at `code` with `ins` and flush the
 * icache for that word so the CPU sees the new instruction. */
2936 patch_ins (guint8 *code, guint32 ins)
2938 *(guint32*)code = ins;
2939 mono_arch_flush_icache (code, 4);
/*
 * Patch the branch/load instruction(s) at `code` so control flows to `target`.
 * Handles, by inspecting the primary opcode (ins >> 26):
 *  - unconditional branches (b/bl, opcode 18): rewritten as a relative branch
 *    when the +/-32MB displacement fits, else an absolute branch when the
 *    target address fits, else routed through a thunk (handle_thunk);
 *  - conditional branches (16-bit displacement, absolute or relative);
 *  - full load-immediate call sequences ending in blrl/blr/bcctr: the
 *    lis/ori(/sldi/oris/ori on ppc64) sequence is rewritten in place to load
 *    the new target, with function-descriptor handling when
 *    PPC_USES_FUNCTION_DESCRIPTOR is defined.
 * `is_fd` presumably indicates `target` is a function descriptor — confirm
 * against callers. FIXME notes below: the sequence rewrite is not thread safe.
 * NOTE(review): many lines (returns, #else/#endif, branch-displacement guard
 * for conditional branches) are elided in this listing — hedged throughout.
 */
2943 ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
2945 guint32 ins = *(guint32*)code;
2946 guint32 prim = ins >> 26;
2949 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2951 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2952 gint diff = target - code;
/* relative branch, forward: keep the LK bit (ins & 1) */
2955 if (diff <= 33554431){
2956 ins = (18 << 26) | (diff) | (ins & 1);
2957 patch_ins (code, ins);
2961 /* diff between 0 and -33554432 */
2962 if (diff >= -33554432){
2963 ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1);
2964 patch_ins (code, ins);
/* relative distance too large: try an absolute branch (AA bit = 2) */
2969 if ((glong)target >= 0){
2970 if ((glong)target <= 33554431){
2971 ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2;
2972 patch_ins (code, ins);
2976 if ((glong)target >= -33554432){
2977 ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2;
2978 patch_ins (code, ins);
/* neither relative nor absolute fits: go through a 16-byte thunk */
2983 handle_thunk (TRUE, code, target);
2986 g_assert_not_reached ();
/* conditional branch, absolute form: 16-bit target, keep AA/LK bits */
2994 guint32 li = (gulong)target;
2995 ins = (ins & 0xffff0000) | (ins & 3);
2996 ovf = li & 0xffff0000;
2997 if (ovf != 0 && ovf != 0xffff0000)
2998 g_assert_not_reached ();
3001 // FIXME: assert the top bits of li are 0
/* conditional branch, relative form: 16-bit displacement */
3003 gint diff = target - code;
3004 ins = (ins & 0xffff0000) | (ins & 3);
3005 ovf = diff & 0xffff0000;
3006 if (ovf != 0 && ovf != 0xffff0000)
3007 g_assert_not_reached ();
3011 patch_ins (code, ins);
/* 0x4e800021 = blrl, 0x4e800020 = blr, 0x4e800420 = bcctr */
3015 if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
3016 #ifdef __mono_ppc64__
3017 guint32 *seq = (guint32*)code;
3018 guint32 *branch_ins;
3020 /* the trampoline code will try to patch the blrl, blr, bcctr */
3021 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
/* back up from the branch to the start of the load sequence */
3023 if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */
3028 if (ppc_is_load_op (seq [5])
3029 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3030 /* With function descs we need to do more careful
3032 || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
3035 branch_ins = seq + 8;
3037 branch_ins = seq + 6;
3040 seq = (guint32*)code;
3041 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
3042 g_assert (mono_ppc_is_direct_call_sequence (branch_ins));
3044 if (ppc_is_load_op (seq [5])) {
3045 g_assert (ppc_is_load_op (seq [6]));
/* not a descriptor load: replace with mr from r12 */
3048 guint8 *buf = (guint8*)&seq [5];
3049 ppc_mr (buf, PPC_CALL_REG, ppc_r12);
3054 target = mono_get_addr_from_ftnptr ((gpointer)target);
3057 /* FIXME: make this thread safe */
3058 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3059 /* FIXME: we're assuming we're using r12 here */
3060 ppc_load_ptr_sequence (code, ppc_r12, target);
3062 ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
3064 mono_arch_flush_icache ((guint8*)seq, 28);
/* ppc32 variant of the sequence rewrite */
3067 /* the trampoline code will try to patch the blrl, blr, bcctr */
3068 if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
3071 /* this is the lis/ori/mtlr/blrl sequence */
3072 seq = (guint32*)code;
3073 g_assert ((seq [0] >> 26) == 15);
3074 g_assert ((seq [1] >> 26) == 24);
3075 g_assert ((seq [2] >> 26) == 31);
3076 g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
3077 /* FIXME: make this thread safe */
3078 ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
3079 ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
3080 mono_arch_flush_icache (code - 8, 8);
3083 g_assert_not_reached ();
3085 // g_print ("patched with 0x%08x\n", ins);
/* Convenience wrapper: patch `code` to reach `target` with is_fd = FALSE
 * (target is a plain code address, not a function descriptor). */
3089 ppc_patch (guchar *code, const guchar *target)
3091 ppc_patch_full (code, target, FALSE);
/* Public (non-static) entry point exposing ppc_patch to other files. */
3095 mono_ppc_patch (guchar *code, const guchar *target)
3097 ppc_patch (code, target);
/*
 * After a call instruction, emit the move of the call's return value from the
 * ABI return register into ins->dreg. Visible here: float calls return in
 * ppc_f1 and are copied with fmr only when dreg differs. Other call opcodes
 * are handled in elided lines of this listing.
 */
3101 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3103 switch (ins->opcode) {
3106 case OP_FCALL_MEMBASE:
3107 if (ins->dreg != ppc_f1)
3108 ppc_fmr (code, ins->dreg, ppc_f1);
/* Maximum native code length (in bytes) for `ins`, looked up from the
 * machine-description spec table via its MONO_INST_LEN field. */
3116 ins_native_length (MonoCompile *cfg, MonoInst *ins)
3118 return ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
/*
 * Grow the stack by the (frame-alignment-rounded) outgoing parameter area:
 * reload the saved back-chain word from the stack pointer, then push it at
 * the new, lower sp with a store-with-update, keeping the back chain valid.
 * Uses ppc_r12 as scratch when -size does not fit a 16-bit immediate.
 */
3122 emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
3124 long size = cfg->param_area;
/* round size up to MONO_ARCH_FRAME_ALIGNMENT */
3126 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3127 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3132 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3133 if (ppc_is_imm16 (-size)) {
3134 ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
3136 ppc_load (code, ppc_r12, -size);
3137 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/*
 * Inverse of emit_reserve_param_area: pop the parameter area by moving the
 * stack pointer back up by the same rounded size, re-storing the back-chain
 * word at the restored sp. Uses ppc_r12 as scratch for large sizes.
 */
3144 emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
3146 long size = cfg->param_area;
/* must round exactly as emit_reserve_param_area did */
3148 size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3149 size &= -MONO_ARCH_FRAME_ALIGNMENT;
3154 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
3155 if (ppc_is_imm16 (size)) {
3156 ppc_stptr_update (code, ppc_r0, size, ppc_sp);
3158 ppc_load (code, ppc_r12, size);
3159 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/* Mask a shift count to the architecture's valid range: 0-31 on ppc32,
 * 0-63 on ppc64 (shift instructions only encode that many bits). */
3165 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3169 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3171 MonoInst *ins, *next;
3174 guint8 *code = cfg->native_code + cfg->code_len;
3175 MonoInst *last_ins = NULL;
3176 guint last_offset = 0;
3180 /* we don't align basic blocks of loops on ppc */
3182 if (cfg->verbose_level > 2)
3183 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3185 cpos = bb->max_offset;
3187 MONO_BB_FOR_EACH_INS (bb, ins) {
3188 offset = code - cfg->native_code;
3190 max_len = ins_native_length (cfg, ins);
3192 if (offset > (cfg->code_size - max_len - 16)) {
3193 cfg->code_size *= 2;
3194 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3195 code = cfg->native_code + offset;
3197 // if (ins->cil_code)
3198 // g_print ("cil code\n");
3199 mono_debug_record_line_number (cfg, ins, offset);
3201 switch (normalize_opcode (ins->opcode)) {
3202 case OP_RELAXED_NOP:
3205 case OP_DUMMY_STORE:
3206 case OP_NOT_REACHED:
3209 case OP_IL_SEQ_POINT:
3210 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3212 case OP_SEQ_POINT: {
3215 if (cfg->compile_aot)
3219 * Read from the single stepping trigger page. This will cause a
3220 * SIGSEGV when single stepping is enabled.
3221 * We do this _before_ the breakpoint, so single stepping after
3222 * a breakpoint is hit will step to the next IL offset.
3224 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3225 ppc_load (code, ppc_r12, (gsize)ss_trigger_page);
3226 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
3229 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3232 * A placeholder for a possible breakpoint inserted by
3233 * mono_arch_set_breakpoint ().
3235 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
3240 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3241 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3242 ppc_mr (code, ppc_r4, ppc_r0);
3245 ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2);
3246 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3247 ppc_mr (code, ppc_r4, ppc_r0);
3249 case OP_MEMORY_BARRIER:
3252 case OP_STOREI1_MEMBASE_REG:
3253 if (ppc_is_imm16 (ins->inst_offset)) {
3254 ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3256 if (ppc_is_imm32 (ins->inst_offset)) {
3257 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3258 ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11);
3260 ppc_load (code, ppc_r0, ins->inst_offset);
3261 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3265 case OP_STOREI2_MEMBASE_REG:
3266 if (ppc_is_imm16 (ins->inst_offset)) {
3267 ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3269 if (ppc_is_imm32 (ins->inst_offset)) {
3270 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3271 ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11);
3273 ppc_load (code, ppc_r0, ins->inst_offset);
3274 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3278 case OP_STORE_MEMBASE_REG:
3279 if (ppc_is_imm16 (ins->inst_offset)) {
3280 ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3282 if (ppc_is_imm32 (ins->inst_offset)) {
3283 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
3284 ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11);
3286 ppc_load (code, ppc_r0, ins->inst_offset);
3287 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3291 #ifdef __mono_ilp32__
3292 case OP_STOREI8_MEMBASE_REG:
3293 if (ppc_is_imm16 (ins->inst_offset)) {
3294 ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
3296 ppc_load (code, ppc_r0, ins->inst_offset);
3297 ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
3301 case OP_STOREI1_MEMINDEX:
3302 ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3304 case OP_STOREI2_MEMINDEX:
3305 ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3307 case OP_STORE_MEMINDEX:
3308 ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3311 g_assert_not_reached ();
3313 case OP_LOAD_MEMBASE:
3314 if (ppc_is_imm16 (ins->inst_offset)) {
3315 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3317 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3318 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3319 ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg);
3321 ppc_load (code, ppc_r0, ins->inst_offset);
3322 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3326 case OP_LOADI4_MEMBASE:
3327 #ifdef __mono_ppc64__
3328 if (ppc_is_imm16 (ins->inst_offset)) {
3329 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3331 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3332 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3333 ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg);
3335 ppc_load (code, ppc_r0, ins->inst_offset);
3336 ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3341 case OP_LOADU4_MEMBASE:
3342 if (ppc_is_imm16 (ins->inst_offset)) {
3343 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3345 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3346 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3347 ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg);
3349 ppc_load (code, ppc_r0, ins->inst_offset);
3350 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3354 case OP_LOADI1_MEMBASE:
3355 case OP_LOADU1_MEMBASE:
3356 if (ppc_is_imm16 (ins->inst_offset)) {
3357 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3359 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3360 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3361 ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg);
3363 ppc_load (code, ppc_r0, ins->inst_offset);
3364 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3367 if (ins->opcode == OP_LOADI1_MEMBASE)
3368 ppc_extsb (code, ins->dreg, ins->dreg);
3370 case OP_LOADU2_MEMBASE:
3371 if (ppc_is_imm16 (ins->inst_offset)) {
3372 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3374 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3375 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3376 ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg);
3378 ppc_load (code, ppc_r0, ins->inst_offset);
3379 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
3383 case OP_LOADI2_MEMBASE:
3384 if (ppc_is_imm16 (ins->inst_offset)) {
3385 ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3387 if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) {
3388 ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset));
3389 ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg);
3391 ppc_load (code, ppc_r0, ins->inst_offset);
3392 ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0);
3396 #ifdef __mono_ilp32__
3397 case OP_LOADI8_MEMBASE:
3398 if (ppc_is_imm16 (ins->inst_offset)) {
3399 ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
3401 ppc_load (code, ppc_r0, ins->inst_offset);
3402 ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3406 case OP_LOAD_MEMINDEX:
3407 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3409 case OP_LOADI4_MEMINDEX:
3410 #ifdef __mono_ppc64__
3411 ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3414 case OP_LOADU4_MEMINDEX:
3415 ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3417 case OP_LOADU2_MEMINDEX:
3418 ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3420 case OP_LOADI2_MEMINDEX:
3421 ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3423 case OP_LOADU1_MEMINDEX:
3424 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3426 case OP_LOADI1_MEMINDEX:
3427 ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3428 ppc_extsb (code, ins->dreg, ins->dreg);
3430 case OP_ICONV_TO_I1:
3431 CASE_PPC64 (OP_LCONV_TO_I1)
3432 ppc_extsb (code, ins->dreg, ins->sreg1);
3434 case OP_ICONV_TO_I2:
3435 CASE_PPC64 (OP_LCONV_TO_I2)
3436 ppc_extsh (code, ins->dreg, ins->sreg1);
3438 case OP_ICONV_TO_U1:
3439 CASE_PPC64 (OP_LCONV_TO_U1)
3440 ppc_clrlwi (code, ins->dreg, ins->sreg1, 24);
3442 case OP_ICONV_TO_U2:
3443 CASE_PPC64 (OP_LCONV_TO_U2)
3444 ppc_clrlwi (code, ins->dreg, ins->sreg1, 16);
3448 CASE_PPC64 (OP_LCOMPARE)
3449 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1;
3451 if (next && compare_opcode_is_unsigned (next->opcode))
3452 ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2);
3454 ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2);
3456 case OP_COMPARE_IMM:
3457 case OP_ICOMPARE_IMM:
3458 CASE_PPC64 (OP_LCOMPARE_IMM)
3459 L = (sizeof (mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1;
3461 if (next && compare_opcode_is_unsigned (next->opcode)) {
3462 if (ppc_is_uimm16 (ins->inst_imm)) {
3463 ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3465 g_assert_not_reached ();
3468 if (ppc_is_imm16 (ins->inst_imm)) {
3469 ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff));
3471 g_assert_not_reached ();
3477 * gdb does not like encountering a trap in the debugged code. So
3478 * instead of emitting a trap, we emit a call a C function and place a
3482 ppc_mr (code, ppc_r3, ins->sreg1);
3483 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3484 (gpointer)"mono_break");
3485 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3486 ppc_load_func (code, PPC_CALL_REG, 0);
3487 ppc_mtlr (code, PPC_CALL_REG);
3495 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3498 CASE_PPC64 (OP_LADD)
3499 ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2);
3503 ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2);
3506 if (ppc_is_imm16 (ins->inst_imm)) {
3507 ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3509 g_assert_not_reached ();
3514 CASE_PPC64 (OP_LADD_IMM)
3515 if (ppc_is_imm16 (ins->inst_imm)) {
3516 ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm);
3518 g_assert_not_reached ();
3522 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3524 ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2);
3525 ppc_mfspr (code, ppc_r0, ppc_xer);
3526 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3527 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3529 case OP_IADD_OVF_UN:
3530 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3532 ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
3533 ppc_mfspr (code, ppc_r0, ppc_xer);
3534 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3535 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3538 CASE_PPC64 (OP_LSUB_OVF)
3539 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3541 ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1);
3542 ppc_mfspr (code, ppc_r0, ppc_xer);
3543 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3544 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3546 case OP_ISUB_OVF_UN:
3547 CASE_PPC64 (OP_LSUB_OVF_UN)
3548 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3550 ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
3551 ppc_mfspr (code, ppc_r0, ppc_xer);
3552 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3553 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3555 case OP_ADD_OVF_CARRY:
3556 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3558 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3559 ppc_mfspr (code, ppc_r0, ppc_xer);
3560 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3561 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3563 case OP_ADD_OVF_UN_CARRY:
3564 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3566 ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
3567 ppc_mfspr (code, ppc_r0, ppc_xer);
3568 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3569 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3571 case OP_SUB_OVF_CARRY:
3572 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3574 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3575 ppc_mfspr (code, ppc_r0, ppc_xer);
3576 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3577 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3579 case OP_SUB_OVF_UN_CARRY:
3580 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3582 ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
3583 ppc_mfspr (code, ppc_r0, ppc_xer);
3584 ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
3585 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3589 ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1);
3592 CASE_PPC64 (OP_LSUB)
3593 ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
3597 ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
3601 CASE_PPC64 (OP_LSUB_IMM)
3602 // we add the negated value
3603 if (ppc_is_imm16 (-ins->inst_imm))
3604 ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
3606 g_assert_not_reached ();
3610 g_assert (ppc_is_imm16 (ins->inst_imm));
3611 ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm);
3614 ppc_subfze (code, ins->dreg, ins->sreg1);
3617 CASE_PPC64 (OP_LAND)
3618 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3619 ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2);
3623 CASE_PPC64 (OP_LAND_IMM)
3624 if (!(ins->inst_imm & 0xffff0000)) {
3625 ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm);
3626 } else if (!(ins->inst_imm & 0xffff)) {
3627 ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16));
3629 g_assert_not_reached ();
3633 CASE_PPC64 (OP_LDIV) {
3634 guint8 *divisor_is_m1;
3635 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3637 ppc_compare_reg_imm (code, 0, ins->sreg2, -1);
3638 divisor_is_m1 = code;
3639 ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
3640 ppc_lis (code, ppc_r0, 0x8000);
3641 #ifdef __mono_ppc64__
3642 if (ins->opcode == OP_LDIV)
3643 ppc_sldi (code, ppc_r0, ppc_r0, 32);
3645 ppc_compare (code, 0, ins->sreg1, ppc_r0);
3646 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3647 ppc_patch (divisor_is_m1, code);
3648 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3650 if (ins->opcode == OP_IDIV)
3651 ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2);
3652 #ifdef __mono_ppc64__
3654 ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2);
3656 ppc_mfspr (code, ppc_r0, ppc_xer);
3657 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3658 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3662 CASE_PPC64 (OP_LDIV_UN)
3663 if (ins->opcode == OP_IDIV_UN)
3664 ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2);
3665 #ifdef __mono_ppc64__
3667 ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2);
3669 ppc_mfspr (code, ppc_r0, ppc_xer);
3670 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3671 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
3677 g_assert_not_reached ();
3680 ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2);
3684 CASE_PPC64 (OP_LOR_IMM)
3685 if (!(ins->inst_imm & 0xffff0000)) {
3686 ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3687 } else if (!(ins->inst_imm & 0xffff)) {
3688 ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16));
3690 g_assert_not_reached ();
3694 CASE_PPC64 (OP_LXOR)
3695 ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
3699 CASE_PPC64 (OP_LXOR_IMM)
3700 if (!(ins->inst_imm & 0xffff0000)) {
3701 ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm);
3702 } else if (!(ins->inst_imm & 0xffff)) {
3703 ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16));
3705 g_assert_not_reached ();
3709 CASE_PPC64 (OP_LSHL)
3710 ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2);
3714 CASE_PPC64 (OP_LSHL_IMM)
3715 ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3718 ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2);
3721 ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3724 if (MASK_SHIFT_IMM (ins->inst_imm))
3725 ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm));
3727 ppc_mr (code, ins->dreg, ins->sreg1);
3730 ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
3733 CASE_PPC64 (OP_LNOT)
3734 ppc_not (code, ins->dreg, ins->sreg1);
3737 CASE_PPC64 (OP_LNEG)
3738 ppc_neg (code, ins->dreg, ins->sreg1);
3741 CASE_PPC64 (OP_LMUL)
3742 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3746 CASE_PPC64 (OP_LMUL_IMM)
3747 if (ppc_is_imm16 (ins->inst_imm)) {
3748 ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm);
3750 g_assert_not_reached ();
3754 CASE_PPC64 (OP_LMUL_OVF)
3755 /* we annot use mcrxr, since it's not implemented on some processors
3756 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3758 if (ins->opcode == OP_IMUL_OVF)
3759 ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2);
3760 #ifdef __mono_ppc64__
3762 ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2);
3764 ppc_mfspr (code, ppc_r0, ppc_xer);
3765 ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
3766 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3768 case OP_IMUL_OVF_UN:
3769 CASE_PPC64 (OP_LMUL_OVF_UN)
3770 /* we first multiply to get the high word and compare to 0
3771 * to set the flags, then the result is discarded and then
3772 * we multiply to get the lower * bits result
3774 if (ins->opcode == OP_IMUL_OVF_UN)
3775 ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2);
3776 #ifdef __mono_ppc64__
3778 ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2);
3780 ppc_cmpi (code, 0, 0, ppc_r0, 0);
3781 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException");
3782 ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2);
3785 ppc_load (code, ins->dreg, ins->inst_c0);
3788 ppc_load (code, ins->dreg, ins->inst_l);
3791 case OP_LOAD_GOTADDR:
3792 /* The PLT implementation depends on this */
3793 g_assert (ins->dreg == ppc_r30);
3795 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
3798 // FIXME: Fix max instruction length
3799 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
3800 /* arch_emit_got_access () patches this */
3801 ppc_load32 (code, ppc_r0, 0);
3802 ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0);
3805 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3806 ppc_load_sequence (code, ins->dreg, 0);
3808 CASE_PPC32 (OP_ICONV_TO_I4)
3809 CASE_PPC32 (OP_ICONV_TO_U4)
3811 if (ins->dreg != ins->sreg1)
3812 ppc_mr (code, ins->dreg, ins->sreg1);
3815 int saved = ins->sreg1;
3816 if (ins->sreg1 == ppc_r3) {
3817 ppc_mr (code, ppc_r0, ins->sreg1);
3820 if (ins->sreg2 != ppc_r3)
3821 ppc_mr (code, ppc_r3, ins->sreg2);
3822 if (saved != ppc_r4)
3823 ppc_mr (code, ppc_r4, saved);
3827 if (ins->dreg != ins->sreg1)
3828 ppc_fmr (code, ins->dreg, ins->sreg1);
3830 case OP_MOVE_F_TO_I4:
3831 ppc_stfs (code, ins->sreg1, -4, ppc_r1);
3832 ppc_ldptr (code, ins->dreg, -4, ppc_r1);
3834 case OP_MOVE_I4_TO_F:
3835 ppc_stw (code, ins->sreg1, -4, ppc_r1);
3836 ppc_lfs (code, ins->dreg, -4, ppc_r1);
3838 case OP_FCONV_TO_R4:
3839 ppc_frsp (code, ins->dreg, ins->sreg1);
3843 MonoCallInst *call = (MonoCallInst*)ins;
3846 * Keep in sync with mono_arch_emit_epilog
3848 g_assert (!cfg->method->save_lmf);
3850 * Note: we can use ppc_r12 here because it is dead anyway:
3851 * we're leaving the method.
3853 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
3854 long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
3855 if (ppc_is_imm16 (ret_offset)) {
3856 ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
3858 ppc_load (code, ppc_r12, ret_offset);
3859 ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
3861 ppc_mtlr (code, ppc_r0);
3864 if (ppc_is_imm16 (cfg->stack_usage)) {
3865 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
3867 /* cfg->stack_usage is an int, so we can use
3868 * an addis/addi sequence here even in 64-bit. */
3869 ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
3870 ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
3872 if (!cfg->method->save_lmf) {
3874 for (i = 31; i >= 13; --i) {
3875 if (cfg->used_int_regs & (1 << i)) {
3876 pos += sizeof (gpointer);
3877 ppc_ldptr (code, i, -pos, ppc_r12);
3881 /* FIXME restore from MonoLMF: though this can't happen yet */
3884 /* Copy arguments on the stack to our argument area */
3885 if (call->stack_usage) {
3886 code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
3887 /* r12 was clobbered */
3888 g_assert (cfg->frame_reg == ppc_sp);
3889 if (ppc_is_imm16 (cfg->stack_usage)) {
3890 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
3892 /* cfg->stack_usage is an int, so we can use
3893 * an addis/addi sequence here even in 64-bit. */
3894 ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
3895 ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
3899 ppc_mr (code, ppc_sp, ppc_r12);
3900 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
3901 if (cfg->compile_aot) {
3902 /* arch_emit_got_access () patches this */
3903 ppc_load32 (code, ppc_r0, 0);
3904 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3905 ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0);
3906 ppc_ldptr (code, ppc_r0, 0, ppc_r12);
3908 ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
3910 ppc_mtctr (code, ppc_r0);
3911 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
3918 /* ensure ins->sreg1 is not NULL */
3919 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
3922 long cookie_offset = cfg->sig_cookie + cfg->stack_usage;
3923 if (ppc_is_imm16 (cookie_offset)) {
3924 ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset);
3926 ppc_load (code, ppc_r0, cookie_offset);
3927 ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
3929 ppc_stptr (code, ppc_r0, 0, ins->sreg1);
3938 call = (MonoCallInst*)ins;
3939 if (ins->flags & MONO_INST_HAS_METHOD)
3940 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3942 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3943 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
3944 ppc_load_func (code, PPC_CALL_REG, 0);
3945 ppc_mtlr (code, PPC_CALL_REG);
3950 /* FIXME: this should be handled somewhere else in the new jit */
3951 code = emit_move_return_value (cfg, ins, code);
3957 case OP_VOIDCALL_REG:
3959 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3960 ppc_ldptr (code, ppc_r0, 0, ins->sreg1);
3961 /* FIXME: if we know that this is a method, we
3962 can omit this load */
3963 ppc_ldptr (code, ppc_r2, 8, ins->sreg1);
3964 ppc_mtlr (code, ppc_r0);
3966 #if (_CALL_ELF == 2)
3967 if (ins->flags & MONO_INST_HAS_METHOD) {
3968 // Not a global entry point
3970 // Need to set up r12 with function entry address for global entry point
3971 if (ppc_r12 != ins->sreg1) {
3972 ppc_mr(code,ppc_r12,ins->sreg1);
3976 ppc_mtlr (code, ins->sreg1);
3979 /* FIXME: this should be handled somewhere else in the new jit */
3980 code = emit_move_return_value (cfg, ins, code);
3982 case OP_FCALL_MEMBASE:
3983 case OP_LCALL_MEMBASE:
3984 case OP_VCALL_MEMBASE:
3985 case OP_VCALL2_MEMBASE:
3986 case OP_VOIDCALL_MEMBASE:
3987 case OP_CALL_MEMBASE:
3988 if (cfg->compile_aot && ins->sreg1 == ppc_r12) {
3989 /* The trampolines clobber this */
3990 ppc_mr (code, ppc_r29, ins->sreg1);
3991 ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
3993 ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1);
3995 ppc_mtlr (code, ppc_r0);
3997 /* FIXME: this should be handled somewhere else in the new jit */
3998 code = emit_move_return_value (cfg, ins, code);
4001 guint8 * zero_loop_jump, * zero_loop_start;
4002 /* keep alignment */
4003 int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
4004 int area_offset = alloca_waste;
4006 ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31);
4007 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
4008 ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4);
4009 /* use ctr to store the number of words to 0 if needed */
4010 if (ins->flags & MONO_INST_INIT) {
4011 /* we zero 4 bytes at a time:
4012 * we add 7 instead of 3 so that we set the counter to
4013 * at least 1, otherwise the bdnz instruction will make
4014 * it negative and iterate billions of times.
4016 ppc_addi (code, ppc_r0, ins->sreg1, 7);
4017 ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2);
4018 ppc_mtctr (code, ppc_r0);
4020 ppc_ldptr (code, ppc_r0, 0, ppc_sp);
4021 ppc_neg (code, ppc_r12, ppc_r12);
4022 ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
4024 /* FIXME: make this loop work in 8 byte
4025 increments on PPC64 */
4026 if (ins->flags & MONO_INST_INIT) {
4027 /* adjust the dest reg by -4 so we can use stwu */
4028 /* we actually adjust -8 because we let the loop
4031 ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
4032 ppc_li (code, ppc_r12, 0);
4033 zero_loop_start = code;
4034 ppc_stwu (code, ppc_r12, 4, ins->dreg);
4035 zero_loop_jump = code;
4036 ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
4037 ppc_patch (zero_loop_jump, zero_loop_start);
4039 ppc_addi (code, ins->dreg, ppc_sp, area_offset);
4044 ppc_mr (code, ppc_r3, ins->sreg1);
4045 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4046 (gpointer)"mono_arch_throw_exception");
4047 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
4048 ppc_load_func (code, PPC_CALL_REG, 0);
4049 ppc_mtlr (code, PPC_CALL_REG);
4058 ppc_mr (code, ppc_r3, ins->sreg1);
4059 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4060 (gpointer)"mono_arch_rethrow_exception");
4061 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
4062 ppc_load_func (code, PPC_CALL_REG, 0);
4063 ppc_mtlr (code, PPC_CALL_REG);
4070 case OP_START_HANDLER: {
4071 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4072 g_assert (spvar->inst_basereg != ppc_sp);
4073 code = emit_reserve_param_area (cfg, code);
4074 ppc_mflr (code, ppc_r0);
4075 if (ppc_is_imm16 (spvar->inst_offset)) {
4076 ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4078 ppc_load (code, ppc_r12, spvar->inst_offset);
4079 ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg);
4083 case OP_ENDFILTER: {
4084 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4085 g_assert (spvar->inst_basereg != ppc_sp);
4086 code = emit_unreserve_param_area (cfg, code);
4087 if (ins->sreg1 != ppc_r3)
4088 ppc_mr (code, ppc_r3, ins->sreg1);
4089 if (ppc_is_imm16 (spvar->inst_offset)) {
4090 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4092 ppc_load (code, ppc_r12, spvar->inst_offset);
4093 ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12);
4095 ppc_mtlr (code, ppc_r0);
4099 case OP_ENDFINALLY: {
4100 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4101 g_assert (spvar->inst_basereg != ppc_sp);
4102 code = emit_unreserve_param_area (cfg, code);
4103 ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
4104 ppc_mtlr (code, ppc_r0);
4108 case OP_CALL_HANDLER:
4109 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4111 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4114 ins->inst_c0 = code - cfg->native_code;
4117 /*if (ins->inst_target_bb->native_offset) {
4119 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4121 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4126 ppc_mtctr (code, ins->sreg1);
4127 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
4131 CASE_PPC64 (OP_LCEQ)
4132 ppc_li (code, ins->dreg, 0);
4133 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4134 ppc_li (code, ins->dreg, 1);
4140 CASE_PPC64 (OP_LCLT)
4141 CASE_PPC64 (OP_LCLT_UN)
4142 ppc_li (code, ins->dreg, 1);
4143 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4144 ppc_li (code, ins->dreg, 0);
4150 CASE_PPC64 (OP_LCGT)
4151 CASE_PPC64 (OP_LCGT_UN)
4152 ppc_li (code, ins->dreg, 1);
4153 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4154 ppc_li (code, ins->dreg, 0);
4156 case OP_COND_EXC_EQ:
4157 case OP_COND_EXC_NE_UN:
4158 case OP_COND_EXC_LT:
4159 case OP_COND_EXC_LT_UN:
4160 case OP_COND_EXC_GT:
4161 case OP_COND_EXC_GT_UN:
4162 case OP_COND_EXC_GE:
4163 case OP_COND_EXC_GE_UN:
4164 case OP_COND_EXC_LE:
4165 case OP_COND_EXC_LE_UN:
4166 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4168 case OP_COND_EXC_IEQ:
4169 case OP_COND_EXC_INE_UN:
4170 case OP_COND_EXC_ILT:
4171 case OP_COND_EXC_ILT_UN:
4172 case OP_COND_EXC_IGT:
4173 case OP_COND_EXC_IGT_UN:
4174 case OP_COND_EXC_IGE:
4175 case OP_COND_EXC_IGE_UN:
4176 case OP_COND_EXC_ILE:
4177 case OP_COND_EXC_ILE_UN:
4178 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4190 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4193 /* floating point opcodes */
4195 g_assert (cfg->compile_aot);
4197 /* FIXME: Optimize this */
4199 ppc_mflr (code, ppc_r12);
4201 *(double*)code = *(double*)ins->inst_p0;
4203 ppc_lfd (code, ins->dreg, 8, ppc_r12);
4206 g_assert_not_reached ();
4208 case OP_STORER8_MEMBASE_REG:
4209 if (ppc_is_imm16 (ins->inst_offset)) {
4210 ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4212 if (ppc_is_imm32 (ins->inst_offset)) {
4213 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4214 ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11);
4216 ppc_load (code, ppc_r0, ins->inst_offset);
4217 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4221 case OP_LOADR8_MEMBASE:
4222 if (ppc_is_imm16 (ins->inst_offset)) {
4223 ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4225 if (ppc_is_imm32 (ins->inst_offset)) {
4226 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4227 ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11);
4229 ppc_load (code, ppc_r0, ins->inst_offset);
4230 ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4234 case OP_STORER4_MEMBASE_REG:
4235 ppc_frsp (code, ins->sreg1, ins->sreg1);
4236 if (ppc_is_imm16 (ins->inst_offset)) {
4237 ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4239 if (ppc_is_imm32 (ins->inst_offset)) {
4240 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4241 ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11);
4243 ppc_load (code, ppc_r0, ins->inst_offset);
4244 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4248 case OP_LOADR4_MEMBASE:
4249 if (ppc_is_imm16 (ins->inst_offset)) {
4250 ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
4252 if (ppc_is_imm32 (ins->inst_offset)) {
4253 ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
4254 ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11);
4256 ppc_load (code, ppc_r0, ins->inst_offset);
4257 ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
4261 case OP_LOADR4_MEMINDEX:
4262 ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4264 case OP_LOADR8_MEMINDEX:
4265 ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4267 case OP_STORER4_MEMINDEX:
4268 ppc_frsp (code, ins->sreg1, ins->sreg1);
4269 ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4271 case OP_STORER8_MEMINDEX:
4272 ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4275 case CEE_CONV_R4: /* FIXME: change precision */
4277 g_assert_not_reached ();
4278 case OP_FCONV_TO_I1:
4279 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4281 case OP_FCONV_TO_U1:
4282 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4284 case OP_FCONV_TO_I2:
4285 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4287 case OP_FCONV_TO_U2:
4288 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4290 case OP_FCONV_TO_I4:
4292 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4294 case OP_FCONV_TO_U4:
4296 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4298 case OP_LCONV_TO_R_UN:
4299 g_assert_not_reached ();
4300 /* Implemented as helper calls */
4302 case OP_LCONV_TO_OVF_I4_2:
4303 case OP_LCONV_TO_OVF_I: {
4304 #ifdef __mono_ppc64__
4307 guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
4308 // Check if it's negative
4309 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
4310 negative_branch = code;
4311 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
4312 // It's positive: msword == 0
4313 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
4314 msword_positive_branch = code;
4315 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
4317 ovf_ex_target = code;
4318 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
4320 ppc_patch (negative_branch, code);
4321 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
4322 msword_negative_branch = code;
4323 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4324 ppc_patch (msword_negative_branch, ovf_ex_target);
4326 ppc_patch (msword_positive_branch, code);
4327 if (ins->dreg != ins->sreg1)
4328 ppc_mr (code, ins->dreg, ins->sreg1);
4333 ppc_fsqrtd (code, ins->dreg, ins->sreg1);
4336 ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2);
4339 ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2);
4342 ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2);
4345 ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2);
4348 ppc_fneg (code, ins->dreg, ins->sreg1);
4352 g_assert_not_reached ();
4355 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4358 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4359 ppc_li (code, ins->dreg, 0);
4360 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
4361 ppc_li (code, ins->dreg, 1);
4364 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4365 ppc_li (code, ins->dreg, 1);
4366 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4367 ppc_li (code, ins->dreg, 0);
4370 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4371 ppc_li (code, ins->dreg, 1);
4372 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4373 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
4374 ppc_li (code, ins->dreg, 0);
4377 ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
4378 ppc_li (code, ins->dreg, 1);
4379 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4380 ppc_li (code, ins->dreg, 0);
4383 ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
4384 ppc_li (code, ins->dreg, 1);
4385 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3);
4386 ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
4387 ppc_li (code, ins->dreg, 0);
4390 EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
4393 EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
4396 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4397 EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ);
4400 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4401 EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ);
4404 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4405 EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ);
4408 EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO);
4409 EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ);
4412 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4413 EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ);
4416 EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ);
4419 ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
4420 EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ);
4423 EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ);
4426 g_assert_not_reached ();
4427 case OP_CHECK_FINITE: {
4428 ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31);
4429 ppc_addis (code, ins->sreg1, ins->sreg1, -32752);
4430 ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
4431 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
4434 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0);
4435 #ifdef __mono_ppc64__
4436 ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL);
4438 ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL);
4443 #ifdef __mono_ppc64__
4444 case OP_ICONV_TO_I4:
4446 ppc_extsw (code, ins->dreg, ins->sreg1);
4448 case OP_ICONV_TO_U4:
4450 ppc_clrldi (code, ins->dreg, ins->sreg1, 32);
4452 case OP_ICONV_TO_R4:
4453 case OP_ICONV_TO_R8:
4454 case OP_LCONV_TO_R4:
4455 case OP_LCONV_TO_R8: {
4457 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) {
4458 ppc_extsw (code, ppc_r0, ins->sreg1);
4463 if (cpu_hw_caps & PPC_MOVE_FPR_GPR) {
4464 ppc_mffgpr (code, ins->dreg, tmp);
4466 ppc_str (code, tmp, -8, ppc_r1);
4467 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4469 ppc_fcfid (code, ins->dreg, ins->dreg);
4470 if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4)
4471 ppc_frsp (code, ins->dreg, ins->dreg);
4475 ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2);
4478 ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2);
4481 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4483 ppc_mfspr (code, ppc_r0, ppc_xer);
4484 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */
4485 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4487 case OP_COND_EXC_OV:
4488 ppc_mfspr (code, ppc_r0, ppc_xer);
4489 ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */
4490 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, ins->inst_p1);
4502 EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ);
4504 case OP_FCONV_TO_I8:
4505 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4507 case OP_FCONV_TO_U8:
4508 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
4510 case OP_STOREI4_MEMBASE_REG:
4511 if (ppc_is_imm16 (ins->inst_offset)) {
4512 ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
4514 ppc_load (code, ppc_r0, ins->inst_offset);
4515 ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
4518 case OP_STOREI4_MEMINDEX:
4519 ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg);
4522 ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4524 case OP_ISHR_UN_IMM:
4525 if (ins->inst_imm & 0x1f)
4526 ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4528 ppc_mr (code, ins->dreg, ins->sreg1);
4531 case OP_ICONV_TO_R4:
4532 case OP_ICONV_TO_R8: {
4533 if (cpu_hw_caps & PPC_ISA_64) {
4534 ppc_srawi(code, ppc_r0, ins->sreg1, 31);
4535 ppc_stw (code, ppc_r0, -8, ppc_r1);
4536 ppc_stw (code, ins->sreg1, -4, ppc_r1);
4537 ppc_lfd (code, ins->dreg, -8, ppc_r1);
4538 ppc_fcfid (code, ins->dreg, ins->dreg);
4539 if (ins->opcode == OP_ICONV_TO_R4)
4540 ppc_frsp (code, ins->dreg, ins->dreg);
4546 case OP_ATOMIC_ADD_I4:
4547 CASE_PPC64 (OP_ATOMIC_ADD_I8) {
4548 int location = ins->inst_basereg;
4549 int addend = ins->sreg2;
4550 guint8 *loop, *branch;
4551 g_assert (ins->inst_offset == 0);
4555 if (ins->opcode == OP_ATOMIC_ADD_I4)
4556 ppc_lwarx (code, ppc_r0, 0, location);
4557 #ifdef __mono_ppc64__
4559 ppc_ldarx (code, ppc_r0, 0, location);
4562 ppc_add (code, ppc_r0, ppc_r0, addend);
4564 if (ins->opcode == OP_ATOMIC_ADD_I4)
4565 ppc_stwcxd (code, ppc_r0, 0, location);
4566 #ifdef __mono_ppc64__
4568 ppc_stdcxd (code, ppc_r0, 0, location);
4572 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4573 ppc_patch (branch, loop);
4576 ppc_mr (code, ins->dreg, ppc_r0);
4579 case OP_ATOMIC_CAS_I4:
4580 CASE_PPC64 (OP_ATOMIC_CAS_I8) {
4581 int location = ins->sreg1;
4582 int value = ins->sreg2;
4583 int comparand = ins->sreg3;
4584 guint8 *start, *not_equal, *lost_reservation;
4588 if (ins->opcode == OP_ATOMIC_CAS_I4)
4589 ppc_lwarx (code, ppc_r0, 0, location);
4590 #ifdef __mono_ppc64__
4592 ppc_ldarx (code, ppc_r0, 0, location);
4595 ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 0 : 1, ppc_r0, comparand);
4597 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4599 if (ins->opcode == OP_ATOMIC_CAS_I4)
4600 ppc_stwcxd (code, value, 0, location);
4601 #ifdef __mono_ppc64__
4603 ppc_stdcxd (code, value, 0, location);
4606 lost_reservation = code;
4607 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
4608 ppc_patch (lost_reservation, start);
4609 ppc_patch (not_equal, code);
4612 ppc_mr (code, ins->dreg, ppc_r0);
4615 case OP_GC_SAFE_POINT:
4619 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4620 g_assert_not_reached ();
4623 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4624 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4625 mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset));
4626 g_assert_not_reached ();
4632 last_offset = offset;
4635 cfg->code_len = code - cfg->native_code;
4637 #endif /* !DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *   Register the PPC-specific low-level icall(s) with the runtime so the
 *   exception-throw trampoline can be resolved by name at patch time.
 *   NOTE(review): this file is a lossy line-numbered extract; the function's
 *   return-type line and closing brace are not visible in this view.
 */
4640 mono_arch_register_lowlevel_calls (void)
4642 /* The signature doesn't matter */
4643 mono_register_jit_icall (mono_ppc_throw_exception, "mono_ppc_throw_exception", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_load_sequence(ip, val):
 *   Rewrite the immediate halfwords of a previously emitted register-load
 *   instruction sequence at 'ip' so that it loads 'val' instead.
 *   On ppc64 the load sequence carries the value in four 16-bit immediate
 *   fields spread over several instructions; the guint16 indices below select
 *   the immediate field of each instruction, which sits at a different
 *   halfword offset depending on byte order.  On 32-bit ppc a lis/ori pair is
 *   patched instead.
 *   NOTE(review): the "} while (0)" closers and #endif lines of these macros
 *   fall outside this extract's visible lines.
 */
4646 #ifdef __mono_ppc64__
4647 #ifdef _LITTLE_ENDIAN
4648 #define patch_load_sequence(ip,val) do {\
4649 guint16 *__load = (guint16*)(ip); \
4650 g_assert (sizeof (val) == sizeof (gsize)); \
4651 __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4652 __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4653 __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4654 __load [8] = ((guint64)(gsize)(val)) & 0xffff; \
4656 #elif defined _BIG_ENDIAN
4657 #define patch_load_sequence(ip,val) do {\
4658 guint16 *__load = (guint16*)(ip); \
4659 g_assert (sizeof (val) == sizeof (gsize)); \
4660 __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4661 __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4662 __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4663 __load [9] = ((guint64)(gsize)(val)) & 0xffff; \
4666 #error huh? No endianess defined by compiler
/* 32-bit ppc: the load is a lis/ori pair; patch its two immediate halfwords. */
4669 #define patch_load_sequence(ip,val) do {\
4670 guint16 *__lis_ori = (guint16*)(ip); \
4671 __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
4672 __lis_ori [3] = ((gulong)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *   Walk the MonoJumpInfo list 'ji' recorded while emitting 'method' and patch
 *   each instruction site in 'code' with its resolved target.  Constant-style
 *   patches rewrite the immediates of the emitted load sequence via
 *   patch_load_sequence (); branch/call sites fall through to ppc_patch_full ()
 *   at the bottom.  Errors from target resolution are propagated via 'error'.
 *   NOTE(review): this extract omits some lines (break statements, the
 *   function's closing braces, and the code that sets is_fd).
 */
4678 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
4680 MonoJumpInfo *patch_info;
4681 gboolean compile_aot = !run_cctors;
4685 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4686 unsigned char *ip = patch_info->ip.i + code;
4687 unsigned char *target;
4688 gboolean is_fd = FALSE;
4690 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
4691 return_if_nok (error);
4694 switch (patch_info->type) {
4695 case MONO_PATCH_INFO_BB:
4696 case MONO_PATCH_INFO_LABEL:
4699 /* No need to patch these */
/* Second dispatch: patch kinds that rewrite the emitted code in place. */
4704 switch (patch_info->type) {
4705 case MONO_PATCH_INFO_IP:
4706 patch_load_sequence (ip, ip);
4708 case MONO_PATCH_INFO_METHOD_REL:
4709 g_assert_not_reached ();
4710 *((gpointer *)(ip)) = code + patch_info->data.offset;
/* Jump table: point the emitted load at the table, then fill the table
 * with absolute addresses (no per-entry ppc_patch needed). */
4712 case MONO_PATCH_INFO_SWITCH: {
4713 gpointer *table = (gpointer *)patch_info->data.table->table;
4716 patch_load_sequence (ip, table);
4718 for (i = 0; i < patch_info->data.table->table_size; i++) {
4719 table [i] = (glong)patch_info->data.table->table [i] + code;
4721 /* we put into the table the absolute address, no need for ppc_patch in this case */
4724 case MONO_PATCH_INFO_METHODCONST:
4725 case MONO_PATCH_INFO_CLASS:
4726 case MONO_PATCH_INFO_IMAGE:
4727 case MONO_PATCH_INFO_FIELD:
4728 case MONO_PATCH_INFO_VTABLE:
4729 case MONO_PATCH_INFO_IID:
4730 case MONO_PATCH_INFO_SFLDA:
4731 case MONO_PATCH_INFO_LDSTR:
4732 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4733 case MONO_PATCH_INFO_LDTOKEN:
4734 /* from OP_AOTCONST : lis + ori */
4735 patch_load_sequence (ip, target)
4737 case MONO_PATCH_INFO_R4:
4738 case MONO_PATCH_INFO_R8:
4739 g_assert_not_reached ();
4740 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4742 case MONO_PATCH_INFO_EXC_NAME:
4743 g_assert_not_reached ();
4744 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4746 case MONO_PATCH_INFO_NONE:
4747 case MONO_PATCH_INFO_BB_OVF:
4748 case MONO_PATCH_INFO_EXC_OVF:
4749 /* everything is dealt with at epilog output time */
/* On ppc64 ABIs that use function descriptors, these call targets are
 * descriptors; is_fd is presumably set for them in lines not visible in
 * this extract — TODO confirm. */
4751 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
4752 case MONO_PATCH_INFO_INTERNAL_METHOD:
4753 case MONO_PATCH_INFO_ABS:
4754 case MONO_PATCH_INFO_RGCTX_FETCH:
4755 case MONO_PATCH_INFO_JIT_ICALL_ADDR:
/* Default: patch a branch/call site, honouring is_fd for descriptor calls. */
4762 ppc_patch_full (ip, target, is_fd);
4767 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4768 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4769 * the instruction offset immediate for all the registers.
4772 save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset)
/* Non-LMF path (presumably guarded by !save_lmf — the branch lines are not
 * visible in this extract): store only the callee-saved GPRs actually used,
 * emitting an unwind offset op for each so the unwinder can restore them. */
4776 for (i = 13; i <= 31; i++) {
4777 if (used_int_regs & (1 << i)) {
4778 ppc_str (code, i, pos, base_reg);
4779 mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset);
4780 pos += sizeof (mgreg_t);
/* LMF path: unconditionally save r13-r31 into MonoLMF.iregs ... */
4784 /* pos is the start of the MonoLMF structure */
4785 int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs);
4786 for (i = 13; i <= 31; i++) {
4787 ppc_str (code, i, offset, base_reg);
4788 mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset);
4789 offset += sizeof (mgreg_t);
/* ... and f14-f31 (the non-volatile FP set per the PPC ABI) into MonoLMF.fregs. */
4791 offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs);
4792 for (i = 14; i < 32; i++) {
4793 ppc_stfd (code, i, offset, base_reg);
4794 offset += sizeof (gdouble);
4801 * Stack frame layout:
4803 * ------------------- sp
4804 * MonoLMF structure or saved registers
4805 * -------------------
4807 * -------------------
4809 * -------------------
4810 * optional 8 bytes for tracing
4811 * -------------------
4812 * param area size is cfg->param_area
4813 * -------------------
4814 * linkage area size is PPC_STACK_PARAM_OFFSET
4815 * ------------------- sp
4819 mono_arch_emit_prolog (MonoCompile *cfg)
4821 MonoMethod *method = cfg->method;
4823 MonoMethodSignature *sig;
4825 long alloc_size, pos, max_offset, cfa_offset;
4831 int tailcall_struct_index;
4833 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4836 sig = mono_method_signature (method);
4837 cfg->code_size = 512 + sig->param_count * 32;
4838 code = cfg->native_code = g_malloc (cfg->code_size);
4842 /* We currently emit unwind info for aot, but don't use it */
4843 mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0);
4845 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
4846 ppc_mflr (code, ppc_r0);
4847 ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp);
4848 mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET);
4851 alloc_size = cfg->stack_offset;
4854 if (!method->save_lmf) {
4855 for (i = 31; i >= 13; --i) {
4856 if (cfg->used_int_regs & (1 << i)) {
4857 pos += sizeof (mgreg_t);
4861 pos += sizeof (MonoLMF);
4865 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4866 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4867 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4868 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4871 cfg->stack_usage = alloc_size;
4872 g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0);
4874 if (ppc_is_imm16 (-alloc_size)) {
4875 ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp);
4876 cfa_offset = alloc_size;
4877 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4878 code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
4881 ppc_addi (code, ppc_r12, ppc_sp, -pos);
4882 ppc_load (code, ppc_r0, -alloc_size);
4883 ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
4884 cfa_offset = alloc_size;
4885 mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
4886 code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset);
4889 if (cfg->frame_reg != ppc_sp) {
4890 ppc_mr (code, cfg->frame_reg, ppc_sp);
4891 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4894 /* store runtime generic context */
4895 if (cfg->rgctx_var) {
4896 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
4897 (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31));
4899 ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg);
4902 /* compute max_offset in order to use short forward jumps
4903 * we always do it on ppc because the immediate displacement
4904 * for jumps is too small
4907 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4909 bb->max_offset = max_offset;
4911 MONO_BB_FOR_EACH_INS (bb, ins)
4912 max_offset += ins_native_length (cfg, ins);
4915 /* load arguments allocated to register from the stack */
4918 cinfo = get_call_info (sig);
4920 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
4921 ArgInfo *ainfo = &cinfo->ret;
4923 inst = cfg->vret_addr;
4926 if (ppc_is_imm16 (inst->inst_offset)) {
4927 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4929 ppc_load (code, ppc_r12, inst->inst_offset);
4930 ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
4934 tailcall_struct_index = 0;
4935 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4936 ArgInfo *ainfo = cinfo->args + i;
4937 inst = cfg->args [pos];
4939 if (cfg->verbose_level > 2)
4940 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
4941 if (inst->opcode == OP_REGVAR) {
4942 if (ainfo->regtype == RegTypeGeneral)
4943 ppc_mr (code, inst->dreg, ainfo->reg);
4944 else if (ainfo->regtype == RegTypeFP)
4945 ppc_fmr (code, inst->dreg, ainfo->reg);
4946 else if (ainfo->regtype == RegTypeBase) {
4947 ppc_ldr (code, ppc_r12, 0, ppc_sp);
4948 ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12);
4950 g_assert_not_reached ();
4952 if (cfg->verbose_level > 2)
4953 g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4955 /* the argument should be put on the stack: FIXME handle size != word */
4956 if (ainfo->regtype == RegTypeGeneral) {
4957 switch (ainfo->size) {
4959 if (ppc_is_imm16 (inst->inst_offset)) {
4960 ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4962 if (ppc_is_imm32 (inst->inst_offset)) {
4963 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4964 ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12);
4966 ppc_load (code, ppc_r12, inst->inst_offset);
4967 ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4972 if (ppc_is_imm16 (inst->inst_offset)) {
4973 ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4975 if (ppc_is_imm32 (inst->inst_offset)) {
4976 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4977 ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12);
4979 ppc_load (code, ppc_r12, inst->inst_offset);
4980 ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4984 #ifdef __mono_ppc64__
4986 if (ppc_is_imm16 (inst->inst_offset)) {
4987 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
4989 if (ppc_is_imm32 (inst->inst_offset)) {
4990 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
4991 ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12);
4993 ppc_load (code, ppc_r12, inst->inst_offset);
4994 ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
4999 if (ppc_is_imm16 (inst->inst_offset)) {
5000 ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5002 ppc_load (code, ppc_r12, inst->inst_offset);
5003 ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
5008 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5009 ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5010 ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
5012 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5013 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
5014 ppc_stw (code, ainfo->reg, 0, ppc_r12);
5015 ppc_stw (code, ainfo->reg + 1, 4, ppc_r12);
5020 if (ppc_is_imm16 (inst->inst_offset)) {
5021 ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5023 if (ppc_is_imm32 (inst->inst_offset)) {
5024 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5025 ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12);
5027 ppc_load (code, ppc_r12, inst->inst_offset);
5028 ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12);
5033 } else if (ainfo->regtype == RegTypeBase) {
5034 g_assert (ppc_is_imm16 (ainfo->offset));
5035 /* load the previous stack pointer in r12 */
5036 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5037 ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12);
5038 switch (ainfo->size) {
5040 if (ppc_is_imm16 (inst->inst_offset)) {
5041 ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5043 if (ppc_is_imm32 (inst->inst_offset)) {
5044 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5045 ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12);
5047 ppc_load (code, ppc_r12, inst->inst_offset);
5048 ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5053 if (ppc_is_imm16 (inst->inst_offset)) {
5054 ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5056 if (ppc_is_imm32 (inst->inst_offset)) {
5057 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5058 ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12);
5060 ppc_load (code, ppc_r12, inst->inst_offset);
5061 ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5065 #ifdef __mono_ppc64__
5067 if (ppc_is_imm16 (inst->inst_offset)) {
5068 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5070 if (ppc_is_imm32 (inst->inst_offset)) {
5071 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5072 ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12);
5074 ppc_load (code, ppc_r12, inst->inst_offset);
5075 ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12);
5080 if (ppc_is_imm16 (inst->inst_offset)) {
5081 ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5083 ppc_load (code, ppc_r12, inst->inst_offset);
5084 ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg);
5089 g_assert (ppc_is_imm16 (ainfo->offset + 4));
5090 if (ppc_is_imm16 (inst->inst_offset + 4)) {
5091 ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5092 ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12);
5093 ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
5095 /* use r11 to load the 2nd half of the long before we clobber r12. */
5096 ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12);
5097 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5098 ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
5099 ppc_stw (code, ppc_r0, 0, ppc_r12);
5100 ppc_stw (code, ppc_r11, 4, ppc_r12);
5105 if (ppc_is_imm16 (inst->inst_offset)) {
5106 ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
5108 if (ppc_is_imm32 (inst->inst_offset)) {
5109 ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
5110 ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12);
5112 ppc_load (code, ppc_r12, inst->inst_offset);
5113 ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12);
5118 } else if (ainfo->regtype == RegTypeFP) {
5119 g_assert (ppc_is_imm16 (inst->inst_offset));
5120 if (ainfo->size == 8)
5121 ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5122 else if (ainfo->size == 4)
5123 ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
5125 g_assert_not_reached ();
5126 } else if (ainfo->regtype == RegTypeFPStructByVal) {
5127 int doffset = inst->inst_offset;
5131 g_assert (ppc_is_imm16 (inst->inst_offset));
5132 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5133 /* FIXME: what if there is no class? */
5134 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5135 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5136 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5137 if (ainfo->size == 4) {
5138 ppc_stfs (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5140 ppc_stfd (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5142 soffset += ainfo->size;
5143 doffset += ainfo->size;
5145 } else if (ainfo->regtype == RegTypeStructByVal) {
5146 int doffset = inst->inst_offset;
5150 g_assert (ppc_is_imm16 (inst->inst_offset));
5151 g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (gpointer)));
5152 /* FIXME: what if there is no class? */
5153 if (sig->pinvoke && mono_class_from_mono_type (inst->inst_vtype))
5154 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
5155 for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) {
5158 * Darwin handles 1 and 2 byte
5159 * structs specially by
5160 * loading h/b into the arg
5161 * register. Only done for
5165 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5167 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5171 #ifdef __mono_ppc64__
5173 g_assert (cur_reg == 0);
5174 #if G_BYTE_ORDER == G_BIG_ENDIAN
5175 ppc_sldi (code, ppc_r0, ainfo->reg,
5176 (sizeof (gpointer) - ainfo->bytes) * 8);
5177 ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg);
5179 if (mono_class_native_size (inst->klass, NULL) == 1) {
5180 ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5181 } else if (mono_class_native_size (inst->klass, NULL) == 2) {
5182 ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5183 } else if (mono_class_native_size (inst->klass, NULL) == 4) { // WDS -- maybe <=4?
5184 ppc_stw (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg);
5186 ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); // WDS -- Better way?
5192 ppc_stptr (code, ainfo->reg + cur_reg, doffset,
5193 inst->inst_basereg);
5196 soffset += sizeof (gpointer);
5197 doffset += sizeof (gpointer);
5199 if (ainfo->vtsize) {
5200 /* FIXME: we need to do the shifting here, too */
5203 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5204 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5205 if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
5206 code = emit_memcpy (code, size - soffset,
5207 inst->inst_basereg, doffset,
5208 ppc_r12, ainfo->offset + soffset);
5210 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
5211 inst->inst_basereg, doffset,
5212 ppc_r12, ainfo->offset + soffset);
5215 } else if (ainfo->regtype == RegTypeStructByAddr) {
5216 /* if it was originally a RegTypeBase */
5217 if (ainfo->offset) {
5218 /* load the previous stack pointer in r12 */
5219 ppc_ldr (code, ppc_r12, 0, ppc_sp);
5220 ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12);
5222 ppc_mr (code, ppc_r12, ainfo->reg);
5225 if (cfg->tailcall_valuetype_addrs) {
5226 MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
5228 g_assert (ppc_is_imm16 (addr->inst_offset));
5229 ppc_stptr (code, ppc_r12, addr->inst_offset, addr->inst_basereg);
5231 tailcall_struct_index++;
5234 g_assert (ppc_is_imm16 (inst->inst_offset));
5235 code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0);
5236 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5238 g_assert_not_reached ();
5243 if (method->save_lmf) {
5244 if (cfg->compile_aot) {
5245 /* Compute the got address which is needed by the PLT entry */
5246 code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
5248 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5249 (gpointer)"mono_tls_get_lmf_addr");
5250 if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
5251 ppc_load_func (code, PPC_CALL_REG, 0);
5252 ppc_mtlr (code, PPC_CALL_REG);
5257 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5258 /* lmf_offset is the offset from the previous stack pointer,
5259 * alloc_size is the total stack space allocated, so the offset
5260 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5261 * The pointer to the struct is put in ppc_r12 (new_lmf).
5262 * The callee-saved registers are already in the MonoLMF structure
5264 ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset);
5265 /* ppc_r3 is the result from mono_get_lmf_addr () */
5266 ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5267 /* new_lmf->previous_lmf = *lmf_addr */
5268 ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5269 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
5270 /* *(lmf_addr) = r12 */
5271 ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
5272 /* save method info */
5273 if (cfg->compile_aot)
5275 ppc_load (code, ppc_r0, 0);
5277 ppc_load_ptr (code, ppc_r0, method);
5278 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12);
5279 ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12);
5280 /* save the current IP */
5281 if (cfg->compile_aot) {
5283 ppc_mflr (code, ppc_r0);
5285 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
5286 #ifdef __mono_ppc64__
5287 ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL);
5289 ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
5292 ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12);
5296 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5298 cfg->code_len = code - cfg->native_code;
5299 g_assert (cfg->code_len <= cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: grow the native code buffer if the estimated
 * epilogue might not fit, optionally emit leave-method tracing, tear down
 * the MonoLMF frame (if the method saved one), restore the callee-saved
 * integer registers and the link register, and restore the stack pointer.
 * NOTE(review): this is an elided view of the function; some lines
 * (declarations, closing braces, the final return sequence) are not visible.
 */
5306 mono_arch_emit_epilog (MonoCompile *cfg)
5308 MonoMethod *method = cfg->method;
/* Worst-case size estimate for the epilogue, used to grow the buffer below. */
5310 int max_epilog_size = 16 + 20*4;
5313 if (cfg->method->save_lmf)
5314 max_epilog_size += 128;
5316 if (mono_jit_trace_calls != NULL)
5317 max_epilog_size += 50;
/* Double the code buffer until the worst-case epilogue is guaranteed to fit. */
5319 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5320 cfg->code_size *= 2;
5321 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5322 cfg->stat_code_reallocs++;
5326 * Keep in sync with OP_JMP
5328 code = cfg->native_code + cfg->code_len;
5330 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5331 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* LMF path: unlink this frame's MonoLMF from the per-thread LMF chain and
 * restore the registers that were saved into it by the prologue. */
5335 if (method->save_lmf) {
5337 pos += sizeof (MonoLMF);
5339 /* save the frame reg in r8 */
5340 ppc_mr (code, ppc_r8, cfg->frame_reg);
/* r12 = address of the MonoLMF inside this frame */
5341 ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset);
5342 /* r5 = previous_lmf */
5343 ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
/* r6 = lmf_addr (pointer to the per-thread LMF slot) */
5345 ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
5346 /* *(lmf_addr) = previous_lmf */
5347 ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
5348 /* FIXME: speedup: there is no actual need to restore the registers if
5349 * we didn't actually change them (idea from Zoltan).
/* Reload the callee-saved integer registers (r13 upward) from the LMF. */
5352 ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12);
5354 /*for (i = 14; i < 32; i++) {
5355 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5357 g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
5358 /* use the saved copy of the frame reg in r8 */
5359 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5360 ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8);
5361 ppc_mtlr (code, ppc_r0);
5363 ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage);
/* Non-LMF path: reload the return address into the link register. */
5365 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
5366 long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
5367 if (ppc_is_imm16 (return_offset)) {
5368 ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
5370 ppc_load (code, ppc_r12, return_offset);
5371 ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
5373 ppc_mtlr (code, ppc_r0);
5375 if (ppc_is_imm16 (cfg->stack_usage)) {
/* Saved registers live at the top of the frame: walk the used-register
 * mask once to compute the offset of the first saved register. */
5376 int offset = cfg->stack_usage;
5377 for (i = 13; i <= 31; i++) {
5378 if (cfg->used_int_regs & (1 << i))
5379 offset -= sizeof (mgreg_t);
5381 if (cfg->frame_reg != ppc_sp)
5382 ppc_mr (code, ppc_r12, cfg->frame_reg);
5383 /* note r31 (possibly the frame register) is restored last */
5384 for (i = 13; i <= 31; i++) {
5385 if (cfg->used_int_regs & (1 << i)) {
5386 ppc_ldr (code, i, offset, cfg->frame_reg);
5387 offset += sizeof (mgreg_t);
5390 if (cfg->frame_reg != ppc_sp)
5391 ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage);
5393 ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
/* Large frame: stack_usage does not fit in a 16-bit displacement, so
 * compute addresses via r12 and restore with negative offsets from the
 * caller's stack pointer. */
5395 ppc_load32 (code, ppc_r12, cfg->stack_usage);
5396 if (cfg->used_int_regs) {
5397 ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12);
5398 for (i = 31; i >= 13; --i) {
5399 if (cfg->used_int_regs & (1 << i)) {
5400 pos += sizeof (mgreg_t);
5401 ppc_ldr (code, i, -pos, ppc_r12);
5404 ppc_mr (code, ppc_sp, ppc_r12);
5406 ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12);
5413 cfg->code_len = code - cfg->native_code;
5415 g_assert (cfg->code_len < cfg->code_size);
5418 #endif /* ifndef DISABLE_JIT */
5420 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *
 *   Map a corlib exception class name (e.g. "OverflowException") to its
 * MONO_EXC_* intrinsic id. Aborts via g_error() for any name that is not
 * one of the known intrinsic exceptions, so callers must only pass names
 * recorded by the code generator.
 */
5422 exception_id_by_name (const char *name)
5424 if (strcmp (name, "IndexOutOfRangeException") == 0)
5425 return MONO_EXC_INDEX_OUT_OF_RANGE;
5426 if (strcmp (name, "OverflowException") == 0)
5427 return MONO_EXC_OVERFLOW;
5428 if (strcmp (name, "ArithmeticException") == 0)
5429 return MONO_EXC_ARITHMETIC;
5430 if (strcmp (name, "DivideByZeroException") == 0)
5431 return MONO_EXC_DIVIDE_BY_ZERO;
5432 if (strcmp (name, "InvalidCastException") == 0)
5433 return MONO_EXC_INVALID_CAST;
5434 if (strcmp (name, "NullReferenceException") == 0)
5435 return MONO_EXC_NULL_REF;
5436 if (strcmp (name, "ArrayTypeMismatchException") == 0)
5437 return MONO_EXC_ARRAY_TYPE_MISMATCH;
5438 if (strcmp (name, "ArgumentException") == 0)
5439 return MONO_EXC_ARGUMENT;
5440 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code for the patches recorded during codegen:
 * - MONO_PATCH_INFO_BB_OVF: a conditional branch whose displacement did not
 *   fit; re-emit it here and long-jump to the real basic-block target.
 * - MONO_PATCH_INFO_EXC_OVF: same, but the target is an exception throw; a
 *   new MONO_PATCH_INFO_EXC patch is queued so the throw code is emitted on
 *   a later iteration of the same loop.
 * - MONO_PATCH_INFO_EXC: emit (and share, when close enough) a call to
 *   mono_arch_throw_corlib_exception with the exception type token in r3
 *   and the faulting IP (taken from lr) in r4.
 * NOTE(review): elided view; some braces and lines are not visible here.
 */
5446 mono_arch_emit_exceptions (MonoCompile *cfg)
5448 MonoJumpInfo *patch_info;
5451 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5452 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5453 int max_epilog_size = 50;
5455 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5456 exc_throw_pos [i] = NULL;
5457 exc_throw_found [i] = 0;
5460 /* count the number of exception infos */
5463 * make sure we have enough space for exceptions
/* First pass: compute an upper bound on the code we are about to emit,
 * counting each distinct intrinsic exception's throw sequence only once. */
5465 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5466 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5467 i = exception_id_by_name (patch_info->data.target);
5468 if (!exc_throw_found [i]) {
5469 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5470 exc_throw_found [i] = TRUE;
5472 } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF)
5473 max_epilog_size += 12;
5474 else if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) {
5475 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5476 i = exception_id_by_name (ovfj->data.exception);
5477 if (!exc_throw_found [i]) {
5478 max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4;
5479 exc_throw_found [i] = TRUE;
5481 max_epilog_size += 8;
/* Grow the native code buffer until the worst case fits. */
5485 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5486 cfg->code_size *= 2;
5487 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5488 cfg->stat_code_reallocs++;
5491 code = cfg->native_code + cfg->code_len;
5493 /* add code to raise exceptions */
5494 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5495 switch (patch_info->type) {
5496 case MONO_PATCH_INFO_BB_OVF: {
5497 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5498 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5499 /* patch the initial jump */
5500 ppc_patch (ip, code);
5501 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2);
5503 ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */
5504 /* jump back to the true target */
5506 ip = ovfj->data.bb->native_offset + cfg->native_code;
5507 ppc_patch (code - 4, ip);
/* Mark this patch as handled so the generic patcher skips it. */
5508 patch_info->type = MONO_PATCH_INFO_NONE;
5511 case MONO_PATCH_INFO_EXC_OVF: {
5512 MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target;
5513 MonoJumpInfo *newji;
5514 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5515 unsigned char *bcl = code;
5516 /* patch the initial jump: we arrived here with a call */
5517 ppc_patch (ip, code);
5518 ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0);
5520 ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */
5521 /* patch the conditional jump to the right handler */
5522 /* make it processed next */
/* Queue a fresh EXC patch right after this one; the loop will reach it
 * and emit the actual throw sequence for the conditional branch above. */
5523 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
5524 newji->type = MONO_PATCH_INFO_EXC;
5525 newji->ip.i = bcl - cfg->native_code;
5526 newji->data.target = ovfj->data.exception;
5527 newji->next = patch_info->next;
5528 patch_info->next = newji;
5529 patch_info->type = MONO_PATCH_INFO_NONE;
5532 case MONO_PATCH_INFO_EXC: {
5533 MonoClass *exc_class;
5535 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5536 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted throw sequence when it is within branch range
 * (the 50000-byte limit keeps the 16-bit conditional displacement valid). */
5537 if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) {
5538 ppc_patch (ip, exc_throw_pos [i]);
5539 patch_info->type = MONO_PATCH_INFO_NONE;
5542 exc_throw_pos [i] = code;
5545 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5547 ppc_patch (ip, code);
5548 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5549 ppc_load (code, ppc_r3, exc_class->type_token);
5550 /* we got here from a conditional call, so the calling ip is set in lr */
5551 ppc_mflr (code, ppc_r4);
/* Repurpose this patch entry as the call to the throw helper. */
5552 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5553 patch_info->data.name = "mono_arch_throw_corlib_exception";
5554 patch_info->ip.i = code - cfg->native_code;
5555 if (FORCE_INDIR_CALL || cfg->method->dynamic) {
5556 ppc_load_func (code, PPC_CALL_REG, 0);
5557 ppc_mtctr (code, PPC_CALL_REG);
5558 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5570 cfg->code_len = code - cfg->native_code;
5572 g_assert (cfg->code_len <= cfg->code_size);
/*
 * try_offset_access:
 *
 *   Checks whether VALUE can be found at a bit-indexed offset (idx/32,
 * idx%32) from a fixed displacement off r2.
 * NOTE(review): looks like leftover debugging/experimentation code (the
 * 284 displacement is a magic number and nothing visible calls this) --
 * confirm against the full file before relying on it.
 */
5578 try_offset_access (void *value, guint32 idx)
5580 register void* me __asm__ ("r2");
5581 void ***p = (void***)((char*)me + 284);
5582 int idx1 = idx / 32;
5583 int idx2 = idx % 32;
5586 if (value != p[idx1][idx2])
/* Architecture-specific late initialization hook; body elided/empty in this view. */
5593 mono_arch_finish_init (void)
/* Free architecture-specific JIT TLS data; body elided/empty in this view. */
5598 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5602 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5604 #define LOADSTORE_SIZE 4
5605 #define JUMP_IMM_SIZE 12
5606 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
5607 #define ENABLE_WRONG_METHOD_CHECK 0
5610 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the IMT/interface dispatch thunk: a chain of chunks, one per
 * IMT entry, each comparing the key in MONO_ARCH_IMT_REG and either
 * jumping to the matching vtable slot / target code or falling through
 * to the next chunk. Two passes: the first computes each chunk's size so
 * branch targets are known, the second emits the code; a final loop
 * patches the inter-chunk branches. When FAIL_TRAMP is non-NULL the
 * thunk is used for generic virtual dispatch and misses jump to it.
 * NOTE(review): elided view; some braces and lines are not visible here.
 */
5613 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5614 gpointer fail_tramp)
5618 guint8 *code, *start;
/* Pass 1: size estimation per chunk (needed for the bsearch-style branches). */
5620 for (i = 0; i < count; ++i) {
5621 MonoIMTCheckItem *item = imt_entries [i];
5622 if (item->is_equals) {
5623 if (item->check_target_idx) {
5624 if (!item->compare_done)
5625 item->chunk_size += CMP_SIZE;
5626 if (item->has_target_code)
5627 item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE;
5629 item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE;
5632 item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2;
5633 if (!item->has_target_code)
5634 item->chunk_size += LOADSTORE_SIZE;
5636 item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE;
5637 #if ENABLE_WRONG_METHOD_CHECK
5638 item->chunk_size += CMP_SIZE + BR_SIZE + 4;
5643 item->chunk_size += CMP_SIZE + BR_SIZE;
5644 imt_entries [item->check_target_idx]->compare_done = TRUE;
5646 size += item->chunk_size;
5648 /* the initial load of the vtable address */
5649 size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE;
/* Allocate from the generic-virtual pool or the domain code manager
 * depending on whether a fail trampoline was supplied (branch elided). */
5651 code = mono_method_alloc_generic_virtual_trampoline (domain, size);
5653 code = mono_domain_code_reserve (domain, size);
5658 * We need to save and restore r12 because it might be
5659 * used by the caller as the vtable register, so
5660 * clobbering it will trip up the magic trampoline.
5662 * FIXME: Get rid of this by making sure that r12 is
5663 * not used as the vtable register in interface calls.
5665 ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5666 ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0])));
/* Pass 2: emit each chunk's compare/branch/jump sequence. */
5668 for (i = 0; i < count; ++i) {
5669 MonoIMTCheckItem *item = imt_entries [i];
5670 item->code_target = code;
5671 if (item->is_equals) {
5672 if (item->check_target_idx) {
5673 if (!item->compare_done) {
5674 ppc_load (code, ppc_r0, (gsize)item->key);
5675 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5677 item->jmp_code = code;
5678 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5679 if (item->has_target_code) {
5680 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5682 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
/* Restore the caller's r12 before jumping to the target. */
5683 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5685 ppc_mtctr (code, ppc_r0);
5686 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5689 ppc_load (code, ppc_r0, (gulong)item->key);
5690 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5691 item->jmp_code = code;
5692 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5693 if (item->has_target_code) {
5694 ppc_load_ptr (code, ppc_r0, item->value.target_code);
5697 ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot]));
5698 ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0);
5700 ppc_mtctr (code, ppc_r0);
5701 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
/* Miss on the last chunk: jump to the fail trampoline. */
5702 ppc_patch (item->jmp_code, code);
5703 ppc_load_ptr (code, ppc_r0, fail_tramp);
5704 ppc_mtctr (code, ppc_r0);
5705 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5706 item->jmp_code = NULL;
5708 /* enable the commented code to assert on wrong method */
5709 #if ENABLE_WRONG_METHOD_CHECK
5710 ppc_load (code, ppc_r0, (guint32)item->key);
5711 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5712 item->jmp_code = code;
5713 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
5715 ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
5716 ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
5717 ppc_mtctr (code, ppc_r0);
5718 ppc_bcctr (code, PPC_BR_ALWAYS, 0);
5719 #if ENABLE_WRONG_METHOD_CHECK
5720 ppc_patch (item->jmp_code, code);
5722 item->jmp_code = NULL;
/* Range-check chunk (not is_equals): branch onward when key is below. */
5727 ppc_load (code, ppc_r0, (gulong)item->key);
5728 ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
5729 item->jmp_code = code;
5730 ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0);
5733 /* patch the branches to get to the target items */
5734 for (i = 0; i < count; ++i) {
5735 MonoIMTCheckItem *item = imt_entries [i];
5736 if (item->jmp_code) {
5737 if (item->check_target_idx) {
5738 ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5744 UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
5745 g_assert (code - start <= size);
5746 mono_arch_flush_icache (start, size);
5748 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
/* Return the MonoMethod* the caller stashed in the IMT register,
 * read out of the saved register array REGS. */
5754 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5756 mgreg_t *r = (mgreg_t*)regs;
5758 return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG];
/* Return the MonoVTable* passed in the RGCTX register for a static
 * rgctx call, read out of the saved register array REGS. */
5762 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5764 mgreg_t *r = (mgreg_t*)regs;
5766 return (MonoVTable*)(gsize) r [MONO_ARCH_RGCTX_REG];
/* Build the default CIE unwind program: the CFA is r1 (the stack
 * pointer) with offset 0 at method entry. (Return of the list elided.) */
5770 mono_arch_get_cie_program (void)
5774 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ppc_r1, 0);
/* Intrinsics hook: return an arch-specific MonoInst replacing a call to
 * CMETHOD, or NULL. Body elided in this view; presumably returns NULL on
 * PPC -- confirm against the full source. */
5780 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/*
 * mono_arch_context_get_int_reg:
 *
 *   Fetch integer register REG from the saved context CTX. The first
 * return handles the stack pointer specially (its guarding condition is
 * elided here; presumably reg == ppc_sp -- confirm against full source).
 */
5787 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5790 return (mgreg_t)MONO_CONTEXT_GET_SP (ctx);
5792 return ctx->regs [reg];
/* Offset of the patchable instruction relative to CODE; body elided in
 * this view. */
5796 mono_arch_get_patch_offset (guint8 *code)
5802 * mono_arch_emit_load_got_addr:
5804 * Emit code to load the got address.
5805 * On PPC, the result is placed into r30.
5808 mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
/* r30 = address of the instruction after the preceding bl (elided). */
5811 ppc_mflr (code, ppc_r30);
/* Record the GOT-offset patch either in the compile unit or on the
 * caller-supplied patch list (branch between the two is elided). */
5813 mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5815 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
5816 /* arch_emit_got_address () patches this */
5817 #if defined(TARGET_POWERPC64)
/* 32-bit path: the placeholder constant is patched with the real GOT
 * displacement, then added to r30. */
5823 ppc_load32 (code, ppc_r0, 0);
5824 ppc_add (code, ppc_r30, ppc_r30, ppc_r0);
5831 * mono_arch_emit_load_aotconst:
5833 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5834 * TARGET from the mscorlib GOT in full-aot code.
5835 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
5839 mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
5841 /* Load the mscorlib got address */
5842 ppc_ldptr (code, ppc_r12, sizeof (gpointer), ppc_r30);
/* Record the patch at the current offset; the placeholder 0 below is
 * replaced with the slot's displacement by arch_emit_got_access (). */
5843 *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
5844 /* arch_emit_got_access () patches this */
5845 ppc_load32 (code, ppc_r0, 0);
5846 ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0);
5851 /* Soft Debug support */
5852 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5859 * mono_arch_set_breakpoint:
5861 * See mini-amd64.c for docs.
5864 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5867 guint8 *orig_code = code;
/* Overwrite the sequence-point site with a load from bp_trigger_page;
 * dereferencing it faults when breakpoints are armed (page protection). */
5869 ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page);
5870 ppc_ldptr (code, ppc_r12, 0, ppc_r12);
/* The patch must be exactly the size reserved for breakpoint sites. */
5872 g_assert (code - orig_code == BREAKPOINT_SIZE);
5874 mono_arch_flush_icache (orig_code, code - orig_code);
5878 * mono_arch_clear_breakpoint:
5880 * See mini-amd64.c for docs.
5883 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
/* Replace each 4-byte instruction of the breakpoint sequence (loop body,
 * presumably emitting nops, is elided in this view). */
5888 for (i = 0; i < BREAKPOINT_SIZE / 4; ++i)
5891 mono_arch_flush_icache (ip, code - ip);
5895 * mono_arch_is_breakpoint_event:
5897 * See mini-amd64.c for docs.
/* A SIGSEGV whose faulting address falls inside the breakpoint trigger
 * page means a breakpoint instruction sequence executed. */
5900 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5902 siginfo_t* sinfo = (siginfo_t*) info;
5903 /* Sometimes the address is off by 4 */
5904 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5911 * mono_arch_skip_breakpoint:
5913 * See mini-amd64.c for docs.
5916 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
5918 /* skip the ldptr */
/* Advance the saved IP past the faulting 4-byte load so execution resumes. */
5919 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5927 * mono_arch_start_single_stepping:
5929 * See mini-amd64.c for docs.
5932 mono_arch_start_single_stepping (void)
/* Revoke all access to the trigger page so every sequence-point read faults. */
5934 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5938 * mono_arch_stop_single_stepping:
5940 * See mini-amd64.c for docs.
5943 mono_arch_stop_single_stepping (void)
/* Make the trigger page readable again so sequence-point reads succeed. */
5945 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5949 * mono_arch_is_single_step_event:
5951 * See mini-amd64.c for docs.
/* A SIGSEGV whose faulting address falls inside the single-step trigger
 * page means a sequence-point read faulted while stepping is enabled. */
5954 mono_arch_is_single_step_event (void *info, void *sigctx)
5956 siginfo_t* sinfo = (siginfo_t*) info;
5957 /* Sometimes the address is off by 4 */
5958 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5965 * mono_arch_skip_single_step:
5967 * See mini-amd64.c for docs.
5970 mono_arch_skip_single_step (MonoContext *ctx)
5972 /* skip the ldptr */
/* Advance the saved IP past the faulting 4-byte load so execution resumes. */
5973 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5977 * mono_arch_create_seq_point_info:
5979 * See mini-amd64.c for docs.
/* Body elided in this view; PPC uses trigger pages rather than per-method
 * sequence-point info -- confirm against the full source. */
5982 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
/*
 * mono_arch_init_lmf_ext:
 *
 *   Initialize an extended LMF frame: link it to PREV_LMF and tag bit 1
 * of the previous_lmf pointer so the unwinder can recognize the extended
 * MonoLMFExt layout; ebp records the frame's own address.
 */
5989 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
5991 ext->lmf.previous_lmf = prev_lmf;
5992 /* Mark that this is a MonoLMFExt */
5993 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
5994 ext->lmf.ebp = (gssize)ext;
6000 mono_arch_opcode_supported (int opcode)
6003 case OP_ATOMIC_ADD_I4:
6004 case OP_ATOMIC_CAS_I4:
6005 #ifdef TARGET_POWERPC64
6006 case OP_ATOMIC_ADD_I8:
6007 case OP_ATOMIC_CAS_I8: