2 * mini-ia64.c: IA64 backend for the Mono code generator
5 * Zoltan Varga (vargaz@gmail.com)
7 * (C) 2003 Ximian, Inc.
15 #ifdef __INTEL_COMPILER
16 #include <ia64intrin.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/profiler-private.h>
23 #include <mono/utils/mono-math.h>
24 #include <mono/utils/mono-hwcap-ia64.h>
27 #include "mini-ia64.h"
29 #include "jit-icalls.h"
32 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
34 #define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
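/*
 * Illustrative examples (not from the original source): ALIGN_TO (13, 8) == 16,
 * IS_IMM32 (0x7fffffff) is TRUE and IS_IMM32 (0x100000000ULL) is FALSE.
 */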
37 * IA64 register usage:
38 * - local registers are used for global register allocation
39 * - r8..r11, r14..r30 are used for local register allocation
40 * - r31 is a scratch register used within opcode implementations
41 * - FIXME: Use out registers as well
42 * - the first three locals are used for saving ar.pfs, b0, and sp
43 * - compare instructions always set p6 and p7
47 * There are a lot of places where generated code is disassembled/patched.
48 * The automatic bundling of instructions done by the code generation macros
49 * could complicate things, so it is best to call
50 * ia64_codegen_set_one_ins_per_bundle () at those places.
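 *
 * (Background note, not from the original comment: ia64 encodes instructions
 * three to a 128-bit bundle, so a patch site that shares a bundle with other
 * instructions cannot be rewritten independently; emitting one instruction
 * per bundle keeps such sites safely patchable.)
 */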
53 #define ARGS_OFFSET 16
55 #define GP_SCRATCH_REG 31
56 #define GP_SCRATCH_REG2 30
57 #define FP_SCRATCH_REG 32
58 #define FP_SCRATCH_REG2 33
60 #define LOOP_ALIGNMENT 8
61 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
63 static const char* gregs [] = {
64 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
65 "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
66 "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
67 "r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
69 "r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59",
70 "r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69",
71 "r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
72 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89",
73 "r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99",
74 "r100", "r101", "r102", "r103", "r104", "r105", "r106", "r107", "r108", "r109",
75 "r110", "r111", "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
76 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
80 mono_arch_regname (int reg)
88 static const char* fregs [] = {
89 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
90 "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
91 "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
92 "f30", "f31", "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
93 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", "f48", "f49",
94 "f50", "f51", "f52", "f53", "f54", "f55", "f56", "f57", "f58", "f59",
95 "f60", "f61", "f62", "f63", "f64", "f65", "f66", "f67", "f68", "f69",
96 "f70", "f71", "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
97 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87", "f88", "f89",
98 "f90", "f91", "f92", "f93", "f94", "f95", "f96", "f97", "f98", "f99",
99 "f100", "f101", "f102", "f103", "f104", "f105", "f106", "f107", "f108", "f109",
100 "f110", "f111", "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
101 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
105 mono_arch_fregname (int reg)
114 debug_ins_sched (void)
117 return mono_debug_count ();
127 return mono_debug_count ();
134 ia64_patch (unsigned char* code, gpointer target);
141 ArgValuetypeAddrInIReg,
159 /* Only if storage == ArgAggregate */
169 gboolean need_stack_align;
170 gboolean vtype_retaddr;
171 /* The index of the vret arg in the argument list */
178 #define DEBUG(a) if (cfg->verbose_level > 1) a
183 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
185 ainfo->offset = *stack_size;
187 if (*gr >= PARAM_REGS) {
188 ainfo->storage = ArgOnStack;
189 (*stack_size) += sizeof (gpointer);
192 ainfo->storage = ArgInIReg;
198 #define FLOAT_PARAM_REGS 8
201 add_float (guint32 *gr, guint32 *fr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
203 ainfo->offset = *stack_size;
205 if (*gr >= PARAM_REGS) {
206 ainfo->storage = ArgOnStack;
207 (*stack_size) += sizeof (gpointer);
210 ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
211 ainfo->reg = 8 + *fr;
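/* Floating point arguments land in f8..f15 (hence reg = 8 + *fr) while also consuming a general argument slot, which is why availability is checked against *gr above. */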
218 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
220 guint32 *gr, guint32 *fr, guint32 *stack_size)
224 MonoMarshalType *info;
225 gboolean is_hfa = TRUE;
226 guint32 hfa_type = 0;
228 klass = mono_class_from_mono_type (type);
229 if (type->type == MONO_TYPE_TYPEDBYREF)
230 size = 3 * sizeof (gpointer);
231 else if (sig->pinvoke)
232 size = mono_type_native_stack_size (&klass->byval_arg, NULL);
234 size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);
236 if (!sig->pinvoke || (size == 0)) {
237 /* Always pass in memory */
238 ainfo->offset = *stack_size;
239 *stack_size += ALIGN_TO (size, 8);
240 ainfo->storage = ArgOnStack;
245 /* Determine whether it is an HFA (Homogeneous Floating Point Aggregate) */
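/*
 * Illustrative note: an HFA is a struct whose fields are all the same
 * floating point type, e.g. struct { float x, y; }; per the Itanium software
 * conventions such aggregates are passed in consecutive floating point
 * argument registers, one field per register, rather than in the general
 * registers used for other small aggregates.
 */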
246 info = mono_marshal_load_type_info (klass);
248 for (i = 0; i < info->num_fields; ++i) {
249 guint32 ftype = info->fields [i].field->type->type;
250 if (!(info->fields [i].field->type->byref) &&
251 ((ftype == MONO_TYPE_R4) || (ftype == MONO_TYPE_R8))) {
254 else if (hfa_type != ftype)
263 ainfo->storage = ArgAggregate;
264 ainfo->atype = AggregateNormal;
267 ainfo->atype = hfa_type == MONO_TYPE_R4 ? AggregateSingleHFA : AggregateDoubleHFA;
269 if (info->num_fields <= 8) {
271 ainfo->nregs = info->num_fields;
272 ainfo->nslots = ainfo->nregs;
278 if ((*fr) + info->num_fields > 8)
281 ainfo->reg = 8 + (*fr);
282 ainfo->nregs = info->num_fields;
283 ainfo->nslots = ainfo->nregs;
284 (*fr) += info->num_fields;
285 if (ainfo->atype == AggregateSingleHFA) {
287 * FIXME: Have to keep track of the parameter slot number, which is
288 * not the same as *gr.
290 (*gr) += ALIGN_TO (info->num_fields, 2) / 2;
292 (*gr) += info->num_fields;
298 /* This also handles returning of TypedByRef used by some icalls */
301 ainfo->reg = IA64_R8;
302 ainfo->nregs = (size + 7) / 8;
303 ainfo->nslots = ainfo->nregs;
310 ainfo->offset = *stack_size;
311 ainfo->nslots = (size + 7) / 8;
313 if (((*gr) + ainfo->nslots) <= 8) {
314 /* Fits entirely in registers */
315 ainfo->nregs = ainfo->nslots;
316 (*gr) += ainfo->nregs;
320 ainfo->nregs = 8 - (*gr);
322 (*stack_size) += (ainfo->nslots - ainfo->nregs) * 8;
328 * Obtain information about a call according to the calling convention.
329 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
330 * Guide" document for more information.
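 *
 * Illustrative example (not from the original comment): for a signature like
 * int Foo (object o, double d), 'o' is passed in the first general argument
 * register, 'd' in f8 (floating point arguments use f8..f15 while still
 * consuming an argument slot), and the int result is returned in r8.
 */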
333 get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
335 guint32 i, gr, fr, pstart;
337 int n = sig->hasthis + sig->param_count;
338 guint32 stack_size = 0;
340 MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
343 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
345 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
352 ret_type = mono_type_get_underlying_type (sig->ret);
353 ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
354 switch (ret_type->type) {
355 case MONO_TYPE_BOOLEAN:
366 case MONO_TYPE_FNPTR:
367 case MONO_TYPE_CLASS:
368 case MONO_TYPE_OBJECT:
369 case MONO_TYPE_SZARRAY:
370 case MONO_TYPE_ARRAY:
371 case MONO_TYPE_STRING:
372 cinfo->ret.storage = ArgInIReg;
373 cinfo->ret.reg = IA64_R8;
377 cinfo->ret.storage = ArgInIReg;
378 cinfo->ret.reg = IA64_R8;
382 cinfo->ret.storage = ArgInFloatReg;
385 case MONO_TYPE_GENERICINST:
386 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
387 cinfo->ret.storage = ArgInIReg;
388 cinfo->ret.reg = IA64_R8;
392 case MONO_TYPE_VALUETYPE:
393 case MONO_TYPE_TYPEDBYREF: {
394 guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
396 if (sig->ret->byref) {
397 /* This seems to happen with ldfld wrappers */
398 cinfo->ret.storage = ArgInIReg;
400 add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
401 if (cinfo->ret.storage == ArgOnStack) {
402 /* The caller passes the address where the value is stored */
403 cinfo->vtype_retaddr = TRUE;
409 cinfo->ret.storage = ArgNone;
412 g_error ("Can't handle as return value 0x%x", sig->ret->type);
418 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
419 * the first argument, allowing 'this' to be always passed in the first arg reg.
420 * Also do this if the first argument is a reference type, since virtual calls
421 * are sometimes made using calli without sig->hasthis set, like in the delegate invoke wrappers.
424 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
426 add_general (&gr, &stack_size, cinfo->args + 0);
428 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
431 add_general (&gr, &stack_size, &cinfo->ret);
432 if (cinfo->ret.storage == ArgInIReg)
433 cinfo->ret.storage = ArgValuetypeAddrInIReg;
434 cinfo->vret_arg_index = 1;
438 add_general (&gr, &stack_size, cinfo->args + 0);
440 if (cinfo->vtype_retaddr) {
441 add_general (&gr, &stack_size, &cinfo->ret);
442 if (cinfo->ret.storage == ArgInIReg)
443 cinfo->ret.storage = ArgValuetypeAddrInIReg;
447 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
449 fr = FLOAT_PARAM_REGS;
451 /* Emit the signature cookie just before the implicit arguments */
452 add_general (&gr, &stack_size, &cinfo->sig_cookie);
455 for (i = pstart; i < sig->param_count; ++i) {
456 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
459 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
460 /* We always pass the sig cookie on the stack for simplicity */
462 * Prevent implicit arguments + the sig cookie from being passed
466 fr = FLOAT_PARAM_REGS;
468 /* Emit the signature cookie just before the implicit arguments */
469 add_general (&gr, &stack_size, &cinfo->sig_cookie);
472 if (sig->params [i]->byref) {
473 add_general (&gr, &stack_size, ainfo);
476 ptype = mono_type_get_underlying_type (sig->params [i]);
477 ptype = mini_get_basic_type_from_generic (gsctx, ptype);
478 switch (ptype->type) {
479 case MONO_TYPE_BOOLEAN:
482 add_general (&gr, &stack_size, ainfo);
487 add_general (&gr, &stack_size, ainfo);
491 add_general (&gr, &stack_size, ainfo);
496 case MONO_TYPE_FNPTR:
497 case MONO_TYPE_CLASS:
498 case MONO_TYPE_OBJECT:
499 case MONO_TYPE_STRING:
500 case MONO_TYPE_SZARRAY:
501 case MONO_TYPE_ARRAY:
502 add_general (&gr, &stack_size, ainfo);
504 case MONO_TYPE_GENERICINST:
505 if (!mono_type_generic_inst_is_valuetype (ptype)) {
506 add_general (&gr, &stack_size, ainfo);
510 case MONO_TYPE_VALUETYPE:
511 case MONO_TYPE_TYPEDBYREF:
513 /* We always pass valuetypes on the stack */
514 add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
518 add_general (&gr, &stack_size, ainfo);
521 add_float (&gr, &fr, &stack_size, ainfo, FALSE);
524 add_float (&gr, &fr, &stack_size, ainfo, TRUE);
527 g_assert_not_reached ();
531 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
533 fr = FLOAT_PARAM_REGS;
535 /* Emit the signature cookie just before the implicit arguments */
536 add_general (&gr, &stack_size, &cinfo->sig_cookie);
539 cinfo->stack_usage = stack_size;
540 cinfo->reg_usage = gr;
541 cinfo->freg_usage = fr;
546 * mono_arch_get_argument_info:
547 * @csig: a method signature
548 * @param_count: the number of parameters to consider
549 * @arg_info: an array to store the result infos
551 * Gathers information on parameters such as size, alignment and
552 * padding. arg_info should be large enough to hold param_count + 1 entries.
554 * Returns the size of the argument area on the stack.
557 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
560 CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
561 guint32 args_size = cinfo->stack_usage;
563 /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
565 arg_info [0].offset = 0;
568 for (k = 0; k < param_count; k++) {
569 arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
571 arg_info [k + 1].size = 0;
580 * Initialize the cpu to execute managed code.
583 mono_arch_cpu_init (void)
588 * Initialize architecture specific code.
591 mono_arch_init (void)
596 * Cleanup architecture specific code.
599 mono_arch_cleanup (void)
604 * This function returns the optimizations supported on this cpu.
607 mono_arch_cpu_optimizations (guint32 *exclude_mask)
615 * This function tests for all SIMD functions supported.
617 * Returns a bitmask corresponding to all supported versions.
621 mono_arch_cpu_enumerate_simd_versions (void)
623 /* SIMD is currently unimplemented */
628 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
632 MonoMethodSignature *sig;
633 MonoMethodHeader *header;
636 header = cfg->header;
638 sig = mono_method_signature (cfg->method);
640 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
642 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
643 MonoInst *ins = cfg->args [i];
645 ArgInfo *ainfo = &cinfo->args [i];
647 if (ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT))
650 if (ainfo->storage == ArgInIReg) {
651 /* The input registers are non-volatile */
652 ins->opcode = OP_REGVAR;
653 ins->dreg = 32 + ainfo->reg;
657 for (i = 0; i < cfg->num_varinfo; i++) {
658 MonoInst *ins = cfg->varinfo [i];
659 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
662 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
665 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
666 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
669 if (mono_is_regsize_var (ins->inst_vtype)) {
670 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
671 g_assert (i == vmv->idx);
672 vars = g_list_prepend (vars, vmv);
676 vars = mono_varlist_sort (cfg, vars, 0);
682 mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
685 guint32 reserved_regs;
686 MonoMethodHeader *header;
688 if (cfg->arch.reg_local0 > 0)
692 cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);
694 header = cfg->header;
696 /* Some registers are reserved for use by the prolog/epilog */
697 reserved_regs = header->num_clauses ? 4 : 3;
699 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
700 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
701 /* One register is needed by instrument_epilog to save the return value */
703 if (cinfo->reg_usage < 2)
704 /* Number of arguments passed to function call in instrument_prolog */
705 cinfo->reg_usage = 2;
708 cfg->arch.reg_in0 = 32;
709 cfg->arch.reg_local0 = cfg->arch.reg_in0 + cinfo->reg_usage + reserved_regs;
710 cfg->arch.reg_out0 = cfg->arch.reg_local0 + 16;
712 cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
713 cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
714 cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;
717 * Frames without handlers save sp to fp, frames with handlers save it into
718 * a dedicated register.
720 if (header->num_clauses)
721 cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
723 cfg->arch.reg_saved_sp = cfg->arch.reg_fp;
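/*
 * Resulting stacked register layout (illustrative summary):
 *   reg_in0 (r32) ..                incoming argument registers
 *   reserved_regs registers         prolog/epilog state: fp, b0, ar.pfs,
 *                                   plus saved sp / saved return value when needed
 *   reg_local0 .. reg_local0 + 15   locals used for global register allocation
 *   reg_out0 ..                     outgoing argument registers
 */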
725 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
726 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
727 cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
731 * Need to allocate at least 2 out registers for use by OP_THROW / the system
732 * exception throwing code.
734 cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
738 mono_arch_get_global_int_regs (MonoCompile *cfg)
743 mono_ia64_alloc_stacked_registers (cfg);
745 for (i = cfg->arch.reg_local0; i < cfg->arch.reg_out0; ++i) {
748 regs = g_list_prepend (regs, (gpointer)(gssize)(i));
755 * mono_arch_regalloc_cost:
757 * Return the cost, in number of memory references, of the action of
758 * allocating the variable VMV into a register during global register allocation.
762 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
764 /* FIXME: Increase costs linearly to avoid using all local registers */
770 mono_arch_allocate_vars (MonoCompile *cfg)
772 MonoMethodSignature *sig;
773 MonoMethodHeader *header;
776 guint32 locals_stack_size, locals_stack_align;
780 header = cfg->header;
782 sig = mono_method_signature (cfg->method);
784 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
787 * Determine whether the frame pointer can be eliminated.
788 * FIXME: Remove some of the restrictions.
790 cfg->arch.omit_fp = TRUE;
792 if (!debug_omit_fp ())
793 cfg->arch.omit_fp = FALSE;
795 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
796 cfg->arch.omit_fp = FALSE;
797 if (header->num_clauses)
798 cfg->arch.omit_fp = FALSE;
800 cfg->arch.omit_fp = FALSE;
801 if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
802 cfg->arch.omit_fp = FALSE;
803 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
804 cfg->arch.omit_fp = FALSE;
805 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
806 ArgInfo *ainfo = &cinfo->args [i];
808 if (ainfo->storage == ArgOnStack) {
810 * The stack offset can only be determined when the frame size is known.
813 cfg->arch.omit_fp = FALSE;
817 mono_ia64_alloc_stacked_registers (cfg);
820 * We use the ABI calling conventions for managed code as well.
821 * Exception: valuetypes are never passed or returned in registers.
824 if (cfg->arch.omit_fp) {
825 cfg->flags |= MONO_CFG_HAS_SPILLUP;
826 cfg->frame_reg = IA64_SP;
827 offset = ARGS_OFFSET;
830 /* Locals are allocated backwards from %fp */
831 cfg->frame_reg = cfg->arch.reg_fp;
835 if (cfg->method->save_lmf) {
839 if (sig->ret->type != MONO_TYPE_VOID) {
840 switch (cinfo->ret.storage) {
842 cfg->ret->opcode = OP_REGVAR;
843 cfg->ret->inst_c0 = cinfo->ret.reg;
846 cfg->ret->opcode = OP_REGVAR;
847 cfg->ret->inst_c0 = cinfo->ret.reg;
849 case ArgValuetypeAddrInIReg:
850 cfg->vret_addr->opcode = OP_REGVAR;
851 cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
854 /* Allocate a local to hold the result; the epilog will copy it to the correct place */
855 if (cfg->arch.omit_fp)
856 g_assert_not_reached ();
857 offset = ALIGN_TO (offset, 8);
858 offset += cinfo->ret.nslots * 8;
859 cfg->ret->opcode = OP_REGOFFSET;
860 cfg->ret->inst_basereg = cfg->frame_reg;
861 cfg->ret->inst_offset = - offset;
864 g_assert_not_reached ();
866 cfg->ret->dreg = cfg->ret->inst_c0;
869 /* Allocate locals */
870 offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
871 if (locals_stack_align) {
872 offset = ALIGN_TO (offset, locals_stack_align);
874 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
875 if (offsets [i] != -1) {
876 MonoInst *inst = cfg->varinfo [i];
877 inst->opcode = OP_REGOFFSET;
878 inst->inst_basereg = cfg->frame_reg;
879 if (cfg->arch.omit_fp)
880 inst->inst_offset = (offset + offsets [i]);
882 inst->inst_offset = - (offset + offsets [i]);
883 // printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
886 offset += locals_stack_size;
888 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
889 if (cfg->arch.omit_fp)
890 g_assert_not_reached ();
891 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
892 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
895 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
896 inst = cfg->args [i];
897 if (inst->opcode != OP_REGVAR) {
898 ArgInfo *ainfo = &cinfo->args [i];
899 gboolean inreg = TRUE;
902 if (sig->hasthis && (i == 0))
903 arg_type = &mono_defaults.object_class->byval_arg;
905 arg_type = sig->params [i - sig->hasthis];
907 /* FIXME: VOLATILE is only set if the liveness pass runs */
908 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
911 inst->opcode = OP_REGOFFSET;
913 switch (ainfo->storage) {
915 inst->opcode = OP_REGVAR;
916 inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
919 case ArgInFloatRegR4:
921 * Since float regs are volatile, we save the arguments to
922 * the stack in the prolog.
927 if (cfg->arch.omit_fp)
928 g_assert_not_reached ();
929 inst->opcode = OP_REGOFFSET;
930 inst->inst_basereg = cfg->frame_reg;
931 inst->inst_offset = ARGS_OFFSET + ainfo->offset;
940 if (!inreg && (ainfo->storage != ArgOnStack)) {
943 inst->opcode = OP_REGOFFSET;
944 inst->inst_basereg = cfg->frame_reg;
945 /* These arguments are saved to the stack in the prolog */
946 switch (ainfo->storage) {
948 if (ainfo->atype == AggregateSingleHFA)
949 size = ainfo->nslots * 4;
951 size = ainfo->nslots * 8;
954 size = sizeof (gpointer);
958 offset = ALIGN_TO (offset, sizeof (gpointer));
960 if (cfg->arch.omit_fp) {
961 inst->inst_offset = offset;
965 inst->inst_offset = - offset;
972 * FIXME: This doesn't work because some variables are allocated during local regalloc.
976 if (cfg->arch.omit_fp && offset == 16)
980 cfg->stack_offset = offset;
984 mono_arch_create_vars (MonoCompile *cfg)
986 MonoMethodSignature *sig;
989 sig = mono_method_signature (cfg->method);
991 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
993 if (cinfo->ret.storage == ArgAggregate)
994 cfg->ret_var_is_local = TRUE;
995 if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
996 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
997 if (G_UNLIKELY (cfg->verbose_level > 1)) {
998 printf ("vret_addr = ");
999 mono_print_ins (cfg->vret_addr);
1005 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
1009 MONO_INST_NEW (cfg, arg, OP_NOP);
1010 arg->sreg1 = tree->dreg;
1014 arg->opcode = OP_MOVE;
1015 arg->dreg = mono_alloc_ireg (cfg);
1017 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1020 arg->opcode = OP_FMOVE;
1021 arg->dreg = mono_alloc_freg (cfg);
1023 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1025 case ArgInFloatRegR4:
1026 arg->opcode = OP_FCONV_TO_R4;
1027 arg->dreg = mono_alloc_freg (cfg);
1029 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1032 g_assert_not_reached ();
1035 MONO_ADD_INS (cfg->cbb, arg);
1039 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1041 MonoMethodSignature *tmp_sig;
1043 /* Emit the signature cookie just before the implicit arguments */
1045 /* FIXME: Add support for signature tokens to AOT */
1046 cfg->disable_aot = TRUE;
1048 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
1051 * mono_ArgIterator_Setup assumes the signature cookie is
1052 * passed first and all the arguments which were before it are
1053 * passed on the stack after the signature. So compensate by
1054 * passing a different signature.
1056 tmp_sig = mono_metadata_signature_dup (call->signature);
1057 tmp_sig->param_count -= call->signature->sentinelpos;
1058 tmp_sig->sentinelpos = 0;
1059 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1061 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1062 sig_arg->dreg = mono_alloc_ireg (cfg);
1063 sig_arg->inst_p0 = tmp_sig;
1064 MONO_ADD_INS (cfg->cbb, sig_arg);
1066 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
1070 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1073 MonoMethodSignature *sig;
1074 int i, n, stack_size;
1080 mono_ia64_alloc_stacked_registers (cfg);
1082 sig = call->signature;
1083 n = sig->param_count + sig->hasthis;
1085 cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
1087 if (cinfo->ret.storage == ArgAggregate) {
1092 * The valuetype is in registers after the call, and needs to be copied
1093 * to the stack. Save the address to a local here, so the call
1094 * instruction can access it.
1096 local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1097 local->flags |= MONO_INST_VOLATILE;
1098 cfg->arch.ret_var_addr_local = local;
1100 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1101 vtarg->sreg1 = call->vret_var->dreg;
1102 vtarg->dreg = local->dreg;
1103 MONO_ADD_INS (cfg->cbb, vtarg);
1106 if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
1107 add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
1110 for (i = 0; i < n; ++i) {
1113 ainfo = cinfo->args + i;
1115 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1116 /* Emit the signature cookie just before the implicit arguments */
1117 emit_sig_cookie (cfg, call, cinfo);
1120 in = call->args [i];
1122 if (sig->hasthis && (i == 0))
1123 arg_type = &mono_defaults.object_class->byval_arg;
1125 arg_type = sig->params [i - sig->hasthis];
1127 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
1131 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1132 size = sizeof (MonoTypedRef);
1133 align = sizeof (gpointer);
1135 else if (sig->pinvoke)
1136 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1139 * Other backends use mono_type_stack_size (), but that
1140 * aligns the size to 8, which is larger than the size of
1141 * the source, leading to reads of invalid memory if the
1142 * source is at the end of address space.
1144 size = mono_class_value_size (in->klass, &align);
1150 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1151 arg->sreg1 = in->dreg;
1152 arg->klass = in->klass;
1153 arg->backend.size = size;
1154 arg->inst_p0 = call;
1155 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1156 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1158 MONO_ADD_INS (cfg->cbb, arg);
1162 switch (ainfo->storage) {
1164 add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
1167 case ArgInFloatRegR4:
1168 add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
1171 if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
1172 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
1173 else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
1174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
1176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
1179 g_assert_not_reached ();
1184 /* Handle the case where there are no implicit arguments */
1185 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1186 emit_sig_cookie (cfg, call, cinfo);
1189 call->stack_usage = cinfo->stack_usage;
1190 cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
1194 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1196 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1197 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1198 int size = ins->backend.size;
1200 if (ainfo->storage == ArgAggregate) {
1201 MonoInst *load, *store;
1205 * Part of the structure is passed in registers.
1207 for (i = 0; i < ainfo->nregs; ++i) {
1208 slot = ainfo->reg + i;
1210 if (ainfo->atype == AggregateSingleHFA) {
1211 MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
1212 load->inst_basereg = src->dreg;
1213 load->inst_offset = i * 4;
1214 load->dreg = mono_alloc_freg (cfg);
1216 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
1217 } else if (ainfo->atype == AggregateDoubleHFA) {
1218 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1219 load->inst_basereg = src->dreg;
1220 load->inst_offset = i * 8;
1221 load->dreg = mono_alloc_freg (cfg);
1223 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
1225 MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
1226 load->inst_basereg = src->dreg;
1227 load->inst_offset = i * 8;
1228 load->dreg = mono_alloc_ireg (cfg);
1230 mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
1232 MONO_ADD_INS (cfg->cbb, load);
1236 * Part of the structure is passed on the stack.
1238 for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
1239 slot = ainfo->reg + i;
1241 MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
1242 load->inst_basereg = src->dreg;
1243 load->inst_offset = i * sizeof (gpointer);
1244 load->dreg = mono_alloc_preg (cfg);
1245 MONO_ADD_INS (cfg->cbb, load);
1247 MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
1248 store->sreg1 = load->dreg;
1249 store->inst_destbasereg = IA64_SP;
1250 store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
1251 MONO_ADD_INS (cfg->cbb, store);
1254 mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
1259 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1261 CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
1263 switch (cinfo->ret.storage) {
1265 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1268 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1271 g_assert_not_reached ();
1276 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1281 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1283 MonoInst *ins, *n, *last_ins = NULL;
1286 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1287 switch (ins->opcode) {
1295 if (ins->dreg == ins->sreg1) {
1296 MONO_DELETE_INS (bb, ins);
1302 * OP_MOVE sreg, dreg
1303 * OP_MOVE dreg, sreg
1305 if (last_ins && last_ins->opcode == OP_MOVE &&
1306 ins->sreg1 == last_ins->dreg &&
1307 ins->dreg == last_ins->sreg1) {
1308 MONO_DELETE_INS (bb, ins);
1314 /* remove unnecessary multiplication by 1 */
1315 if (ins->inst_imm == 1) {
1316 if (ins->dreg != ins->sreg1) {
1317 ins->opcode = OP_MOVE;
1319 MONO_DELETE_INS (bb, ins);
1329 bb->last_ins = last_ins;
1332 int cond_to_ia64_cmp [][3] = {
1333 {OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
1334 {OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
1335 {OP_IA64_CMP_LE, OP_IA64_CMP4_LE, OP_IA64_FCMP_LE},
1336 {OP_IA64_CMP_GE, OP_IA64_CMP4_GE, OP_IA64_FCMP_GE},
1337 {OP_IA64_CMP_LT, OP_IA64_CMP4_LT, OP_IA64_FCMP_LT},
1338 {OP_IA64_CMP_GT, OP_IA64_CMP4_GT, OP_IA64_FCMP_GT},
1339 {OP_IA64_CMP_LE_UN, OP_IA64_CMP4_LE_UN, OP_IA64_FCMP_LE_UN},
1340 {OP_IA64_CMP_GE_UN, OP_IA64_CMP4_GE_UN, OP_IA64_FCMP_GE_UN},
1341 {OP_IA64_CMP_LT_UN, OP_IA64_CMP4_LT_UN, OP_IA64_FCMP_LT_UN},
1342 {OP_IA64_CMP_GT_UN, OP_IA64_CMP4_GT_UN, OP_IA64_FCMP_GT_UN}
1346 opcode_to_ia64_cmp (int opcode, int cmp_opcode)
1348 return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
1351 int cond_to_ia64_cmp_imm [][3] = {
1352 {OP_IA64_CMP_EQ_IMM, OP_IA64_CMP4_EQ_IMM, 0},
1353 {OP_IA64_CMP_NE_IMM, OP_IA64_CMP4_NE_IMM, 0},
1354 {OP_IA64_CMP_GE_IMM, OP_IA64_CMP4_GE_IMM, 0},
1355 {OP_IA64_CMP_LE_IMM, OP_IA64_CMP4_LE_IMM, 0},
1356 {OP_IA64_CMP_GT_IMM, OP_IA64_CMP4_GT_IMM, 0},
1357 {OP_IA64_CMP_LT_IMM, OP_IA64_CMP4_LT_IMM, 0},
1358 {OP_IA64_CMP_GE_UN_IMM, OP_IA64_CMP4_GE_UN_IMM, 0},
1359 {OP_IA64_CMP_LE_UN_IMM, OP_IA64_CMP4_LE_UN_IMM, 0},
1360 {OP_IA64_CMP_GT_UN_IMM, OP_IA64_CMP4_GT_UN_IMM, 0},
1361 {OP_IA64_CMP_LT_UN_IMM, OP_IA64_CMP4_LT_UN_IMM, 0},
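/*
 * Explanatory note: the rows above mirror LT/GT and LE/GE relative to
 * cond_to_ia64_cmp because the immediate compare forms take the immediate as
 * their first operand; the lowering pass moves sreg1 into sreg2, so e.g.
 * 'x <= IMM' is emitted as 'IMM >= x' (OP_IA64_CMP_GE_IMM).
 */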
1365 opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
1367 /* The condition needs to be reversed */
1368 return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
1371 #define NEW_INS(cfg,dest,op) do { \
1372 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
1373 (dest)->opcode = (op); \
1374 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
1375 last_ins = (dest); \
1379 * mono_arch_lowering_pass:
1381 * Converts complex opcodes into simpler ones so that each IR instruction
1382 * corresponds to one machine instruction.
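 *
 * Illustrative example (not from the original comment): since ia64 loads and
 * stores take no displacement, a store such as OP_STOREI8_MEMBASE_IMM
 * [basereg + offset], imm is rewritten below into an address computation
 * (OP_ADD_IMM, or OP_I8CONST + OP_LADD for large offsets), an OP_I8CONST for
 * the value (or r0 when imm == 0), and a plain OP_STOREI8_MEMBASE_REG with a
 * zero offset.
 */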
1385 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1387 MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
1390 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1391 switch (ins->opcode) {
1392 case OP_STOREI1_MEMBASE_IMM:
1393 case OP_STOREI2_MEMBASE_IMM:
1394 case OP_STOREI4_MEMBASE_IMM:
1395 case OP_STOREI8_MEMBASE_IMM:
1396 case OP_STORE_MEMBASE_IMM:
1397 /* There are no store_membase instructions on ia64 */
1398 if (ins->inst_offset == 0) {
1400 } else if (ia64_is_imm14 (ins->inst_offset)) {
1401 NEW_INS (cfg, temp2, OP_ADD_IMM);
1402 temp2->sreg1 = ins->inst_destbasereg;
1403 temp2->inst_imm = ins->inst_offset;
1404 temp2->dreg = mono_alloc_ireg (cfg);
1407 NEW_INS (cfg, temp, OP_I8CONST);
1408 temp->inst_c0 = ins->inst_offset;
1409 temp->dreg = mono_alloc_ireg (cfg);
1411 NEW_INS (cfg, temp2, OP_LADD);
1412 temp2->sreg1 = ins->inst_destbasereg;
1413 temp2->sreg2 = temp->dreg;
1414 temp2->dreg = mono_alloc_ireg (cfg);
1417 switch (ins->opcode) {
1418 case OP_STOREI1_MEMBASE_IMM:
1419 ins->opcode = OP_STOREI1_MEMBASE_REG;
1421 case OP_STOREI2_MEMBASE_IMM:
1422 ins->opcode = OP_STOREI2_MEMBASE_REG;
1424 case OP_STOREI4_MEMBASE_IMM:
1425 ins->opcode = OP_STOREI4_MEMBASE_REG;
1427 case OP_STOREI8_MEMBASE_IMM:
1428 case OP_STORE_MEMBASE_IMM:
1429 ins->opcode = OP_STOREI8_MEMBASE_REG;
1432 g_assert_not_reached ();
1435 if (ins->inst_imm == 0)
1436 ins->sreg1 = IA64_R0;
1438 NEW_INS (cfg, temp3, OP_I8CONST);
1439 temp3->inst_c0 = ins->inst_imm;
1440 temp3->dreg = mono_alloc_ireg (cfg);
1441 ins->sreg1 = temp3->dreg;
1444 ins->inst_offset = 0;
1446 ins->inst_destbasereg = temp2->dreg;
1448 case OP_STOREI1_MEMBASE_REG:
1449 case OP_STOREI2_MEMBASE_REG:
1450 case OP_STOREI4_MEMBASE_REG:
1451 case OP_STOREI8_MEMBASE_REG:
1452 case OP_STORER4_MEMBASE_REG:
1453 case OP_STORER8_MEMBASE_REG:
1454 case OP_STORE_MEMBASE_REG:
1455 /* There are no store_membase instructions on ia64 */
1456 if (ins->inst_offset == 0) {
1459 else if (ia64_is_imm14 (ins->inst_offset)) {
1460 NEW_INS (cfg, temp2, OP_ADD_IMM);
1461 temp2->sreg1 = ins->inst_destbasereg;
1462 temp2->inst_imm = ins->inst_offset;
1463 temp2->dreg = mono_alloc_ireg (cfg);
1466 NEW_INS (cfg, temp, OP_I8CONST);
1467 temp->inst_c0 = ins->inst_offset;
1468 temp->dreg = mono_alloc_ireg (cfg);
1469 NEW_INS (cfg, temp2, OP_LADD);
1470 temp2->sreg1 = ins->inst_destbasereg;
1471 temp2->sreg2 = temp->dreg;
1472 temp2->dreg = mono_alloc_ireg (cfg);
1475 ins->inst_offset = 0;
1476 ins->inst_destbasereg = temp2->dreg;
1478 case OP_LOADI1_MEMBASE:
1479 case OP_LOADU1_MEMBASE:
1480 case OP_LOADI2_MEMBASE:
1481 case OP_LOADU2_MEMBASE:
1482 case OP_LOADI4_MEMBASE:
1483 case OP_LOADU4_MEMBASE:
1484 case OP_LOADI8_MEMBASE:
1485 case OP_LOAD_MEMBASE:
1486 case OP_LOADR4_MEMBASE:
1487 case OP_LOADR8_MEMBASE:
1488 case OP_ATOMIC_EXCHANGE_I4:
1489 case OP_ATOMIC_EXCHANGE_I8:
1490 case OP_ATOMIC_ADD_NEW_I4:
1491 case OP_ATOMIC_ADD_NEW_I8:
1492 case OP_ATOMIC_ADD_IMM_NEW_I4:
1493 case OP_ATOMIC_ADD_IMM_NEW_I8:
1494 /* There are no membase instructions on ia64 */
1495 if (ins->inst_offset == 0) {
1498 else if (ia64_is_imm14 (ins->inst_offset)) {
1499 NEW_INS (cfg, temp2, OP_ADD_IMM);
1500 temp2->sreg1 = ins->inst_basereg;
1501 temp2->inst_imm = ins->inst_offset;
1502 temp2->dreg = mono_alloc_ireg (cfg);
1505 NEW_INS (cfg, temp, OP_I8CONST);
1506 temp->inst_c0 = ins->inst_offset;
1507 temp->dreg = mono_alloc_ireg (cfg);
1508 NEW_INS (cfg, temp2, OP_LADD);
1509 temp2->sreg1 = ins->inst_basereg;
1510 temp2->sreg2 = temp->dreg;
1511 temp2->dreg = mono_alloc_ireg (cfg);
1514 ins->inst_offset = 0;
1515 ins->inst_basereg = temp2->dreg;
1535 case OP_ISHR_UN_IMM:
1536 case OP_LSHR_UN_IMM: {
1537 gboolean is_imm = FALSE;
1538 gboolean switched = FALSE;
1540 if (ins->opcode == OP_AND_IMM && ins->inst_imm == 255) {
1541 ins->opcode = OP_ZEXT_I1;
1545 switch (ins->opcode) {
1549 is_imm = ia64_is_imm14 (ins->inst_imm);
1554 is_imm = ia64_is_imm14 (- (ins->inst_imm));
1556 /* A = B - IMM -> A = B + (-IMM) */
1557 ins->inst_imm = - ins->inst_imm;
1558 ins->opcode = OP_IADD_IMM;
1569 is_imm = ia64_is_imm8 (ins->inst_imm);
1578 case OP_ISHR_UN_IMM:
1579 case OP_LSHR_UN_IMM:
1580 is_imm = (ins->inst_imm >= 0) && (ins->inst_imm < 64);
1588 ins->sreg2 = ins->sreg1;
1592 ins->opcode = mono_op_imm_to_op (ins->opcode);
1594 if (ins->inst_imm == 0)
1595 ins->sreg2 = IA64_R0;
1597 NEW_INS (cfg, temp, OP_I8CONST);
1598 temp->inst_c0 = ins->inst_imm;
1599 temp->dreg = mono_alloc_ireg (cfg);
1600 ins->sreg2 = temp->dreg;
1604 case OP_COMPARE_IMM:
1605 case OP_ICOMPARE_IMM:
1606 case OP_LCOMPARE_IMM: {
1607 /* Instead of compare+b<cond>, ia64 has compare<cond>+br */
1613 /* Branch opts can eliminate the branch */
1614 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
1620 * The compare_imm instructions have switched up arguments, and
1621 * some of them take an imm between -127 and 128.
1624 cond = mono_opcode_to_cond (next->opcode);
1625 if ((cond == CMP_LT) || (cond == CMP_GE))
1626 imm = ia64_is_imm8 (ins->inst_imm - 1);
1627 else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
1628 imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
1630 imm = ia64_is_imm8 (ins->inst_imm);
1633 ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
1634 ins->sreg2 = ins->sreg1;
1637 ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
1639 if (ins->inst_imm == 0)
1640 ins->sreg2 = IA64_R0;
1642 NEW_INS (cfg, temp, OP_I8CONST);
1643 temp->inst_c0 = ins->inst_imm;
1644 temp->dreg = mono_alloc_ireg (cfg);
1645 ins->sreg2 = temp->dreg;
1649 if (MONO_IS_COND_BRANCH_OP (next)) {
1650 next->opcode = OP_IA64_BR_COND;
1651 next->inst_target_bb = next->inst_true_bb;
1652 } else if (MONO_IS_COND_EXC (next)) {
1653 next->opcode = OP_IA64_COND_EXC;
1654 } else if (MONO_IS_SETCC (next)) {
1655 next->opcode = OP_IA64_CSET;
1657 printf ("%s\n", mono_inst_name (next->opcode));
1667 /* Instead of compare+b<cond>, ia64 has compare<cond>+br */
1671 /* Branch opts can eliminate the branch */
1672 if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
1677 ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
1679 if (MONO_IS_COND_BRANCH_OP (next)) {
1680 next->opcode = OP_IA64_BR_COND;
1681 next->inst_target_bb = next->inst_true_bb;
1682 } else if (MONO_IS_COND_EXC (next)) {
1683 next->opcode = OP_IA64_COND_EXC;
1684 } else if (MONO_IS_SETCC (next)) {
1685 next->opcode = OP_IA64_CSET;
1687 printf ("%s\n", mono_inst_name (next->opcode));
1698 /* The front end removes the fcompare, so introduce it again */
1699 NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
1700 temp->sreg1 = ins->sreg1;
1701 temp->sreg2 = ins->sreg2;
1703 ins->opcode = OP_IA64_CSET;
1704 MONO_INST_NULLIFY_SREGS (ins);
1710 gboolean found = FALSE;
1711 int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;
1713 /* First the easy cases */
1714 if (ins->inst_imm == 1) {
1715 ins->opcode = OP_MOVE;
1718 for (i = 1; i < 64; ++i)
1719 if (ins->inst_imm == (((gint64)1) << i)) {
1720 ins->opcode = shl_op;
1726 /* This could be optimized */
1729 for (i = 0; i < 64; ++i) {
1730 if (ins->inst_imm & (((gint64)1) << i)) {
1731 NEW_INS (cfg, temp, shl_op);
1732 temp->dreg = mono_alloc_ireg (cfg);
1733 temp->sreg1 = ins->sreg1;
1737 sum_reg = temp->dreg;
1739 NEW_INS (cfg, temp2, OP_LADD);
1740 temp2->dreg = mono_alloc_ireg (cfg);
1741 temp2->sreg1 = sum_reg;
1742 temp2->sreg2 = temp->dreg;
1743 sum_reg = temp2->dreg;
1747 ins->opcode = OP_MOVE;
1748 ins->sreg1 = sum_reg;
1752 case OP_LCONV_TO_OVF_U4:
1753 NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
1754 temp->sreg1 = ins->sreg1;
1755 temp->sreg2 = IA64_R0;
1757 NEW_INS (cfg, temp, OP_IA64_COND_EXC);
1758 temp->inst_p1 = (char*)"OverflowException";
1760 ins->opcode = OP_MOVE;
1762 case OP_LCONV_TO_OVF_I4_UN:
1763 NEW_INS (cfg, temp, OP_ICONST);
1764 temp->inst_c0 = 0x7fffffff;
1765 temp->dreg = mono_alloc_ireg (cfg);
1767 NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
1768 temp2->sreg1 = ins->sreg1;
1769 temp2->sreg2 = temp->dreg;
1771 NEW_INS (cfg, temp, OP_IA64_COND_EXC);
1772 temp->inst_p1 = (char*)"OverflowException";
1774 ins->opcode = OP_MOVE;
1776 case OP_FCONV_TO_I4:
1777 case OP_FCONV_TO_I2:
1778 case OP_FCONV_TO_U2:
1779 case OP_FCONV_TO_I1:
1780 case OP_FCONV_TO_U1:
1781 NEW_INS (cfg, temp, OP_FCONV_TO_I8);
1782 temp->sreg1 = ins->sreg1;
1783 temp->dreg = ins->dreg;
1785 switch (ins->opcode) {
1786 case OP_FCONV_TO_I4:
1787 ins->opcode = OP_SEXT_I4;
1789 case OP_FCONV_TO_I2:
1790 ins->opcode = OP_SEXT_I2;
1792 case OP_FCONV_TO_U2:
1793 ins->opcode = OP_ZEXT_I4;
1795 case OP_FCONV_TO_I1:
1796 ins->opcode = OP_SEXT_I1;
1798 case OP_FCONV_TO_U1:
1799 ins->opcode = OP_ZEXT_I1;
1802 g_assert_not_reached ();
1804 ins->sreg1 = ins->dreg;
1812 bb->last_ins = last_ins;
1814 bb->max_vreg = cfg->next_vreg;
1818 * emit_load_volatile_arguments:
1820 * Load volatile arguments from the stack to the original input registers.
1821 * Required before a tail call.
1823 static Ia64CodegenState
1824 emit_load_volatile_arguments (MonoCompile *cfg, Ia64CodegenState code)
1826 MonoMethod *method = cfg->method;
1827 MonoMethodSignature *sig;
1832 /* FIXME: Generate intermediate code instead */
1834 sig = mono_method_signature (method);
1836 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
1838 /* This is the opposite of the code in emit_prolog */
1839 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1840 ArgInfo *ainfo = cinfo->args + i;
1841 gint32 stack_offset;
1844 ins = cfg->args [i];
1846 if (sig->hasthis && (i == 0))
1847 arg_type = &mono_defaults.object_class->byval_arg;
1849 arg_type = sig->params [i - sig->hasthis];
1851 arg_type = mono_type_get_underlying_type (arg_type);
1853 stack_offset = ainfo->offset + ARGS_OFFSET;
1855 /* Save volatile arguments to the stack */
1856 if (ins->opcode != OP_REGVAR) {
1857 switch (ainfo->storage) {
1860 /* FIXME: big offsets */
1861 g_assert (ins->opcode == OP_REGOFFSET);
1862 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
1863 if (arg_type->byref)
1864 ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
1866 switch (arg_type->type) {
1868 ia64_ldfs (code, ainfo->reg, GP_SCRATCH_REG);
1871 ia64_ldfd (code, ainfo->reg, GP_SCRATCH_REG);
1874 ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
1886 if (ins->opcode == OP_REGVAR) {
1887 /* Argument allocated to (non-volatile) register */
1888 switch (ainfo->storage) {
1890 if (ins->dreg != cfg->arch.reg_in0 + ainfo->reg)
1891 ia64_mov (code, cfg->arch.reg_in0 + ainfo->reg, ins->dreg);
1894 ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
1895 ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
1906 static Ia64CodegenState
1907 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, Ia64CodegenState code)
1912 /* Move return value to the target register */
1913 switch (ins->opcode) {
1915 case OP_VOIDCALL_REG:
1916 case OP_VOIDCALL_MEMBASE:
1920 case OP_CALL_MEMBASE:
1923 case OP_LCALL_MEMBASE:
1924 g_assert (ins->dreg == IA64_R8);
1928 case OP_FCALL_MEMBASE:
1929 g_assert (ins->dreg == 8);
1930 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
1931 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
1935 case OP_VCALL_MEMBASE:
1938 case OP_VCALL2_MEMBASE: {
1941 cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
1942 storage = cinfo->ret.storage;
1944 if (storage == ArgAggregate) {
1945 MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;
1947 /* Load address of stack space allocated for the return value */
1948 ia64_movl (code, GP_SCRATCH_REG, local->inst_offset);
1949 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, local->inst_basereg);
1950 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
1952 for (i = 0; i < cinfo->ret.nregs; ++i) {
1953 switch (cinfo->ret.atype) {
1954 case AggregateNormal:
1955 ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
1957 case AggregateSingleHFA:
1958 ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 4, 0);
1960 case AggregateDoubleHFA:
1961 ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
1964 g_assert_not_reached ();
1971 g_assert_not_reached ();
1977 #define add_patch_info(cfg,code,patch_type,data) do { \
1978 mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
1981 #define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
1982 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1984 add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
1986 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
1987 ia64_br_cond_pred (code, (predicate), 0); \
1990 static Ia64CodegenState
1991 emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
1993 add_patch_info (cfg, code, patch_type, data);
1995 if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
1997 /* mono_arch_patch_callsite will patch this */
1998 /* mono_arch_nullify_class_init_trampoline will patch this */
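/*
 * Explanatory note: the movl below is patched with the address of a function
 * descriptor; its first 8 bytes hold the entry point (loaded into b6) and the
 * next 8 bytes hold the callee's gp, which is loaded into IA64_GP before the
 * indirect call.
 */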
1999 ia64_movl (code, GP_SCRATCH_REG, 0);
2000 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
2001 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
2002 ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
2003 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2006 /* Can't use a direct call since the branch displacement field might be too small to reach the target */
2007 /* mono_arch_patch_callsite will patch this */
2008 ia64_movl (code, GP_SCRATCH_REG, 0);
2009 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2010 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2016 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2019 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2024 Ia64CodegenState code;
2025 guint8 *code_start = cfg->native_code + cfg->code_len;
2026 MonoInst *last_ins = NULL;
2027 guint last_offset = 0;
2030 if (cfg->opt & MONO_OPT_LOOP) {
2034 if (cfg->verbose_level > 2)
2035 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2037 cpos = bb->max_offset;
2039 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2043 offset = code_start - cfg->native_code;
2045 ia64_codegen_init (code, code_start);
2048 if (strstr (cfg->method->name, "conv_ovf_i1") && (bb->block_num == 2))
2052 MONO_BB_FOR_EACH_INS (bb, ins) {
2053 offset = code.buf - cfg->native_code;
2055 max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;
2057 while (offset + max_len + 16 > cfg->code_size) {
2058 ia64_codegen_close (code);
2060 offset = code.buf - cfg->native_code;
2062 cfg->code_size *= 2;
2063 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2064 code_start = cfg->native_code + offset;
2065 cfg->stat_code_reallocs++;
2067 ia64_codegen_init (code, code_start);
2070 mono_debug_record_line_number (cfg, ins, offset);
2072 switch (ins->opcode) {
2075 if (ia64_is_imm14 (ins->inst_c0))
2076 ia64_adds_imm (code, ins->dreg, ins->inst_c0, IA64_R0);
2078 ia64_movl (code, ins->dreg, ins->inst_c0);
2081 add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2082 ia64_movl (code, ins->dreg, 0);
2085 ia64_mov (code, ins->dreg, ins->sreg1);
2088 case OP_IA64_BR_COND: {
2090 if (ins->opcode == OP_IA64_BR_COND)
2092 if (ins->inst_target_bb->native_offset) {
2093 guint8 *pos = code.buf + code.nins;
2095 ia64_br_cond_pred (code, pred, 0);
2096 ia64_begin_bundle (code);
2097 ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
2099 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2100 ia64_br_cond_pred (code, pred, 0);
2105 ia64_begin_bundle (code);
2106 ins->inst_c0 = code.buf - cfg->native_code;
2109 case OP_RELAXED_NOP:
2111 case OP_DUMMY_STORE:
2112 case OP_NOT_REACHED:
2116 ia64_mov_to_br (code, IA64_B6, ins->sreg1);
2117 ia64_br_cond_reg (code, IA64_B6);
2121 ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
2125 ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
2129 ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
2133 ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
2137 ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
2141 ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
2145 ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
2149 ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
2152 ia64_sxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2153 ia64_shr (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2156 ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
2159 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2160 ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2163 ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
2166 /* p6 and p7 are set if there is signed/unsigned overflow */
2168 /* Set p8-p9 == (sreg2 > 0) */
2169 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2171 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2173 /* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
2174 ia64_cmp4_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2175 /* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
2176 ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2178 /* res <u sreg1 => unsigned overflow */
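/* (For unsigned addition, a wrapped result is smaller than either operand, so this catches the carry-out case.) */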
2179 ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2181 /* FIXME: Predicate this since this is a side effect */
2182 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2185 /* p6 and p7 are set if there is signed/unsigned overflow */
2187 /* Set p8-p9 == (sreg2 > 0) */
2188 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2190 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2192 /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
2193 ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2194 /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
2195 ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2197 /* sreg1 <u sreg2 => unsigned overflow */
2198 ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2200 /* FIXME: Predicate this since this is a side effect */
2201 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2204 /* Same as OP_IADDCC */
2205 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2207 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2209 ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2210 ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2212 ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2214 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2217 /* Same as OP_ISUBCC */
2219 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2221 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2223 ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2224 ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2226 ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2228 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2233 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2238 ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2242 ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2246 ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2251 ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2255 ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2258 g_assert (ins->inst_imm <= 64);
2259 ia64_extr (code, ins->dreg, ins->sreg1, ins->inst_imm, 32 - ins->inst_imm);
2261 case OP_ISHR_UN_IMM:
2262 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2263 ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
2265 case OP_LSHR_UN_IMM:
2266 ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2269 /* Based on gcc code */
2270 ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
2271 ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
2272 ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
2273 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2276 case OP_STOREI1_MEMBASE_REG:
2277 ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2279 case OP_STOREI2_MEMBASE_REG:
2280 ia64_st2_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2282 case OP_STOREI4_MEMBASE_REG:
2283 ia64_st4_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2285 case OP_STOREI8_MEMBASE_REG:
2286 case OP_STORE_MEMBASE_REG:
2287 if (ins->inst_offset != 0) {
2288 /* This is generated by local regalloc */
2289 if (ia64_is_imm14 (ins->inst_offset)) {
2290 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2292 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2293 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2295 ins->inst_destbasereg = GP_SCRATCH_REG;
2297 ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2300 case OP_IA64_STOREI1_MEMBASE_INC_REG:
2301 ia64_st1_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 1, 0);
2303 case OP_IA64_STOREI2_MEMBASE_INC_REG:
2304 ia64_st2_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 2, 0);
2306 case OP_IA64_STOREI4_MEMBASE_INC_REG:
2307 ia64_st4_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 4, 0);
2309 case OP_IA64_STOREI8_MEMBASE_INC_REG:
2310 ia64_st8_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 8, 0);
2313 case OP_LOADU1_MEMBASE:
2314 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2316 case OP_LOADU2_MEMBASE:
2317 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2319 case OP_LOADU4_MEMBASE:
2320 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2322 case OP_LOADI1_MEMBASE:
2323 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2324 ia64_sxt1 (code, ins->dreg, ins->dreg);
2326 case OP_LOADI2_MEMBASE:
2327 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2328 ia64_sxt2 (code, ins->dreg, ins->dreg);
2330 case OP_LOADI4_MEMBASE:
2331 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2332 ia64_sxt4 (code, ins->dreg, ins->dreg);
2334 case OP_LOAD_MEMBASE:
2335 case OP_LOADI8_MEMBASE:
2336 if (ins->inst_offset != 0) {
2337 /* This is generated by local regalloc */
2338 if (ia64_is_imm14 (ins->inst_offset)) {
2339 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2341 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2342 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2344 ins->inst_basereg = GP_SCRATCH_REG;
2346 ia64_ld8 (code, ins->dreg, ins->inst_basereg);
2349 case OP_IA64_LOADU1_MEMBASE_INC:
2350 ia64_ld1_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 1, 0);
2352 case OP_IA64_LOADU2_MEMBASE_INC:
2353 ia64_ld2_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 2, 0);
2355 case OP_IA64_LOADU4_MEMBASE_INC:
2356 ia64_ld4_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 4, 0);
2358 case OP_IA64_LOADI8_MEMBASE_INC:
2359 ia64_ld8_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 8, 0);
2363 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2366 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2369 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2372 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2375 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2378 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2381 /* Compare opcodes */
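/*
 * Each compare below sets the predicate pair p6/p7 (p6 = condition holds,
 * p7 = it does not); the OP_IA64_BR_COND, OP_IA64_CSET and OP_IA64_COND_EXC
 * opcodes produced by the lowering pass then test those predicates.
 */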
2382 case OP_IA64_CMP4_EQ:
2383 ia64_cmp4_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2385 case OP_IA64_CMP4_NE:
2386 ia64_cmp4_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2388 case OP_IA64_CMP4_LE:
2389 ia64_cmp4_le (code, 6, 7, ins->sreg1, ins->sreg2);
2391 case OP_IA64_CMP4_LT:
2392 ia64_cmp4_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2394 case OP_IA64_CMP4_GE:
2395 ia64_cmp4_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2397 case OP_IA64_CMP4_GT:
2398 ia64_cmp4_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2400 case OP_IA64_CMP4_LT_UN:
2401 ia64_cmp4_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2403 case OP_IA64_CMP4_LE_UN:
2404 ia64_cmp4_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2406 case OP_IA64_CMP4_GT_UN:
2407 ia64_cmp4_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2409 case OP_IA64_CMP4_GE_UN:
2410 ia64_cmp4_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2412 case OP_IA64_CMP_EQ:
2413 ia64_cmp_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2415 case OP_IA64_CMP_NE:
2416 ia64_cmp_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2418 case OP_IA64_CMP_LE:
2419 ia64_cmp_le (code, 6, 7, ins->sreg1, ins->sreg2);
2421 case OP_IA64_CMP_LT:
2422 ia64_cmp_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2424 case OP_IA64_CMP_GE:
2425 ia64_cmp_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2427 case OP_IA64_CMP_GT:
2428 ia64_cmp_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2430 case OP_IA64_CMP_GT_UN:
2431 ia64_cmp_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2433 case OP_IA64_CMP_LT_UN:
2434 ia64_cmp_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2436 case OP_IA64_CMP_GE_UN:
2437 ia64_cmp_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2439 case OP_IA64_CMP_LE_UN:
2440 ia64_cmp_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2442 case OP_IA64_CMP4_EQ_IMM:
2443 ia64_cmp4_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2445 case OP_IA64_CMP4_NE_IMM:
2446 ia64_cmp4_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2448 case OP_IA64_CMP4_LE_IMM:
2449 ia64_cmp4_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2451 case OP_IA64_CMP4_LT_IMM:
2452 ia64_cmp4_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2454 case OP_IA64_CMP4_GE_IMM:
2455 ia64_cmp4_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2457 case OP_IA64_CMP4_GT_IMM:
2458 ia64_cmp4_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2460 case OP_IA64_CMP4_LT_UN_IMM:
2461 ia64_cmp4_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2463 case OP_IA64_CMP4_LE_UN_IMM:
2464 ia64_cmp4_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2466 case OP_IA64_CMP4_GT_UN_IMM:
2467 ia64_cmp4_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2469 case OP_IA64_CMP4_GE_UN_IMM:
2470 ia64_cmp4_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2472 case OP_IA64_CMP_EQ_IMM:
2473 ia64_cmp_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2475 case OP_IA64_CMP_NE_IMM:
2476 ia64_cmp_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2478 case OP_IA64_CMP_LE_IMM:
2479 ia64_cmp_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2481 case OP_IA64_CMP_LT_IMM:
2482 ia64_cmp_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2484 case OP_IA64_CMP_GE_IMM:
2485 ia64_cmp_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2487 case OP_IA64_CMP_GT_IMM:
2488 ia64_cmp_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2490 case OP_IA64_CMP_GT_UN_IMM:
2491 ia64_cmp_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2493 case OP_IA64_CMP_LT_UN_IMM:
2494 ia64_cmp_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2496 case OP_IA64_CMP_GE_UN_IMM:
2497 ia64_cmp_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2499 case OP_IA64_CMP_LE_UN_IMM:
2500 ia64_cmp_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2502 case OP_IA64_FCMP_EQ:
2503 ia64_fcmp_eq_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2505 case OP_IA64_FCMP_NE:
2506 ia64_fcmp_ne_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2508 case OP_IA64_FCMP_LT:
2509 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2511 case OP_IA64_FCMP_GT:
2512 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2514 case OP_IA64_FCMP_LE:
2515 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2517 case OP_IA64_FCMP_GE:
2518 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2520 case OP_IA64_FCMP_GT_UN:
2521 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2522 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2524 case OP_IA64_FCMP_LT_UN:
2525 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2526 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2528 case OP_IA64_FCMP_GE_UN:
2529 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2530 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2532 case OP_IA64_FCMP_LE_UN:
2533 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2534 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
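/*
 * For the *_UN variants above: the ordered fcmp sets p6/p7 first; the
 * p7-predicated fcmp.unord then sets p6 when either operand is NaN, so
 * p6 ends up meaning "condition holds or operands are unordered".
 */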
2537 case OP_COND_EXC_IOV:
2538 case OP_COND_EXC_OV:
2539 emit_cond_system_exception (cfg, code, "OverflowException", 6);
2541 case OP_COND_EXC_IC:
2543 emit_cond_system_exception (cfg, code, "OverflowException", 7);
2545 case OP_IA64_COND_EXC:
2546 emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
2549 ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
2550 ia64_no_stop (code);
2551 ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
2553 case OP_ICONV_TO_I1:
2554 case OP_LCONV_TO_I1:
2555 /* FIXME: Is this needed ? */
2556 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2558 case OP_ICONV_TO_I2:
2559 case OP_LCONV_TO_I2:
2560 /* FIXME: Is this needed ? */
2561 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2563 case OP_LCONV_TO_I4:
2564 /* FIXME: Is this needed ? */
2565 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2567 case OP_ICONV_TO_U1:
2568 case OP_LCONV_TO_U1:
2569 /* FIXME: Is this needed */
2570 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2572 case OP_ICONV_TO_U2:
2573 case OP_LCONV_TO_U2:
2574 /* FIXME: Is this needed */
2575 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2577 case OP_LCONV_TO_U4:
2578 /* FIXME: Is this needed */
2579 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2581 case OP_ICONV_TO_I8:
2583 case OP_LCONV_TO_I8:
2585 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2587 case OP_LCONV_TO_U8:
2589 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2596 double d = *(double *)ins->inst_p0;
2598 if ((d == 0.0) && (mono_signbit (d) == 0))
2599 ia64_fmov (code, ins->dreg, 0);
2601 ia64_fmov (code, ins->dreg, 1);
2603 add_patch_info (cfg, code, MONO_PATCH_INFO_R8, ins->inst_p0);
2604 ia64_movl (code, GP_SCRATCH_REG, 0);
2605 ia64_ldfd (code, ins->dreg, GP_SCRATCH_REG);
2610 float f = *(float *)ins->inst_p0;
2612 if ((f == 0.0) && (mono_signbit (f) == 0))
2613 ia64_fmov (code, ins->dreg, 0);
2615 ia64_fmov (code, ins->dreg, 1);
2617 add_patch_info (cfg, code, MONO_PATCH_INFO_R4, ins->inst_p0);
2618 ia64_movl (code, GP_SCRATCH_REG, 0);
2619 ia64_ldfs (code, ins->dreg, GP_SCRATCH_REG);
2624 ia64_fmov (code, ins->dreg, ins->sreg1);
2626 case OP_STORER8_MEMBASE_REG:
2627 if (ins->inst_offset != 0) {
2628 /* This is generated by local regalloc */
2629 if (ia64_is_imm14 (ins->inst_offset)) {
2630 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2632 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2633 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2635 ins->inst_destbasereg = GP_SCRATCH_REG;
2637 ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2639 case OP_STORER4_MEMBASE_REG:
2640 ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2641 ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
2643 case OP_LOADR8_MEMBASE:
2644 if (ins->inst_offset != 0) {
2645 /* This is generated by local regalloc */
2646 if (ia64_is_imm14 (ins->inst_offset)) {
2647 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2649 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2650 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2652 ins->inst_basereg = GP_SCRATCH_REG;
2654 ia64_ldfd (code, ins->dreg, ins->inst_basereg);
2656 case OP_LOADR4_MEMBASE:
2657 ia64_ldfs (code, ins->dreg, ins->inst_basereg);
2658 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2660 case OP_ICONV_TO_R4:
2661 case OP_LCONV_TO_R4:
2662 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2663 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2664 ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
2666 case OP_ICONV_TO_R8:
2667 case OP_LCONV_TO_R8:
2668 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2669 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2670 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2672 case OP_FCONV_TO_R4:
2673 ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
2675 case OP_FCONV_TO_I8:
2677 ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2678 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2681 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2684 ia64_fms_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2687 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, ins->sreg2, 0, 0);
2690 ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
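/*
 * Finiteness check: per the fclass class encoding, 0x080/0x040 select the
 * NaN classes and 0x020|sign selects the infinities; any match below
 * raises an ArithmeticException.
 */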
2694 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
2695 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2697 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
2698 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2699 /* Positive infinity */
2700 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
2701 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2702 /* Negative infinity */
2703 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
2704 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2709 /* ensure ins->sreg1 is not NULL */
2710 /* Can't use ld8 as this could be a vtype address */
2711 ia64_ld1 (code, GP_SCRATCH_REG, ins->sreg1);
2714 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
2715 ia64_st8 (code, ins->sreg1, GP_SCRATCH_REG);
2723 call = (MonoCallInst*)ins;
2725 if (ins->flags & MONO_INST_HAS_METHOD)
2726 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2728 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2730 code = emit_move_return_value (cfg, ins, code);
2738 case OP_VOIDCALL_REG: {
2739 MonoCallInst *call = (MonoCallInst*)ins;
2744 * mono_arch_get_this_arg_from_call () needs to find the this argument in a global register.
2747 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2748 out_reg = cfg->arch.reg_out0;
2749 ia64_mov (code, IA64_R10, out_reg);
2752 ia64_mov (code, IA64_R8, ins->sreg1);
2753 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
2754 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
2755 ia64_ld8 (code, IA64_GP, IA64_R8);
2756 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2758 code = emit_move_return_value (cfg, ins, code);
2761 case OP_FCALL_MEMBASE:
2762 case OP_LCALL_MEMBASE:
2763 case OP_VCALL_MEMBASE:
2764 case OP_VCALL2_MEMBASE:
2765 case OP_VOIDCALL_MEMBASE:
2766 case OP_CALL_MEMBASE: {
2767 MonoCallInst *call = (MonoCallInst*)ins;
2771 ia64_mov (code, IA64_R11, ins->sreg1);
2772 if (ia64_is_imm14 (ins->inst_offset))
2773 ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
2775 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2776 ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
2779 if (call->method && ins->inst_offset < 0) {
2781 * This is a possible IMT call so save the IMT method in a global
2782 * register where mono_arch_find_imt_method () and its friends can access it.
2785 ia64_movl (code, IA64_R9, call->method);
2789 * mono_arch_find_this_arg () needs to find the this argument in a global register.
2792 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2793 out_reg = cfg->arch.reg_out0;
2794 ia64_mov (code, IA64_R10, out_reg);
2796 ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
2798 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2800 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2802 code = emit_move_return_value (cfg, ins, code);
2807 * Keep in sync with the code in emit_epilog.
2810 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2813 g_assert (!cfg->method->save_lmf);
2815 /* Load arguments into their original registers */
2816 code = emit_load_volatile_arguments (cfg, code);
2818 if (cfg->arch.stack_alloc_size) {
2819 if (cfg->arch.omit_fp) {
2820 if (ia64_is_imm14 (cfg->arch.stack_alloc_size))
2821 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
2823 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
2824 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
2828 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
2830 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
2831 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
2833 add_patch_info (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2834 ia64_movl (code, GP_SCRATCH_REG, 0);
2835 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2836 ia64_br_cond_reg (code, IA64_B6);
2841 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
2847 /* FIXME: Sigaltstack support */
2849 /* keep alignment */
2850 ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1);
2851 ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2852 ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
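/* Standard round-up: (size + A-1) & ~(A-1); e.g. with A == 16, 13 -> 16. */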
2854 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2856 ia64_mov (code, ins->dreg, IA64_SP);
2858 /* An area at sp is reserved by the ABI for parameter passing */
2859 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_LOCALLOC_ALIGNMENT);
2860 if (ia64_is_adds_imm (abi_offset))
2861 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2863 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2864 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2867 if (ins->flags & MONO_INST_INIT) {
2869 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2871 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
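/*
 * Zero the freshly allocated block eight bytes at a time: st8 with
 * post-increment through dreg, then branch back two bundles while
 * dreg is below the end address kept in GP_SCRATCH_REG2.
 */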
2874 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2875 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2876 ia64_br_cond_pred (code, 8, -2);
2878 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2880 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2885 case OP_LOCALLOC_IMM: {
2888 /* FIXME: Sigaltstack support */
2890 gssize size = ins->inst_imm;
2891 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2893 if (ia64_is_adds_imm (size))
2894 ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
2896 ia64_movl (code, GP_SCRATCH_REG, size);
2898 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2899 ia64_mov (code, ins->dreg, IA64_SP);
2901 /* An area at sp is reserved by the ABI for parameter passing */
2902 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
2903 if (ia64_is_adds_imm (abi_offset))
2904 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2906 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2907 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2910 if (ins->flags & MONO_INST_INIT) {
2912 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2914 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2917 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2918 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2919 ia64_br_cond_pred (code, 8, -2);
2921 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2923 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2929 ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
2930 ia64_ld8 (code, ins->dreg, ins->dreg);
2933 /* Synchronization */
2934 case OP_MEMORY_BARRIER:
2937 case OP_ATOMIC_ADD_IMM_NEW_I4:
2938 g_assert (ins->inst_offset == 0);
2939 ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2940 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2942 case OP_ATOMIC_ADD_IMM_NEW_I8:
2943 g_assert (ins->inst_offset == 0);
2944 ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2945 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2947 case OP_ATOMIC_EXCHANGE_I4:
2948 ia64_xchg4_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2949 ia64_sxt4 (code, ins->dreg, ins->dreg);
2951 case OP_ATOMIC_EXCHANGE_I8:
2952 ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2954 case OP_ATOMIC_ADD_NEW_I4: {
2955 guint8 *label, *buf;
2957 /* From libatomic_ops */
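/*
 * Sketch of the compare-and-swap loop emitted below (cmpxchg4.acq takes
 * the expected old value from ar.ccv):
 *   do { old = *addr; } while (cmpxchg (addr, old, old + add) != old);
 *   result = old + add;
 */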
2960 ia64_begin_bundle (code);
2961 label = code.buf + code.nins;
2962 ia64_ld4_acq (code, GP_SCRATCH_REG, ins->sreg1);
2963 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2964 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2965 ia64_cmpxchg4_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2966 ia64_cmp4_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2967 buf = code.buf + code.nins;
2968 ia64_br_cond_pred (code, 7, 0);
2969 ia64_begin_bundle (code);
2970 ia64_patch (buf, label);
2971 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2974 case OP_ATOMIC_ADD_NEW_I8: {
2975 guint8 *label, *buf;
2977 /* From libatomic_ops */
2980 ia64_begin_bundle (code);
2981 label = code.buf + code.nins;
2982 ia64_ld8_acq (code, GP_SCRATCH_REG, ins->sreg1);
2983 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2984 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2985 ia64_cmpxchg8_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2986 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2987 buf = code.buf + code.nins;
2988 ia64_br_cond_pred (code, 7, 0);
2989 ia64_begin_bundle (code);
2990 ia64_patch (buf, label);
2991 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2995 /* Exception handling */
2996 case OP_CALL_HANDLER:
2998 * Using a call instruction would mess up the register stack, so
2999 * save the return address to a register and use a branch instead.
3002 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3003 ia64_mov (code, IA64_R15, IA64_R0);
3004 ia64_mov_from_ip (code, GP_SCRATCH_REG);
3005 /* Add the length of OP_CALL_HANDLER */
3006 ia64_adds_imm (code, GP_SCRATCH_REG, 5 * 16, GP_SCRATCH_REG);
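/*
 * mov_from_ip returns the address of its own bundle; with one instruction
 * per bundle the adds/movl/mov_to_br/br below occupy the next four bundles,
 * so ip + 5*16 is the first bundle after this OP_CALL_HANDLER.
 */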
3007 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3008 ia64_movl (code, GP_SCRATCH_REG2, 0);
3009 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
3010 ia64_br_cond_reg (code, IA64_B6);
3012 //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3013 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3015 case OP_START_HANDLER: {
3017 * We receive the return address in GP_SCRATCH_REG.
3019 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3022 * R15 determines our caller. It is used since it is writable using libunwind.
3024 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3025 * R15 != 0 means we are called by call_filter ().
3027 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3028 ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
3030 ia64_br_cond_pred (code, 6, 6);
3033 * Called by call_filter:
3034 * Allocate a new stack frame, and set the fp register from the
3035 * value passed in by the caller.
3036 * We allocate a similar frame as is done by the prolog, so
3037 * if an exception is thrown while executing the filter, the
3038 * unwinder can unwind through the filter frame using the unwind
3039 * info for the prolog.
3041 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3042 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3043 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3044 ia64_mov (code, cfg->frame_reg, IA64_R15);
3045 /* Signal to endfilter that we are called by call_filter */
3046 ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
3048 /* Branch target: */
3049 if (ia64_is_imm14 (spvar->inst_offset))
3050 ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
3052 ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
3053 ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
3056 /* Save the return address */
3057 ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
3058 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3063 case OP_ENDFILTER: {
3064 /* FIXME: Return the value in ENDFILTER */
3065 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3067 /* Load the return address */
3068 if (ia64_is_imm14 (spvar->inst_offset)) {
3069 ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
3071 ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
3072 ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
3074 ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
3077 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
3078 ia64_br_cond_pred (code, 7, 4);
3080 /* Called by call_filter */
3082 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
3083 ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
3084 ia64_br_ret_reg (code, IA64_B0);
3086 /* Called by CALL_HANDLER */
3087 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
3088 ia64_br_cond_reg (code, IA64_B6);
3092 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3093 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3094 (gpointer)"mono_arch_throw_exception");
3097 * This might be the last instruction in the method, so add a dummy
3098 * instruction so the unwinder will work.
3100 ia64_break_i (code, 0);
3103 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3104 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3105 (gpointer)"mono_arch_rethrow_exception");
3107 ia64_break_i (code, 0);
3111 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3112 g_assert_not_reached ();
3115 if ((code.buf - cfg->native_code - offset) > max_len) {
3116 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3117 mono_inst_name (ins->opcode), max_len, code.buf - cfg->native_code - offset);
3118 g_assert_not_reached ();
3124 last_offset = offset;
3127 ia64_codegen_close (code);
3129 cfg->code_len = code.buf - cfg->native_code;
3133 mono_arch_register_lowlevel_calls (void)
3137 static Ia64InsType ins_types_in_template [32][3] = {
3138 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3139 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3140 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3141 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3142 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3143 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3146 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3147 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3148 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3149 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3150 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3151 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3152 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3153 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3154 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3155 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3156 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3157 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3160 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3161 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3162 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3163 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3166 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3167 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3172 static gboolean stops_in_template [32][3] = {
3173 { FALSE, FALSE, FALSE },
3174 { FALSE, FALSE, TRUE },
3175 { FALSE, TRUE, FALSE },
3176 { FALSE, TRUE, TRUE },
3177 { FALSE, FALSE, FALSE },
3178 { FALSE, FALSE, TRUE },
3179 { FALSE, FALSE, FALSE },
3180 { FALSE, FALSE, FALSE },
3182 { FALSE, FALSE, FALSE },
3183 { FALSE, FALSE, TRUE },
3184 { TRUE, FALSE, FALSE },
3185 { TRUE, FALSE, TRUE },
3186 { FALSE, FALSE, FALSE },
3187 { FALSE, FALSE, TRUE },
3188 { FALSE, FALSE, FALSE },
3189 { FALSE, FALSE, TRUE },
3191 { FALSE, FALSE, FALSE },
3192 { FALSE, FALSE, TRUE },
3193 { FALSE, FALSE, FALSE },
3194 { FALSE, FALSE, TRUE },
3195 { FALSE, FALSE, FALSE },
3196 { FALSE, FALSE, FALSE },
3197 { FALSE, FALSE, FALSE },
3198 { FALSE, FALSE, TRUE },
3200 { FALSE, FALSE, FALSE },
3201 { FALSE, FALSE, TRUE },
3202 { FALSE, FALSE, FALSE },
3203 { FALSE, FALSE, FALSE },
3204 { FALSE, FALSE, FALSE },
3205 { FALSE, FALSE, TRUE },
3206 { FALSE, FALSE, FALSE },
3207 { FALSE, FALSE, FALSE }
3210 static int last_stop_in_template [32] = {
3211 -1, 2, 1, 2, -1, 2, -1, -1,
3212 -1, 2, 0, 2, -1, 2, -1, 2,
3213 -1, 2, -1, 2, -1, -1, -1, 2,
3214 -1, 2, -1, -1, -1, 2, -1, -1
3217 static guint64 nops_for_ins_types [6] = {
3226 #define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
3233 #define DEBUG_INS_SCHED(a) do { a; } while (0)
3235 #define DEBUG_INS_SCHED(a)
3239 ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
3241 int i, pos, ins_index, current_deps_start, current_ins_start, reg;
3242 guint8 *deps = code->dep_info;
3243 gboolean need_stop, no_stop;
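/*
 * code->dep_info is a flat byte stream of (tag, operand) pairs, one run per
 * buffered instruction, each run terminated by IA64_END_OF_INS. A stop is
 * required when an instruction touches a resource written earlier in the
 * current instruction group.
 */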
3245 for (i = 0; i < code->nins; ++i)
3249 current_deps_start = 0;
3250 current_ins_start = 0;
3251 deps_start [ins_index] = current_ins_start;
3254 DEBUG_INS_SCHED (printf ("BEGIN.\n"));
3255 while (pos < code->dep_info_pos) {
3257 switch (deps [pos]) {
3258 case IA64_END_OF_INS:
3260 current_ins_start = pos + 2;
3261 deps_start [ins_index] = current_ins_start;
3263 DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index - 1));
3268 reg = deps [pos + 1];
3270 DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg));
3271 for (i = current_deps_start; i < current_ins_start; i += 2)
3272 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3276 reg = code->dep_info [pos + 1];
3278 DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg));
3279 for (i = current_deps_start; i < current_ins_start; i += 2)
3280 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3284 reg = deps [pos + 1];
3286 DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg));
3287 for (i = current_deps_start; i < current_ins_start; i += 2)
3288 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3291 case IA64_READ_PR_BRANCH:
3292 reg = deps [pos + 1];
3294 /* Writes to prs by non-float instructions are visible to branches */
3295 DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg));
3296 for (i = current_deps_start; i < current_ins_start; i += 2)
3297 if (deps [i] == IA64_WRITE_PR_FLOAT && deps [i + 1] == reg)
3301 reg = code->dep_info [pos + 1];
3303 DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg));
3304 for (i = current_deps_start; i < current_ins_start; i += 2)
3305 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3308 case IA64_WRITE_PR_FLOAT:
3309 reg = code->dep_info [pos + 1];
3311 DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg));
3312 for (i = current_deps_start; i < current_ins_start; i += 2)
3313 if (((deps [i] == IA64_WRITE_GR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3317 reg = deps [pos + 1];
3319 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
3320 for (i = current_deps_start; i < current_ins_start; i += 2)
3321 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3325 reg = code->dep_info [pos + 1];
3327 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
3328 for (i = current_deps_start; i < current_ins_start; i += 2)
3329 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3332 case IA64_READ_BR_BRANCH:
3333 reg = deps [pos + 1];
3335 /* Writes to brs are visible to branches */
3336 DEBUG_INS_SCHED (printf ("READ BR BRANCH: %d\n", reg));
3339 reg = deps [pos + 1];
3341 DEBUG_INS_SCHED (printf ("READ FR: %d\n", reg));
3342 for (i = current_deps_start; i < current_ins_start; i += 2)
3343 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3347 reg = code->dep_info [pos + 1];
3349 DEBUG_INS_SCHED (printf ("WRITE FR: %d\n", reg));
3350 for (i = current_deps_start; i < current_ins_start; i += 2)
3351 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3355 reg = deps [pos + 1];
3357 DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg));
3358 for (i = current_deps_start; i < current_ins_start; i += 2)
3359 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3363 reg = code->dep_info [pos + 1];
3365 DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg));
3366 for (i = current_deps_start; i < current_ins_start; i += 2)
3367 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3372 * Explicitly indicate that a stop is not required. Useful for
3373 * example when two predicated instructions with negated predicates
3374 * write the same registers.
3379 g_assert_not_reached ();
3383 if (need_stop && !no_stop) {
3384 g_assert (ins_index > 0);
3385 stops [ins_index - 1] = 1;
3387 DEBUG_INS_SCHED (printf ("STOP\n"));
3388 current_deps_start = current_ins_start;
3390 /* Skip remaining deps for this instruction */
3391 while (deps [pos] != IA64_END_OF_INS)
3396 if (code->nins > 0) {
3397 /* No dependency info for the last instruction */
3398 stops [code->nins - 1] = 1;
3401 deps_start [code->nins] = code->dep_info_pos;
3405 ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
3407 int stop_pos, i, deps_to_shift, dep_shift;
3409 g_assert (n <= code->nins);
3411 // if (n > 1) printf ("FOUND: %ld.\n", template);
3413 ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
3415 stop_pos = last_stop_in_template [template] + 1;
3419 /* Compute the number of 'real' instructions before the stop */
3420 deps_to_shift = stop_pos;
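/*
 * Bit i of 'nops' is set when slot i was filled with a nop we inserted;
 * nop slots don't consume an entry from the instruction buffer, so the
 * checks below don't count them towards deps_to_shift.
 */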
3421 if (stop_pos >= 3 && (nops & (1 << 2)))
3423 if (stop_pos >= 2 && (nops & (1 << 1)))
3425 if (stop_pos >= 1 && (nops & (1 << 0)))
3429 * We have to keep some dependencies whose instructions have been shifted
3430 * out of the buffer. So nullify the end_of_ins markers in the dependency
3433 for (i = deps_start [deps_to_shift]; i < deps_start [n]; i += 2)
3434 if (code->dep_info [i] == IA64_END_OF_INS)
3435 code->dep_info [i] = IA64_NONE;
3437 g_assert (deps_start [deps_to_shift] <= code->dep_info_pos);
3438 memcpy (code->dep_info, &code->dep_info [deps_start [deps_to_shift]], code->dep_info_pos - deps_start [deps_to_shift]);
3439 code->dep_info_pos = code->dep_info_pos - deps_start [deps_to_shift];
3441 dep_shift = deps_start [deps_to_shift];
3442 for (i = 0; i < code->nins + 1 - n; ++i)
3443 deps_start [i] = deps_start [n + i] - dep_shift;
3445 /* Determine the exact positions of instructions with unwind ops */
3446 if (code->unw_op_count) {
3448 int curr_ins, curr_ins_pos;
3451 curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
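/*
 * code->buf already points past the bundle just written, hence the -16;
 * each 16-byte bundle contributes three slot positions to the unwind
 * 'when' index.
 */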
3452 for (i = 0; i < 3; ++i) {
3453 if (! (nops & (1 << i))) {
3454 ins_pos [curr_ins] = curr_ins_pos + i;
3459 for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
3460 if (code->unw_ops_pos [i] < n) {
3461 code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
3462 //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
3465 if (code->unw_op_pos < code->unw_op_count)
3466 code->unw_op_pos += n;
3469 if (n == code->nins) {
3474 memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
3475 memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
3476 memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
3482 ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
3484 int i, ins_type, template, nins_to_emit;
3485 int deps_start [16];
3490 * We implement a simple scheduler which tries to put three instructions
3491 * per bundle, then two, then one.
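 * A template is accepted when its stop positions match the computed stops
 * and every slot's unit type matches the buffered instruction's type
 * (A-type instructions fit both I and M slots, see ITYPE_MATCH).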
3493 ia64_analyze_deps (code, deps_start, stops);
3495 if ((code->nins >= 3) && !code->one_ins_per_bundle) {
3496 /* Find a suitable template */
3497 for (template = 0; template < 32; ++template) {
3498 if (stops_in_template [template][0] != stops [0] ||
3499 stops_in_template [template][1] != stops [1] ||
3500 stops_in_template [template][2] != stops [2])
3504 for (i = 0; i < 3; ++i) {
3505 ins_type = ins_types_in_template [template][i];
3506 switch (code->itypes [i]) {
3507 case IA64_INS_TYPE_A:
3508 found &= (ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M);
3511 found &= (ins_type == code->itypes [i]);
3517 found = debug_ins_sched ();
3520 ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
3526 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3527 /* Wait for more instructions */
3530 /* If it didn't work out, try putting two instructions into one bundle */
3531 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3532 /* Try a nop at the end */
3533 for (template = 0; template < 32; ++template) {
3534 if (stops_in_template [template][0] != stops [0] ||
3535 ((stops_in_template [template][1] != stops [1]) &&
3536 (stops_in_template [template][2] != stops [1])))
3540 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3541 !ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [1]))
3544 if (!debug_ins_sched ())
3547 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
3552 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3553 /* Wait for more instructions */
3556 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3557 /* Try a nop in the middle */
3558 for (template = 0; template < 32; ++template) {
3559 if (((stops_in_template [template][0] != stops [0]) &&
3560 (stops_in_template [template][1] != stops [0])) ||
3561 stops_in_template [template][2] != stops [1])
3564 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3565 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3568 if (!debug_ins_sched ())
3571 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
3576 if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
3577 /* Try a nop at the beginning */
3578 for (template = 0; template < 32; ++template) {
3579 if ((stops_in_template [template][1] != stops [0]) ||
3580 (stops_in_template [template][2] != stops [1]))
3583 if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
3584 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3587 if (!debug_ins_sched ())
3590 ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
3595 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3596 /* Wait for more instructions */
3600 nins_to_emit = code->nins;
3604 while (nins_to_emit > 0) {
3605 if (!debug_ins_sched ())
3607 switch (code->itypes [0]) {
3608 case IA64_INS_TYPE_A:
3610 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3612 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3614 case IA64_INS_TYPE_I:
3616 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3618 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3620 case IA64_INS_TYPE_M:
3622 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3624 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3626 case IA64_INS_TYPE_B:
3628 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3630 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3632 case IA64_INS_TYPE_F:
3634 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3636 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3638 case IA64_INS_TYPE_LX:
3639 if (stops [0] || stops [1])
3640 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3642 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3646 g_assert_not_reached ();
3652 unw_dyn_region_info_t*
3653 mono_ia64_create_unwind_region (Ia64CodegenState *code)
3655 unw_dyn_region_info_t *r;
3657 g_assert (code->nins == 0);
3658 r = g_malloc0 (_U_dyn_region_info_size (code->unw_op_count));
3659 memcpy (&r->op, &code->unw_ops, sizeof (unw_dyn_op_t) * code->unw_op_count);
3660 r->op_count = code->unw_op_count;
3661 r->insn_count = ((code->buf - code->region_start) >> 4) * 3;
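/* Bundles are 16 bytes and hold three instruction slots, hence the >> 4 and * 3. */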
3662 code->unw_op_count = 0;
3663 code->unw_op_pos = 0;
3664 code->region_start = code->buf;
3670 ia64_patch (unsigned char* code, gpointer target)
3673 guint64 instructions [3];
3674 guint8 gen_buf [16];
3675 Ia64CodegenState gen;
3680 * code encodes both the position inside the buffer and code.nins when
3681 * the instruction was emitted.
3683 ins_to_skip = (guint64)code % 16;
3684 code = (unsigned char*)((guint64)code & ~15);
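/*
 * Bundles are 16-byte aligned, so the low 4 bits of the recorded address
 * are free to carry code.nins at emission time; that count tells us how
 * many patchable instructions to skip before the one we want to patch.
 */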
3687 * Search for the first instruction which is 'patchable', skipping
3688 * ins_to_skip instructions.
3693 template = ia64_bundle_template (code);
3694 instructions [0] = ia64_bundle_ins1 (code);
3695 instructions [1] = ia64_bundle_ins2 (code);
3696 instructions [2] = ia64_bundle_ins3 (code);
3698 ia64_codegen_init (gen, gen_buf);
3701 for (i = 0; i < 3; ++i) {
3702 guint64 ins = instructions [i];
3703 int opcode = ia64_ins_opcode (ins);
3705 if (ins == nops_for_ins_types [ins_types_in_template [template][i]])
3713 switch (ins_types_in_template [template][i]) {
3714 case IA64_INS_TYPE_A:
3715 case IA64_INS_TYPE_M:
3716 if ((opcode == 8) && (ia64_ins_x2a (ins) == 2) && (ia64_ins_ve (ins) == 0)) {
3718 ia64_adds_imm_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), (guint64)target, ia64_ins_r3 (ins));
3719 instructions [i] = gen.instructions [0];
3725 case IA64_INS_TYPE_B:
3726 if ((opcode == 4) && (ia64_ins_btype (ins) == 0)) {
3728 gint64 disp = ((guint8*)target - code) >> 4;
3731 ia64_br_cond_hint_pred (gen, ia64_ins_qp (ins), disp, 0, 0, 0);
3733 instructions [i] = gen.instructions [0];
3736 else if (opcode == 5) {
3738 gint64 disp = ((guint8*)target - code) >> 4;
3741 ia64_br_call_hint_pred (gen, ia64_ins_qp (ins), ia64_ins_b1 (ins), disp, 0, 0, 0);
3742 instructions [i] = gen.instructions [0];
3748 case IA64_INS_TYPE_LX:
3752 if ((opcode == 6) && (ia64_ins_vc (ins) == 0)) {
3754 ia64_movl_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), target);
3755 instructions [1] = gen.instructions [0];
3756 instructions [2] = gen.instructions [1];
3769 ia64_codegen_init (gen, code);
3770 ia64_emit_bundle_template (&gen, template, instructions [0], instructions [1], instructions [2]);
3780 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
3782 MonoJumpInfo *patch_info;
3784 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3785 unsigned char *ip = patch_info->ip.i + code;
3786 const unsigned char *target;
3788 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3790 if (patch_info->type == MONO_PATCH_INFO_NONE)
3792 if (mono_compile_aot) {
3796 ia64_patch (ip, (gpointer)target);
3801 mono_arch_emit_prolog (MonoCompile *cfg)
3803 MonoMethod *method = cfg->method;
3804 MonoMethodSignature *sig;
3806 int alloc_size, pos, i;
3807 Ia64CodegenState code;
3810 sig = mono_method_signature (method);
3813 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
3815 cfg->code_size = MAX (cfg->header->code_size * 4, 512);
3817 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3818 cfg->code_size += 1024;
3819 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3820 cfg->code_size += 1024;
3822 cfg->native_code = g_malloc (cfg->code_size);
3824 ia64_codegen_init (code, cfg->native_code);
3826 alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
3827 if (cfg->param_area)
3828 alloc_size += cfg->param_area;
3832 alloc_size = ALIGN_TO (alloc_size, MONO_ARCH_FRAME_ALIGNMENT);
3834 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
3835 /* Force sp to be saved/restored */
3836 alloc_size += MONO_ARCH_FRAME_ALIGNMENT;
3838 cfg->arch.stack_alloc_size = alloc_size;
3842 if (method->save_lmf) {
3843 /* No LMF on IA64 */
3848 ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
3849 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
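/*
 * alloc reg_saved_ar_pfs = ar.pfs, ins, locals, outs, 0: saves ar.pfs and
 * sizes the register stack frame (reg_local0 - reg_in0 inputs,
 * reg_out0 - reg_local0 locals, n_out_regs outputs, no rotating registers).
 */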
3850 ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + cfg->arch.reg_saved_b0);
3851 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3853 if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
3854 ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
3855 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3856 if (cfg->frame_reg != cfg->arch.reg_saved_sp)
3857 ia64_mov (code, cfg->frame_reg, IA64_SP);
3861 #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3862 int pagesize = getpagesize ();
3864 if (alloc_size >= pagesize) {
3865 gint32 remaining_size = alloc_size;
3867 /* Generate stack touching code */
3868 ia64_mov (code, GP_SCRATCH_REG, IA64_SP);
3869 while (remaining_size >= pagesize) {
3870 ia64_movl (code, GP_SCRATCH_REG2, pagesize);
3871 ia64_sub (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3872 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
3873 remaining_size -= pagesize;
3877 if (ia64_is_imm14 (-alloc_size)) {
3878 if (cfg->arch.omit_fp)
3879 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3880 ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
3883 ia64_movl (code, GP_SCRATCH_REG, -alloc_size);
3884 if (cfg->arch.omit_fp)
3885 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3886 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
3890 ia64_begin_bundle (code);
3892 /* Initialize unwind info */
3893 cfg->arch.r_pro = mono_ia64_create_unwind_region (&code);
3895 if (sig->ret->type != MONO_TYPE_VOID) {
3896 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
3897 /* Save volatile arguments to the stack */
3902 /* Keep this in sync with emit_load_volatile_arguments */
3903 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3904 ArgInfo *ainfo = cinfo->args + i;
3905 gint32 stack_offset;
3908 inst = cfg->args [i];
3910 if (sig->hasthis && (i == 0))
3911 arg_type = &mono_defaults.object_class->byval_arg;
3913 arg_type = sig->params [i - sig->hasthis];
3915 arg_type = mono_type_get_underlying_type (arg_type);
3917 stack_offset = ainfo->offset + ARGS_OFFSET;
3920 * FIXME: Native code might pass non register sized integers
3921 * without initializing the upper bits.
3923 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
3924 int reg = cfg->arch.reg_in0 + ainfo->reg;
3926 switch (mono_type_to_load_membase (cfg, arg_type)) {
3927 case OP_LOADI1_MEMBASE:
3928 ia64_sxt1 (code, reg, reg);
3930 case OP_LOADU1_MEMBASE:
3931 ia64_zxt1 (code, reg, reg);
3933 case OP_LOADI2_MEMBASE:
3934 ia64_sxt2 (code, reg, reg);
3936 case OP_LOADU2_MEMBASE:
3937 ia64_zxt2 (code, reg, reg);
3944 /* Save volatile arguments to the stack */
3945 if (inst->opcode != OP_REGVAR) {
3946 switch (ainfo->storage) {
3949 case ArgInFloatRegR4:
3950 g_assert (inst->opcode == OP_REGOFFSET);
3951 if (ia64_is_adds_imm (inst->inst_offset))
3952 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
3954 ia64_movl (code, GP_SCRATCH_REG2, inst->inst_offset);
3955 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3957 if (arg_type->byref)
3958 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3960 switch (arg_type->type) {
3962 ia64_stfs_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3965 ia64_stfd_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3968 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3976 if (ainfo->nslots != ainfo->nregs)
3979 g_assert (inst->opcode == OP_REGOFFSET);
3980 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
3981 for (i = 0; i < ainfo->nregs; ++i) {
3982 switch (ainfo->atype) {
3983 case AggregateNormal:
3984 ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg + i, sizeof (gpointer), 0);
3986 case AggregateSingleHFA:
3987 ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + i, 4, 0);
3989 case AggregateDoubleHFA:
3990 ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + i, sizeof (gpointer), 0);
3998 g_assert_not_reached ();
4002 if (inst->opcode == OP_REGVAR) {
4003 /* Argument allocated to (non-volatile) register */
4004 switch (ainfo->storage) {
4006 if (inst->dreg != cfg->arch.reg_in0 + ainfo->reg)
4007 ia64_mov (code, inst->dreg, cfg->arch.reg_in0 + ainfo->reg);
4010 ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
4011 ia64_ld8 (code, inst->dreg, GP_SCRATCH_REG);
4019 if (method->save_lmf) {
4020 /* No LMF on IA64 */
4023 ia64_codegen_close (code);
4025 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4026 code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);
4028 cfg->code_len = code.buf - cfg->native_code;
4030 g_assert (cfg->code_len < cfg->code_size);
4032 cfg->arch.prolog_end_offset = cfg->code_len;
4038 mono_arch_emit_epilog (MonoCompile *cfg)
4040 MonoMethod *method = cfg->method;
4042 int max_epilog_size = 16 * 4;
4043 Ia64CodegenState code;
4048 if (mono_jit_trace_calls != NULL)
4049 max_epilog_size += 1024;
4051 cfg->arch.epilog_begin_offset = cfg->code_len;
4053 while (cfg->code_len + max_epilog_size > cfg->code_size) {
4054 cfg->code_size *= 2;
4055 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4056 cfg->stat_code_reallocs++;
4059 /* FIXME: Emit unwind info */
4061 buf = cfg->native_code + cfg->code_len;
4063 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4064 buf = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, buf, TRUE);
4066 ia64_codegen_init (code, buf);
4068 /* the code restoring the registers must be kept in sync with OP_JMP */
4071 if (method->save_lmf) {
4072 /* No LMF on IA64 */
4075 /* Load returned vtypes into registers if needed */
4076 cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
4077 ainfo = &cinfo->ret;
4078 switch (ainfo->storage) {
4080 if (ainfo->nslots != ainfo->nregs)
4083 g_assert (cfg->ret->opcode == OP_REGOFFSET);
4084 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->ret->inst_offset, cfg->ret->inst_basereg);
4085 for (i = 0; i < ainfo->nregs; ++i) {
4086 switch (ainfo->atype) {
4087 case AggregateNormal:
4088 ia64_ld8_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4090 case AggregateSingleHFA:
4091 ia64_ldfs_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, 4, 0);
4093 case AggregateDoubleHFA:
4094 ia64_ldfd_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4097 g_assert_not_reached ();
4105 ia64_begin_bundle (code);
4107 code.region_start = cfg->native_code;
4109 /* Label the unwind state at the start of the exception throwing region */
4110 //ia64_unw_label_state (code, 1234);
4112 if (cfg->arch.stack_alloc_size) {
4113 if (cfg->arch.omit_fp) {
4114 if (ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
4115 ia64_unw_pop_frames (code, 1);
4116 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
4118 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
4119 ia64_unw_pop_frames (code, 1);
4120 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
4124 ia64_unw_pop_frames (code, 1);
4125 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
4128 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
4129 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
4130 ia64_br_ret_reg (code, IA64_B0);
4132 ia64_codegen_close (code);
4134 cfg->arch.r_epilog = mono_ia64_create_unwind_region (&code);
4135 cfg->arch.r_pro->next = cfg->arch.r_epilog;
4137 cfg->code_len = code.buf - cfg->native_code;
4139 g_assert (cfg->code_len < cfg->code_size);
4143 mono_arch_emit_exceptions (MonoCompile *cfg)
4145 MonoJumpInfo *patch_info;
4147 Ia64CodegenState code;
4148 gboolean empty = TRUE;
4149 //unw_dyn_region_info_t *r_exceptions;
4150 MonoClass *exc_classes [16];
4151 guint8 *exc_throw_start [16], *exc_throw_end [16];
4152 guint32 code_size = 0;
4154 /* Compute needed space */
4155 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4156 if (patch_info->type == MONO_PATCH_INFO_EXC)
4158 if (patch_info->type == MONO_PATCH_INFO_R8)
4159 code_size += 8 + 7; /* sizeof (double) + alignment */
4160 if (patch_info->type == MONO_PATCH_INFO_R4)
4161 code_size += 4 + 7; /* sizeof (float) + alignment */
4167 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4168 cfg->code_size *= 2;
4169 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4170 cfg->stat_code_reallocs++;
4173 ia64_codegen_init (code, cfg->native_code + cfg->code_len);
4175 /* The unwind state here is the same as before the epilog */
4176 //ia64_unw_copy_state (code, 1234);
4178 /* add code to raise exceptions */
4179 /* FIXME: Optimize this */
4181 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4182 switch (patch_info->type) {
4183 case MONO_PATCH_INFO_EXC: {
4184 MonoClass *exc_class;
4187 guint64 exc_token_index;
4189 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4190 g_assert (exc_class);
4191 exc_token_index = mono_metadata_token_index (exc_class->type_token);
4192 throw_ip = cfg->native_code + patch_info->ip.i;
4194 ia64_begin_bundle (code);
4196 ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);
4198 /* Find a throw sequence for the same exception class */
4199 for (i = 0; i < nthrows; ++i)
4200 if (exc_classes [i] == exc_class)
4204 gint64 offset = exc_throw_end [i] - 16 - throw_ip;
4206 if (ia64_is_adds_imm (offset))
4207 ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
4209 ia64_movl (code, cfg->arch.reg_out0 + 1, offset);
4211 buf = code.buf + code.nins;
4212 ia64_br_cond_pred (code, 0, 0);
4213 ia64_begin_bundle (code);
4214 ia64_patch (buf, exc_throw_start [i]);
4216 patch_info->type = MONO_PATCH_INFO_NONE;
4221 ia64_movl (code, cfg->arch.reg_out0 + 1, 0);
4223 ia64_begin_bundle (code);
4226 exc_classes [nthrows] = exc_class;
4227 exc_throw_start [nthrows] = code.buf;
4231 if (ia64_is_adds_imm (exc_token_index))
4232 ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
4234 ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);
4236 patch_info->data.name = "mono_arch_throw_corlib_exception";
4237 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4238 patch_info->ip.i = code.buf + code.nins - cfg->native_code;
4241 ia64_movl (code, GP_SCRATCH_REG, 0);
4242 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
4243 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
4244 ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
4246 ia64_br_call_reg (code, IA64_B0, IA64_B6);
4248 /* Patch up the throw offset */
4249 ia64_begin_bundle (code);
4251 ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));
4254 exc_throw_end [nthrows] = code.buf;
4268 /* The unwinder needs this to work */
4269 ia64_break_i (code, 0);
4271 ia64_codegen_close (code);
4274 //r_exceptions = mono_ia64_create_unwind_region (&code);
4275 //cfg->arch.r_epilog = r_exceptions;
4277 cfg->code_len = code.buf - cfg->native_code;
4279 g_assert (cfg->code_len < cfg->code_size);
4283 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4285 Ia64CodegenState code;
4286 CallInfo *cinfo = NULL;
4287 MonoMethodSignature *sig;
4289 int i, n, stack_area = 0;
4291 ia64_codegen_init (code, p);
4293 /* Keep this in sync with mono_arch_get_argument_info */
4295 if (enable_arguments) {
4296 /* Allocate a new area on the stack and save arguments there */
4297 sig = mono_method_signature (cfg->method);
4299 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4301 n = sig->param_count + sig->hasthis;
4303 stack_area = ALIGN_TO (n * 8, 16);
4306 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4308 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4310 /* FIXME: Allocate out registers */
4312 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_SP);
4314 /* Required by the ABI */
4315 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4317 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4318 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4320 /* Save arguments to the stack */
4321 for (i = 0; i < n; ++i) {
4322 ins = cfg->args [i];
4324 if (ins->opcode == OP_REGVAR) {
4325 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4326 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4327 ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
4330 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
4331 ia64_add (code, GP_SCRATCH_REG, ins->inst_basereg, GP_SCRATCH_REG);
4332 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
4333 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4334 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4335 ia64_st8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG2);
4340 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4343 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4345 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4346 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4348 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4350 if (enable_arguments && stack_area) {
4351 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4353 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4355 ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
4358 ia64_codegen_close (code);
4364 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
4366 Ia64CodegenState code;
4367 CallInfo *cinfo = NULL;
4368 MonoMethod *method = cfg->method;
4369 MonoMethodSignature *sig = mono_method_signature (cfg->method);
4371 ia64_codegen_init (code, p);
4373 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4375 /* Save return value + pass it to func */
4376 switch (cinfo->ret.storage) {
4380 ia64_mov (code, cfg->arch.reg_saved_return_val, cinfo->ret.reg);
4381 ia64_mov (code, cfg->arch.reg_out0 + 1, cinfo->ret.reg);
4384 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4385 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4386 ia64_stfd_hint (code, GP_SCRATCH_REG, cinfo->ret.reg, 0);
4387 ia64_fmov (code, 8 + 1, cinfo->ret.reg);
4389 case ArgValuetypeAddrInIReg:
4390 ia64_mov (code, cfg->arch.reg_out0 + 1, cfg->arch.reg_in0 + cinfo->ret.reg);
4399 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
4400 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4401 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4403 /* Restore return value */
4404 switch (cinfo->ret.storage) {
4408 ia64_mov (code, cinfo->ret.reg, cfg->arch.reg_saved_return_val);
4411 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4412 ia64_ldfd (code, cinfo->ret.reg, GP_SCRATCH_REG);
4414 case ArgValuetypeAddrInIReg:
4422 ia64_codegen_close (code);
4428 mono_arch_save_unwind_info (MonoCompile *cfg)
4432 /* FIXME: Unregister this for dynamic methods */
4434 di = g_malloc0 (sizeof (unw_dyn_info_t));
4435 di->start_ip = (unw_word_t) cfg->native_code;
4436 di->end_ip = (unw_word_t) cfg->native_code + cfg->code_len;
4438 di->format = UNW_INFO_FORMAT_DYNAMIC;
4439 di->u.pi.name_ptr = (unw_word_t)mono_method_full_name (cfg->method, TRUE);
4440 di->u.pi.regions = cfg->arch.r_pro;
4442 _U_dyn_register (di);
4446 unw_dyn_region_info_t *region = di->u.pi.regions;
4448 printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
4450 printf (" [Region: %d]\n", region->insn_count);
4451 region = region->next;
4458 mono_arch_flush_icache (guint8 *code, gint size)
4460 guint8* p = (guint8*)((guint64)code & ~(0x3f));
4461 guint8* end = (guint8*)((guint64)code + size);
4463 #ifdef __INTEL_COMPILER
4464 /* icc doesn't define an fc.i intrinsic, but fc==fc.i on Itanium 2 */
4471 __asm__ __volatile__ ("fc.i %0"::"r"(p));
4472 /* FIXME: This could be increased to 128 on some cpus */
4479 mono_arch_flush_register_windows (void)
4481 /* Not needed because of libunwind */
4485 mono_arch_is_inst_imm (gint64 imm)
4487 /* The lowering pass will take care of it */
4493 * Determine whether the trap whose info is in SIGINFO is caused by an integer overflow.
4497 mono_arch_is_int_overflow (void *sigctx, void *info)
4499 /* Division is emulated with explicit overflow checks */
4504 mono_arch_get_patch_offset (guint8 *code)
4512 mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
4520 mono_arch_finish_init (void)
4525 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4529 #ifdef MONO_ARCH_HAVE_IMT
4532 * LOCKING: called with the domain lock held
4535 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4536 gpointer fail_tramp)
4540 guint8 *start, *buf;
4541 Ia64CodegenState code;
4544 buf = g_malloc0 (size);
4545 ia64_codegen_init (code, buf);
4547 /* IA64_R9 contains the IMT method */
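/*
 * Each 'is_equals' entry compares r9 against item->key and, on a match,
 * branches to the target code or to the address loaded from the vtable
 * slot; the remaining entries do an unsigned >= compare on the key and
 * branch forward, effectively bisecting the sorted entry list.
 */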
4549 for (i = 0; i < count; ++i) {
4550 MonoIMTCheckItem *item = imt_entries [i];
4551 ia64_begin_bundle (code);
4552 item->code_target = (guint8*)code.buf + code.nins;
4553 if (item->is_equals) {
4554 gboolean fail_case = !item->check_target_idx && fail_tramp;
4556 if (item->check_target_idx || fail_case) {
4557 if (!item->compare_done || fail_case) {
4558 ia64_movl (code, GP_SCRATCH_REG, item->key);
4559 ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
4561 item->jmp_code = (guint8*)code.buf + code.nins;
4562 ia64_br_cond_pred (code, 7, 0);
				if (item->has_target_code) {
					ia64_movl (code, GP_SCRATCH_REG, item->value.target_code);
					ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
					ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);

					ia64_begin_bundle (code);
					ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
					ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
					ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
					ia64_br_cond_reg (code, IA64_B6);
					item->jmp_code = NULL;

				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();

			ia64_movl (code, GP_SCRATCH_REG, item->key);
			ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
			item->jmp_code = (guint8*)code.buf + code.nins;
			ia64_br_cond_pred (code, 6, 0);

	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];

		if (item->jmp_code) {
			if (item->check_target_idx) {
				ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);

	ia64_codegen_close (code);
	g_assert (code.buf - buf <= size);

	size = code.buf - buf;
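	/* IA64 instructions come in 16 byte bundles, so the thunk is copied to a 16 byte aligned address */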
		start = mono_method_alloc_generic_virtual_thunk (domain, size + 16);
		start = (gpointer)ALIGN_TO (start, 16);
		start = mono_domain_code_reserve (domain, size);

	memcpy (start, buf, size);
	mono_arch_flush_icache (start, size);

	mono_stats.imt_thunks_size += size;

mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
	return (MonoMethod*)regs [IA64_R9];

mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	/* Done by the implementation of the CALL_MEMBASE opcodes */

mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
	return (gpointer)regs [IA64_R10];

mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)

mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	if (cmethod->klass->image == mono_defaults.corlib &&
	    (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
	    (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		/*
		 * We don't use the generic version in mini_emit_inst_for_method () since
		 * ia64 has atomic_add_imm opcodes.
		 */
		if (strcmp (cmethod->name, "Increment") == 0) {
			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
				g_assert_not_reached ();

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = 1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
				g_assert_not_reached ();

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = -1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {
			gboolean is_imm = FALSE;

			if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
				imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
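
				/*
				 * ia64 fetchadd only accepts an increment of -16, -8, -4,
				 * -1, 1, 4, 8 or 16, so the _IMM variant can only be used
				 * for those values; other addends take the non-immediate
				 * atomic add path below.
				 */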
				is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);

				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->inst_imm = imm;
				ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;

				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_NEW_I8;
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;

			MONO_ADD_INS (cfg->cbb, ins);

mono_arch_print_tree (MonoInst *tree, int arity)

mono_arch_get_domain_intrinsic (MonoCompile* cfg)
	return mono_get_domain_intrinsic (cfg);

mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
	/* FIXME: implement */
	g_assert_not_reached ();