2 * mini-hppa.c: HPPA backend for the Mono code generator
4 * Copyright (c) 2007 Randolph Chung
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/debug-helpers.h>
35 #include <mono/metadata/tokentype.h>
36 #include <mono/utils/mono-math.h>
38 #include "mini-hppa.h"
/* Backend-local helper macros. */
43 #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */
44 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
45 #define SIGNAL_STACK_SIZE (64 * 1024)
/* Function-trace hooks; the printf bodies are commented out by default. */
48 #define DEBUG_FUNC_ENTER() // printf("Entering %s\n", __FUNCTION__)
49 #define DEBUG_FUNC_EXIT() // printf("Exiting %s\n", __FUNCTION__)
/* Per-branch-opcode flag (indexed from the first OP_HPPA_Bxx opcode):
 * presumably TRUE selects combt and FALSE selects combf when emitting the
 * compare-and-branch — confirm against EMIT_COND_BRANCH_FLAGS below.
 * NOTE(review): the type/storage line of this declaration is not visible
 * in this chunk. */
52 branch_b0_table [] = {
53 TRUE, /* OP_HPPA_BEQ */
54 FALSE, /* OP_HPPA_BGE */
55 FALSE, /* OP_HPPA_BGT */
56 TRUE, /* OP_HPPA_BLE */
57 TRUE, /* OP_HPPA_BLT */
/* NOTE(review): labelled OP_HPPA_BNE here but OP_HPPA_BNE_UN in
 * branch_b1_table — one of the two comments is stale; confirm the
 * actual opcode name. */
58 FALSE, /* OP_HPPA_BNE */
59 FALSE, /* OP_HPPA_BGE_UN */
60 FALSE, /* OP_HPPA_BGT_UN */
61 TRUE, /* OP_HPPA_BLE_UN */
62 TRUE, /* OP_HPPA_BLT_UN */
/* Per-branch-opcode HPPA compare condition, paired entry-for-entry with
 * branch_b0_table above: BGE/BGT use the SLT/SLE conditions (with the
 * inverted sense supplied by branch_b0_table) rather than dedicated
 * greater-than conditions.
 * NOTE(review): the type/storage line of this declaration is not visible
 * in this chunk. */
66 branch_b1_table [] = {
67 HPPA_CMP_COND_EQ, /* OP_HPPA_BEQ */
68 HPPA_CMP_COND_SLT, /* OP_HPPA_BGE */
69 HPPA_CMP_COND_SLE, /* OP_HPPA_BGT */
70 HPPA_CMP_COND_SLE, /* OP_HPPA_BLE */
71 HPPA_CMP_COND_SLT, /* OP_HPPA_BLT */
72 HPPA_CMP_COND_EQ, /* OP_HPPA_BNE_UN */
73 HPPA_CMP_COND_ULT, /* OP_HPPA_BGE_UN */
74 HPPA_CMP_COND_ULE, /* OP_HPPA_BGT_UN */
75 HPPA_CMP_COND_ULE, /* OP_HPPA_BLE_UN */
76 HPPA_CMP_COND_ULT, /* OP_HPPA_BLT_UN */
79 /* Note that these are inverted from the OP_xxx, because we nullify
80 * the branch if the condition is met
/* NOTE(review): the bodies of both float tables are elided in this chunk. */
83 float_branch_table [] = {
97 float_ceq_table [] = {
106 * Branches have short (14 or 17 bit) targets on HPPA. To make longer jumps,
107 * we will need to rely on stubs - basically we create stub structures in
108 * the epilogue that uses a long branch to the destination, and any short
109 * jumps inside a method that cannot reach the destination directly will
110 * branch first to the stub.
/* Long-branch stub descriptor; used for overflow/exception jumps.
 * NOTE(review): additional members are elided in this chunk. */
112 typedef struct MonoOvfJump {
115 const char *exception;
120 /* Create a literal 0.0 double for FNEG */
121 double hppa_zero = 0;
/* Return a human-readable name for integer register REG, used in debug
 * output; indexes are bounds-checked against MONO_MAX_IREGS.
 * NOTE(review): the return statements of this function are elided in
 * this chunk. */
124 mono_arch_regname (int reg)
126 static const char * rnames[] = {
127 "hppa_r0", "hppa_r1", "hppa_rp", "hppa_r3", "hppa_r4",
128 "hppa_r5", "hppa_r6", "hppa_r7", "hppa_r8", "hppa_r9",
129 "hppa_r10", "hppa_r11", "hppa_r12", "hppa_r13", "hppa_r14",
130 "hppa_r15", "hppa_r16", "hppa_r17", "hppa_r18", "hppa_r19",
131 "hppa_r20", "hppa_r21", "hppa_r22", "hppa_r23", "hppa_r24",
132 "hppa_r25", "hppa_r26", "hppa_r27", "hppa_r28", "hppa_r29",
133 "hppa_sp", "hppa_r31"
135 if (reg >= 0 && reg < MONO_MAX_IREGS)
/* Return a human-readable name for floating-point register REG, used in
 * debug output; indexes are bounds-checked against MONO_MAX_FREGS.
 * NOTE(review): the return statements of this function are elided in
 * this chunk. */
141 mono_arch_fregname (int reg)
143 static const char *rnames [] = {
144 "hppa_fr0", "hppa_fr1", "hppa_fr2", "hppa_fr3", "hppa_fr4",
145 "hppa_fr5", "hppa_fr6", "hppa_fr7", "hppa_fr8", "hppa_fr9",
146 "hppa_fr10", "hppa_fr11", "hppa_fr12", "hppa_fr13", "hppa_fr14",
147 "hppa_fr15", "hppa_fr16", "hppa_fr17", "hppa_fr18", "hppa_fr19",
148 "hppa_fr20", "hppa_fr21", "hppa_fr22", "hppa_fr23", "hppa_fr24",
149 "hppa_fr25", "hppa_fr26", "hppa_fr27", "hppa_fr28", "hppa_fr29",
150 "hppa_fr30", "hppa_fr31",
153 if (reg >= 0 && reg < MONO_MAX_FREGS)
160 * Initialize the cpu to execute managed code.
163 mono_arch_cpu_init (void)
/* "optimizazions" is the historical (misspelled) name of the Mono
 * arch API — do not "fix" it, callers use this exact identifier. */
166 mono_arch_cpu_optimizazions(&dummy);
170 * Initialize architecture specific code.
173 mono_arch_init (void)
178 * Cleanup architecture specific code.
181 mono_arch_cleanup (void)
186 * This function returns the optimizations supported on this cpu.
189 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
/* Flush the instruction cache for [code, code+size): walk cache lines
 * and issue fdc (flush data cache) / fic (flush insn cache) through sr3.
 * The (guint32) pointer casts assume a 32-bit address space (hppa32).
 * NOTE(review): the loop structure around the asm is elided in this
 * chunk. */
197 mono_arch_flush_icache (guint8 *code, gint size)
/* Align the start address down to a 64-byte boundary. */
199 guint8* p = (guint8*)((guint32)code & ~(0x3f));
200 guint8* end = (guint8*)((guint32)code + size);
202 __asm__ __volatile__ ("fdc %%r0(%%sr3, %0)\n"
204 "fic %%r0(%%sr3, %0)\n"
207 p += 32; /* can be 64 on pa20 cpus */
212 mono_arch_flush_register_windows (void)
214 /* No register windows on hppa */
/* Offset of the first outgoing argument word below the caller frame. */
243 #define ARGS_OFFSET 36
/* Assign one parameter of TYPE to a register or stack slot, updating
 * AINFO and the running CINFO->stack_usage. Small (<=4 byte) args take
 * one word, <=8 byte args take an 8-byte-aligned pair, larger args are
 * passed by size with natural alignment. Args whose word offset falls
 * within PARAM_REGS go in r26 downward (or fr4+ for R4/R8); the rest
 * live at negative offsets from the r3 frame pointer.
 * NOTE(review): several brace/else lines of this function are elided in
 * this chunk. */
246 add_parameter (CallInfo *cinfo, ArgInfo *ainfo, MonoType *type)
248 int is_fp = (type->type == MONO_TYPE_R4 || type->type == MONO_TYPE_R8);
253 ainfo->size = mono_type_size (type, &align);
254 ainfo->type = type->type;
256 if (ainfo->size <= 4) {
257 cinfo->stack_usage += 4;
/* Sub-word args sit right-justified inside their 4-byte slot. */
258 ainfo->offset = cinfo->stack_usage - (4 - ainfo->size);
260 else if (ainfo->size <= 8)
262 cinfo->stack_usage += 8;
263 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
264 ainfo->offset = cinfo->stack_usage - (8 - ainfo->size);
268 cinfo->stack_usage += ainfo->size;
269 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
270 ainfo->offset = cinfo->stack_usage;
/* Word index of this arg relative to the start of the arg area. */
273 ofs = (ALIGN_TO (ainfo->offset, 4) - ARGS_OFFSET) / 4;
274 if (ofs < PARAM_REGS) {
276 if (ainfo->size <= 4)
277 ainfo->storage = ArgInIReg;
279 ainfo->storage = ArgInIRegPair;
/* Integer args count down from r26 (first arg register). */
280 ainfo->reg = hppa_r26 - ofs;
281 } else if (type->type == MONO_TYPE_R4) {
282 ainfo->storage = ArgInFReg;
283 ainfo->reg = hppa_fr4 + ofs;
284 } else { /* type->type == MONO_TYPE_R8 */
285 ainfo->storage = ArgInDReg;
286 ainfo->reg = hppa_fr4 + ofs;
290 /* frame pointer based offset */
291 ainfo->reg = hppa_r3;
292 ainfo->storage = ArgOnStack;
295 /* All offsets are negative relative to the frame pointer */
296 ainfo->offset = -ainfo->offset;
/* Decide how SIG's return value comes back: r28 for <=4-byte scalars and
 * references, r28/r29 pair for 8-byte values, fr4 for floats, and an
 * sp-relative buffer (struct_return) for larger value types.
 * NOTE(review): many case labels and brace lines of this switch are
 * elided in this chunk; the surviving lines are annotated where the
 * mapping is still evident. */
302 analyze_return (CallInfo *cinfo, MonoMethodSignature *sig)
309 size = mono_type_size (type, &align);
311 /* ref: mono_type_to_stind */
312 cinfo->ret.type = type->type;
314 cinfo->ret.storage = ArgInIReg;
315 cinfo->ret.reg = hppa_r28;
318 switch (type->type) {
321 case MONO_TYPE_BOOLEAN:
332 case MONO_TYPE_FNPTR:
333 case MONO_TYPE_CLASS:
334 case MONO_TYPE_STRING:
335 case MONO_TYPE_OBJECT:
336 case MONO_TYPE_SZARRAY:
337 case MONO_TYPE_ARRAY:
/* Pointer-sized returns come back in r28. */
338 cinfo->ret.storage = ArgInIReg;
339 cinfo->ret.reg = hppa_r28;
/* 64-bit integer returns use the r28/r29 pair. */
343 cinfo->ret.storage = ArgInIRegPair;
344 cinfo->ret.reg = hppa_r28;
347 cinfo->ret.storage = ArgInFReg;
348 cinfo->ret.reg = hppa_fr4;
351 cinfo->ret.storage = ArgInDReg;
352 cinfo->ret.reg = hppa_fr4;
354 case MONO_TYPE_GENERICINST:
/* Reduce a generic instance to its container class and re-dispatch. */
355 type = &type->data.generic_class->container_class->byval_arg;
358 case MONO_TYPE_VALUETYPE:
359 if (type->data.klass->enumtype) {
360 type = type->data.klass->enum_basetype;
364 case MONO_TYPE_TYPEDBYREF:
365 cinfo->struct_return = 1;
366 /* cinfo->ret.storage tells us how the ABI expects
367 * the parameter to be returned
370 cinfo->ret.storage = ArgInIReg;
371 cinfo->ret.reg = hppa_r28;
372 } else if (size <= 8) {
373 cinfo->ret.storage = ArgInIRegPair;
374 cinfo->ret.reg = hppa_r28;
376 cinfo->ret.storage = ArgOnStack;
377 cinfo->ret.reg = hppa_sp;
380 /* We always allocate stack space for this because the
381 * arch-indep code expects us to
383 cinfo->stack_usage += size;
384 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
385 cinfo->ret.offset = -cinfo->stack_usage;
389 g_error ("Can't handle as return value 0x%x", sig->ret->type);
397 * Obtain information about a call according to the calling convention.
/* Build a CallInfo (with one trailing ArgInfo per argument, including the
 * implicit 'this') describing register/stack placement for SIG. Caller
 * owns the returned g_malloc0'd buffer.
 * NOTE(review): several declarations and brace lines of this function
 * are elided in this chunk. */
400 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
403 int n = sig->hasthis + sig->param_count;
/* A synthetic pointer type used to lay out the 'this' argument. */
409 ptrtype.type = MONO_TYPE_PTR;
412 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
414 /* The area below ARGS_OFFSET is the linkage area... */
415 cinfo->stack_usage = ARGS_OFFSET - 4;
416 /* -4, because the first argument will allocate the area it needs */
420 add_parameter (cinfo, cinfo->args + 0, &ptrtype);
421 DEBUG (printf ("param <this>: assigned to reg %s offset %d\n", mono_arch_regname (cinfo->args[0].reg), cinfo->args[0].offset));
424 /* TODO: What to do with varargs? */
426 for (i = 0; i < sig->param_count; ++i) {
427 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
/* Byref parameters are handled on a path elided from this chunk. */
428 if (sig->params [i]->byref)
431 type = mono_type_get_underlying_type (sig->params [i]);
432 add_parameter (cinfo, ainfo, type);
434 DEBUG (printf ("param %d: type %d size %d assigned to reg %s offset %d\n", i, type->type, mono_type_size (type, &dummy), mono_arch_regname (ainfo->reg), ainfo->offset));
437 analyze_return (cinfo, sig);
/* Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size, sorted
 * by the varlist helper.
 * NOTE(review): declarations and the return line are elided in this
 * chunk. */
444 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
450 for (i = 0; i < cfg->num_varinfo; i++) {
451 MonoInst *ins = cfg->varinfo [i];
452 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused variables (empty or inverted live range). */
455 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
458 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
459 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
462 if (mono_is_regsize_var (ins->inst_vtype)) {
463 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
464 g_assert (i == vmv->idx);
465 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/* Return the list of callee-saved integer registers (r4-r18) available
 * for global allocation; r3 (frame pointer) and r19 (GOT) are excluded. */
474 mono_arch_get_global_int_regs (MonoCompile *cfg)
479 /* r3 is sometimes used as our frame pointer, so don't allocate it
480 * r19 is the GOT pointer, don't allocate it either
484 for (i = 4; i <= 18; i++)
485 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
492 * mono_arch_regalloc_cost:
494 * Return the cost, in number of memory references, of the action of
495 * allocating the variable VMV into a register during global register
499 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
506 * Set var information according to the calling convention.
507 * The locals var stuff should most likely be split in another method.
509 * updates m->stack_offset based on the amount of stack space needed for
/* Assigns a stack slot or register to the return value, every local, and
 * every incoming argument; the frame grows upward from m->param_area with
 * all variable offsets negative relative to the frame pointer (r3).
 * NOTE(review): many lines of this function are elided in this chunk;
 * 'stack_ptr' (used below) and some declarations are not visible here. */
513 mono_arch_allocate_vars (MonoCompile *m)
515 MonoMethodSignature *sig;
516 MonoMethodHeader *header;
518 int i, offset, size, align, curinst;
/* Locals are allocated at increasing (negative) offsets — spill "up". */
524 m->flags |= MONO_CFG_HAS_SPILLUP;
526 header = mono_method_get_header (m->method);
528 sig = mono_method_signature (m->method);
529 DEBUG (printf ("Allocating locals - incoming params:\n"));
530 cinfo = get_call_info (sig, FALSE);
533 * We use the ABI calling conventions for managed code as well.
535 if (m->flags & MONO_CFG_HAS_ALLOCA) {
537 m->used_int_regs |= 1 << hppa_r4;
542 /* Before this function is called, we would have looked at all
543 * calls from this method and figured out how much space is needed
544 * for the param area.
546 * Locals are allocated backwards, right before the param area
548 /* TODO: in some cases we don't need the frame pointer... */
549 m->frame_reg = hppa_r3;
550 offset = m->param_area;
552 /* Return values can be passed back either in four ways:
553 * r28 is used for data <= 4 bytes (32-bit ABI)
554 * r28/r29 are used for data >4 && <= 8 bytes
555 * fr4 is used for floating point data
556 * data larger than 8 bytes is returned on the stack pointed to
559 * This code needs to be in sync with how CEE_RET is handled
560 * in mono_method_to_ir (). In some cases when we return small
561 * structs, the ABI specifies that they should be returned in
562 * registers, but the code in mono_method_to_ir () always emits
563 * a memcpy for valuetype returns, so we need to make sure we
564 * allocate space on the stack for this copy.
566 if (cinfo->struct_return) {
567 /* this is used to stash the incoming r28 pointer */
568 offset += sizeof (gpointer);
569 m->ret->opcode = OP_REGOFFSET;
/* NOTE(review): 'stack_ptr' is not declared in the visible fragment —
 * presumably an alias for the frame register; confirm upstream. */
570 m->ret->inst_basereg = stack_ptr;
571 m->ret->inst_offset = -offset;
572 } else if (sig->ret->type != MONO_TYPE_VOID) {
573 m->ret->opcode = OP_REGVAR;
574 m->ret->inst_c0 = cinfo->ret.reg;
577 curinst = m->locals_start;
578 for (i = curinst; i < m->num_varinfo; ++i) {
579 inst = m->varinfo [i];
581 if (inst->opcode == OP_REGVAR) {
582 DEBUG (printf ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg)));
586 if (inst->flags & MONO_INST_IS_DEAD)
589 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
590 * pinvoke wrappers when they call functions returning structure */
591 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
592 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
/* NOTE(review): this line reads 'cfg' although the parameter is named
 * 'm' — 'cfg' is not declared in the visible fragment; confirm. */
594 size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
597 * This is needed since structures containing doubles must be doubleword
599 * FIXME: Do this only if needed.
601 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
605 * variables are accessed as negative offsets from hppa_sp
607 inst->opcode = OP_REGOFFSET;
608 inst->inst_basereg = stack_ptr;
610 offset = ALIGN_TO (offset, align);
611 inst->inst_offset = -offset;
613 DEBUG (printf ("allocating local %d (size = %d) to [%s - %d]\n", i, size, mono_arch_regname (inst->inst_basereg), -inst->inst_offset));
616 if (sig->call_convention == MONO_CALL_VARARG) {
620 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
621 ArgInfo *ainfo = &cinfo->args [i];
623 if (inst->opcode != OP_REGVAR) {
624 switch (ainfo->storage) {
629 /* Currently mono requests all incoming registers
630 * be assigned to a stack location :-(
633 if (!(inst->flags & (MONO_INST_VOLATILE | MONO_INST_INDIRECT))) {
634 inst->opcode = OP_REGVAR;
635 inst->dreg = ainfo->reg;
636 DEBUG (printf ("param %d in register %s\n", i, mono_arch_regname (inst->dreg)));
642 inst->opcode = OP_REGOFFSET;
643 inst->inst_basereg = hppa_r3;
644 inst->inst_offset = ainfo->offset;
645 DEBUG (printf ("param %d stored on stack [%s - %d]\n", i, mono_arch_regname (hppa_r3), -inst->inst_offset));
651 m->stack_offset = offset; /* Includes cfg->param_area */
658 * take the arguments and generate the arch-specific
659 * instructions to properly call the function in call.
660 * This includes pushing, moving arguments to the right register
663 * sets call->stack_usage and cfg->param_area
/* For each argument of CALL, emit an OP_OUTARG* instruction carrying its
 * register/stack placement (prepended to call->out_args, reversed at the
 * end), and record used registers and total stack usage.
 * NOTE(review): several case labels and brace lines of the storage
 * switch are elided in this chunk. */
666 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
669 MonoMethodSignature *sig;
675 DEBUG (printf ("is_virtual = %d\n", is_virtual));
677 sig = call->signature;
678 n = sig->param_count + sig->hasthis;
680 DEBUG (printf ("Calling method with %d parameters\n", n));
682 cinfo = get_call_info (sig, sig->pinvoke);
/* Varargs are not supported by this backend yet. */
685 g_assert (sig->call_convention != MONO_CALL_VARARG);
687 for (i = 0; i < n; ++i) {
688 ainfo = &cinfo->args [i];
690 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
694 if (is_virtual && i == 0) {
695 /* the argument will be attached to the call instruction */
697 call->used_iregs |= 1 << ainfo->reg;
699 MONO_INST_NEW (cfg, arg, OP_OUTARG);
701 arg->cil_code = in->cil_code;
703 arg->inst_call = call;
704 arg->type = in->type;
706 /* prepend, we'll need to reverse them later */
707 arg->next = call->out_args;
708 call->out_args = arg;
710 switch (ainfo->storage) {
712 case ArgInIRegPair: {
713 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
714 ai->reg = ainfo->reg;
715 ai->size = ainfo->size;
716 ai->offset = ainfo->offset;
718 arg->backend.data = ai;
720 call->used_iregs |= 1 << ainfo->reg;
/* A pair occupies two consecutive integer registers. */
721 if (ainfo->storage == ArgInIRegPair)
722 call->used_iregs |= 1 << (ainfo->reg + 1);
723 if (ainfo->type == MONO_TYPE_VALUETYPE)
724 arg->opcode = OP_OUTARG_VT;
728 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
730 ai->size = ainfo->size;
731 ai->offset = ainfo->offset;
733 arg->backend.data = ai;
734 if (ainfo->type == MONO_TYPE_VALUETYPE)
735 arg->opcode = OP_OUTARG_VT;
737 arg->opcode = OP_OUTARG_MEMBASE;
738 call->used_iregs |= 1 << ainfo->reg;
742 arg->backend.reg3 = ainfo->reg;
743 arg->opcode = OP_OUTARG_R4;
744 call->used_fregs |= 1 << ainfo->reg;
747 arg->backend.reg3 = ainfo->reg;
748 arg->opcode = OP_OUTARG_R8;
749 call->used_fregs |= 1 << ainfo->reg;
758 * Reverse the call->out_args list.
761 MonoInst *prev = NULL, *list = call->out_args, *next;
768 call->out_args = prev;
770 call->stack_usage = cinfo->stack_usage;
771 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
772 cfg->param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
774 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Arch-specific peephole pass; body elided in this chunk. */
783 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
/* Link TO_INSERT into BB's instruction list after INS (or at the head
 * when inserting at the start — the condition line is elided here). */
790 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
794 bb->code = to_insert;
795 to_insert->next = ins;
797 to_insert->next = ins->next;
798 ins->next = to_insert;
/* Allocate a zeroed MonoInst with opcode OP and splice it in after
 * last_ins (captured from the enclosing scope at the use site). */
802 #define NEW_INS(cfg,dest,op) do { \
803 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
804 (dest)->opcode = (op); \
805 insert_after_ins (bb, last_ins, (dest)); \
/* Map a MEMBASE (register + immediate offset) or *_MEMBASE_IMM opcode to
 * its register-indexed / register-source equivalent, used by the lowering
 * pass when an immediate does not fit the instruction encoding.
 * NOTE(review): the switch statement opener and earlier case labels are
 * elided in this chunk. */
809 map_to_reg_reg_op (int op)
834 case OP_LOAD_MEMBASE:
835 return OP_LOAD_MEMINDEX;
836 case OP_LOADI4_MEMBASE:
837 return OP_LOADI4_MEMINDEX;
838 case OP_LOADU4_MEMBASE:
839 return OP_LOADU4_MEMINDEX;
840 case OP_LOADU1_MEMBASE:
841 return OP_LOADU1_MEMINDEX;
842 case OP_LOADI2_MEMBASE:
843 return OP_LOADI2_MEMINDEX;
844 case OP_LOADU2_MEMBASE:
845 return OP_LOADU2_MEMINDEX;
846 case OP_LOADI1_MEMBASE:
847 return OP_LOADI1_MEMINDEX;
848 case OP_LOADR4_MEMBASE:
849 return OP_LOADR4_MEMINDEX;
850 case OP_LOADR8_MEMBASE:
851 return OP_LOADR8_MEMINDEX;
852 case OP_STOREI1_MEMBASE_REG:
853 return OP_STOREI1_MEMINDEX;
854 case OP_STOREI2_MEMBASE_REG:
855 return OP_STOREI2_MEMINDEX;
856 case OP_STOREI4_MEMBASE_REG:
857 return OP_STOREI4_MEMINDEX;
858 case OP_STORE_MEMBASE_REG:
859 return OP_STORE_MEMINDEX;
860 case OP_STORER4_MEMBASE_REG:
861 return OP_STORER4_MEMINDEX;
862 case OP_STORER8_MEMBASE_REG:
863 return OP_STORER8_MEMINDEX;
/* *_MEMBASE_IMM forms lower to *_MEMBASE_REG (the immediate gets
 * materialized into a register first). */
864 case OP_STORE_MEMBASE_IMM:
865 return OP_STORE_MEMBASE_REG;
866 case OP_STOREI1_MEMBASE_IMM:
867 return OP_STOREI1_MEMBASE_REG;
868 case OP_STOREI2_MEMBASE_IMM:
869 return OP_STOREI2_MEMBASE_REG;
870 case OP_STOREI4_MEMBASE_IMM:
871 return OP_STOREI4_MEMBASE_REG;
873 g_assert_not_reached ();
877 * Remove from the instruction list the instructions that can't be
878 * represented with very simple instructions with no register
/* Lowering pass: rewrite instructions whose immediates do not fit the
 * HPPA encodings (materialize them with OP_ICONST into a fresh vreg),
 * strength-reduce multiplies, and expand float multiply via xmpyu
 * through a scratch slot at sp-16.
 * NOTE(review): the loop structure, several case labels, and some
 * assignments in this function are elided in this chunk. */
882 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
884 MonoInst *ins, *next, *temp, *last_ins = NULL;
887 /* setup the virtual reg allocator */
888 if (bb->max_vreg > cfg->rs->next_vreg)
889 cfg->rs->next_vreg = bb->max_vreg;
894 switch (ins->opcode) {
/* Immediate does not fit in 11 bits: load it into a register and
 * switch to the register-register form of the opcode. */
897 if (!hppa_check_bits (ins->inst_imm, 11)) {
898 NEW_INS (cfg, temp, OP_ICONST);
899 temp->inst_c0 = ins->inst_imm;
900 temp->dreg = mono_regstate_next_int (cfg->rs);
901 ins->sreg2 = temp->dreg;
902 ins->opcode = map_to_reg_reg_op (ins->opcode);
907 if (!hppa_check_bits (ins->inst_imm, 11)) {
908 NEW_INS (cfg, temp, OP_ICONST);
909 temp->inst_c0 = ins->inst_imm;
910 temp->dreg = mono_regstate_next_int (cfg->rs);
911 ins->sreg2 = temp->dreg;
912 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Multiply strength reduction: x*1 -> move, x*0 -> const 0,
 * x*2^k -> shift. */
917 if (ins->inst_imm == 1) {
918 ins->opcode = OP_MOVE;
921 if (ins->inst_imm == 0) {
922 ins->opcode = OP_ICONST;
926 imm = mono_is_power_of_two (ins->inst_imm);
928 ins->opcode = OP_SHL_IMM;
933 int tmp = mono_regstate_next_int (cfg->rs);
934 NEW_INS (cfg, temp, OP_ICONST);
935 temp->inst_c0 = ins->inst_c0;
938 ins->opcode = CEE_MUL;
940 /* Need to rewrite the CEE_MUL too... */
/* Integer multiply via the FPU: spill both operands to sp-16, reload
 * as floats, xmpyu them, store the low word back, reload result. */
946 int freg1 = mono_regstate_next_float (cfg->rs);
947 int freg2 = mono_regstate_next_float (cfg->rs);
949 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
950 temp->sreg1 = ins->sreg1;
951 temp->inst_destbasereg = hppa_sp;
952 temp->inst_offset = -16;
954 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
956 temp->inst_basereg = hppa_sp;
957 temp->inst_offset = -16;
959 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
960 temp->sreg1 = ins->sreg2;
961 temp->inst_destbasereg = hppa_sp;
962 temp->inst_offset = -16;
964 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
966 temp->inst_basereg = hppa_sp;
967 temp->inst_offset = -16;
969 NEW_INS (cfg, temp, OP_HPPA_XMPYU);
974 NEW_INS(cfg, temp, OP_HPPA_STORER4_RIGHT);
976 temp->inst_destbasereg = hppa_sp;
977 temp->inst_offset = -16;
979 ins->opcode = OP_LOAD_MEMBASE;
980 ins->inst_basereg = hppa_sp;
981 ins->inst_offset = -16;
991 bb->last_ins = last_ins;
992 bb->max_vreg = cfg->rs->next_vreg;
/* Local register allocation entry point: lower first, then run the
 * generic local allocator. */
997 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1002 mono_arch_lowering_pass (cfg, bb);
1003 mono_local_regalloc (cfg, bb);
/* Patch the instruction at CODE to reference TARGET: rewrites the
 * immediate fields of ldil/ldo/ldw/ble pairs for absolute addresses, and
 * the displacement fields of bl/comb* for pc-relative branches. The
 * (gint32) casts assume 32-bit pointers (hppa32).
 * NOTE(review): several case labels and control-flow lines of the switch
 * are elided in this chunk. */
1008 hppa_patch (guint32 *code, const gpointer target)
1010 guint32 ins = *code;
1011 gint32 val = (gint32)target;
/* Branch displacement: relative to pc+8, in instruction words. */
1012 gint32 disp = (val - (gint32)code - 8) >> 2;
1015 DEBUG (printf ("patching 0x%08x (0x%08x) to point to 0x%08x (disp = %d)\n", code, ins, val, disp));
1017 switch (*code >> 26) {
1018 case 0x08: /* ldil, next insn can be a ldo, ldw, or ble */
/* Rewrite the 21-bit left-part immediate of the ldil. */
1019 *code = *code & ~0x1fffff;
1020 *code = *code | hppa_op_imm21 (hppa_lsel (val));
1023 if ((*code >> 26) == 0x0D) { /* ldo */
1024 *code = *code & ~0x3fff;
1025 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1026 } else if ((*code >> 26) == 0x12) { /* ldw */
1027 *code = *code & ~0x3fff;
1028 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1029 } else if ((*code >> 26) == 0x39) { /* ble */
1030 *code = *code & ~0x1f1ffd;
1031 *code = *code | hppa_op_imm17 (hppa_rsel (val));
/* Direct branch: displacement must fit in 17 bits or we bail out. */
1041 if (!hppa_check_bits (disp, 17))
1043 reg1 = (*code >> 21) & 0x1f;
1044 *code = (*code & ~0x1f1ffd) | hppa_op_imm17(disp);
1047 case 0x20: /* combt */
1048 case 0x22: /* combf */
1049 if (!hppa_check_bits (disp >> 2, 12))
1051 *code = (*code & ~0x1ffd) | hppa_op_imm12(disp);
1055 g_warning ("Unpatched opcode %x\n", *code >> 26);
1061 g_warning ("cannot branch to target, insn is %08x, displacement is %d\n", (int)*code, (int)disp);
1062 g_assert_not_reached ();
/* Emit code converting double SREG to an integer in DREG: fcnvfxt to an
 * integer bit pattern, bounce through the scratch word at sp-16, then
 * truncate/sign-extend to SIZE bytes with extru/extrs.
 * NOTE(review): the if/else lines selecting the signed vs unsigned
 * extraction are elided in this chunk. */
1066 emit_float_to_int (MonoCompile *cfg, guint32 *code, int dreg, int sreg, int size, gboolean is_signed)
1068 /* sreg is a float, dreg is an integer reg. */
1069 hppa_fcnvfxt (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, sreg, sreg);
1070 hppa_fstws (code, sreg, 0, -16, hppa_sp);
1071 hppa_ldw (code, -16, hppa_sp, dreg);
1074 hppa_extru (code, dreg, 31, 8, dreg);
1076 hppa_extru (code, dreg, 31, 16, dreg);
1079 hppa_extrs (code, dreg, 31, 8, dreg);
1081 hppa_extrs (code, dreg, 31, 16, dreg);
1086 /* Clobbers r1, r20, r21 */
/* Emit an inline copy of SIZE bytes from SREG+SOFF to DREG+DOFF, using
 * word then halfword then byte copies.
 * NOTE(review): the loop conditions consuming 'size' are elided in this
 * chunk. */
1088 emit_memcpy (guint32 *code, int doff, int dreg, int soff, int sreg, int size)
1090 /* r20 is the destination */
1091 hppa_set (code, doff, hppa_r20);
1092 hppa_add (code, hppa_r20, dreg, hppa_r20);
1094 /* r21 is the source */
1095 hppa_set (code, soff, hppa_r21);
1096 hppa_add (code, hppa_r21, sreg, hppa_r21);
/* Copy 4 bytes at a time through scratch register r1. */
1099 hppa_ldw (code, 0, hppa_r21, hppa_r1);
1100 hppa_stw (code, hppa_r1, 0, hppa_r20);
1101 hppa_ldo (code, 4, hppa_r21, hppa_r21);
1102 hppa_ldo (code, 4, hppa_r20, hppa_r20);
/* Then 2 bytes at a time. */
1106 hppa_ldh (code, 0, hppa_r21, hppa_r1);
1107 hppa_sth (code, hppa_r1, 0, hppa_r20);
1108 hppa_ldo (code, 2, hppa_r21, hppa_r21);
1109 hppa_ldo (code, 2, hppa_r20, hppa_r20);
/* Then the remaining single bytes. */
1113 hppa_ldb (code, 0, hppa_r21, hppa_r1);
1114 hppa_stb (code, hppa_r1, 0, hppa_r20);
1115 hppa_ldo (code, 1, hppa_r21, hppa_r21);
1116 hppa_ldo (code, 1, hppa_r20, hppa_r20);
1124 * mono_arch_get_vcall_slot_addr:
1126 * Determine the vtable slot used by a virtual call.
/* Walk backwards from the return address: a virtual call site is marked
 * by the magic "ldo 0x777(r0),r0" token, preceded by the ldw that loaded
 * the slot; decode that ldw's base register and displacement.
 * NOTE(review): return statements of this function are elided in this
 * chunk. */
1129 mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
/* Clear the low privilege bits of the return address. */
1131 guint32 *code = (guint32*)((unsigned long)code8 & ~3);
1136 /* This is the special virtual call token */
1137 if (code [-1] != 0x34000eee) /* ldo 0x777(r0),r0 */
1140 if ((code [0] >> 26) == 0x39 && /* ble */
1141 (code [-2] >> 26) == 0x12) { /* ldw */
1142 guint32 ldw = code [-2];
1143 guint32 reg = (ldw >> 21) & 0x1f;
/* Decode the ldw's low-sign-extended 14-bit displacement. */
1144 gint32 disp = ((ldw & 1) ? (-1 << 13) : 0) | ((ldw & 0x3fff) >> 1);
1145 /* FIXME: we are not guaranteed that reg is saved in the LMF.
1146 * In fact, it probably isn't, since it is allocated as a
1147 * callee register. Right now just return an address; this
1148 * is sufficient for non-AOT operation
1150 // return (gpointer)((guint8*)regs [reg] + disp);
1154 g_assert_not_reached ();
1159 /* ins->dreg = *(ins->inst_desgbasereg + ins->inst_offset) */
/* Load with a 14-bit-checked offset: fall back to an indexed load via
 * scratch r1 when the offset does not fit the immediate form. */
1160 #define EMIT_LOAD_MEMBASE(ins, op) do { \
1161 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1162 hppa_set (code, ins->inst_offset, hppa_r1); \
1163 hppa_ ## op ## x (code, hppa_r1, ins->inst_basereg, ins->dreg); \
1166 hppa_ ## op (code, ins->inst_offset, ins->inst_basereg, ins->dreg); \
/* Emit a conditional branch (combt/combf) with a patchable target;
 * b1 is the compare condition, b0 (per branch_b0_table) picks the
 * combt vs combf form. Some alternative-path lines are elided here. */
1170 #define EMIT_COND_BRANCH_FLAGS(ins,r1,r2,b0,b1) do {\
1171 if (ins->flags & MONO_INST_BRLABEL) { \
1172 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1174 hppa_combt (code, r1, r2, b1, 0); \
1176 hppa_combf (code, r1, r2, b1, 0); \
1179 hppa_combf (code, r1, r2, b1, 2); \
1181 hppa_combt (code, r1, r2, b1, 2); \
1183 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1184 hppa_bl (code, 0, hppa_r0); \
1189 #define EMIT_COND_BRANCH(ins,r1,r2,cond) EMIT_COND_BRANCH_FLAGS(ins, r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)])
/* FP conditional branch: fcmp + ftest, then a patchable bl. */
1191 #define EMIT_FLOAT_COND_BRANCH_FLAGS(ins,r1,r2,b0) do {\
1192 hppa_fcmp (code, HPPA_FP_FMT_DBL, b0, r1, r2); \
1193 hppa_ftest (code, 0); \
1194 if (ins->flags & MONO_INST_BRLABEL) \
1195 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1197 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1198 hppa_bl (code, 8, hppa_r0); \
1202 #define EMIT_FLOAT_COND_BRANCH(ins,r1,r2,cond) EMIT_FLOAT_COND_BRANCH_FLAGS(ins, r1, r2, float_branch_table [cond])
/* Compare and, on the exception condition, branch to an MonoOvfJump stub
 * that raises EXC_NAME; r2 is loaded with the continuation address. */
1204 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1,r2,b0,b1,exc_name) \
1206 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1207 ovfj->data.exception = (exc_name); \
1208 ovfj->ip_offset = (guint8*)code - cfg->native_code; \
1209 hppa_bl (code, 8, hppa_r2); \
1210 hppa_depi (code, 0, 31, 2, hppa_r2); \
1211 hppa_ldo (code, 8, hppa_r2, hppa_r2); \
1213 hppa_combf (code, r1, r2, b1, 2); \
1215 hppa_combt (code, r1, r2, b1, 2); \
1217 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1218 hppa_bl (code, 0, hppa_r0); \
1222 #define EMIT_COND_SYSTEM_EXCEPTION(r1,r2,cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1224 /* TODO: MEM_INDEX_REG - cannot be r1 */
1225 #define MEM_INDEX_REG hppa_r31
1226 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->inst_imm */
/* Store an immediate: materialize it into r1 (r0 is used for zero on a
 * path elided here), then store with an offset-range check. */
1227 #define EMIT_STORE_MEMBASE_IMM(ins, op) do { \
1229 if (ins->inst_imm == 0) \
1232 hppa_set (code, ins->inst_imm, hppa_r1); \
1235 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1236 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1237 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1238 hppa_ ## op (code, sreg, 0, MEM_INDEX_REG); \
1241 hppa_ ## op (code, sreg, ins->inst_offset, ins->inst_destbasereg); \
1245 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->sreg1 */
/* Register store with the same 14-bit offset-range fallback as above. */
1246 #define EMIT_STORE_MEMBASE_REG(ins, op) do { \
1247 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1248 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1249 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1250 hppa_ ## op (code, ins->sreg1, 0, MEM_INDEX_REG); \
1253 hppa_ ## op (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); \
1258 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1263 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
1264 MonoInst *last_ins = NULL;
1269 if (cfg->opt & MONO_OPT_PEEPHOLE)
1270 peephole_pass (cfg, bb);
1272 if (cfg->verbose_level > 2)
1273 g_print ("[%s::%s] Basic block %d starting at offset 0x%x\n", cfg->method->klass->name, cfg->method->name, bb->block_num, bb->native_offset);
1275 cpos = bb->max_offset;
1277 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1285 offset = (guint8*)code - cfg->native_code;
1287 spec = ins_get_spec (ins->opcode);
1289 max_len = ((guint8 *)spec) [MONO_INST_LEN];
1291 if (offset > (cfg->code_size - max_len - 16)) {
1292 cfg->code_size *= 2;
1293 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1294 code = (guint32*)(cfg->native_code + offset);
1295 mono_jit_stats.code_reallocs++;
1297 code_start = (guint8*)code;
1298 // if (ins->cil_code)
1299 // g_print ("cil code\n");
1300 mono_debug_record_line_number (cfg, ins, offset);
1302 switch (ins->opcode) {
1303 case OP_STOREI1_MEMBASE_IMM:
1304 EMIT_STORE_MEMBASE_IMM (ins, stb);
1306 case OP_STOREI2_MEMBASE_IMM:
1307 EMIT_STORE_MEMBASE_IMM (ins, sth);
1309 case OP_STORE_MEMBASE_IMM:
1310 case OP_STOREI4_MEMBASE_IMM:
1311 EMIT_STORE_MEMBASE_IMM (ins, stw);
1313 case OP_STOREI1_MEMBASE_REG:
1314 EMIT_STORE_MEMBASE_REG (ins, stb);
1316 case OP_STOREI2_MEMBASE_REG:
1317 EMIT_STORE_MEMBASE_REG (ins, sth);
1319 case OP_STORE_MEMBASE_REG:
1320 case OP_STOREI4_MEMBASE_REG:
1321 EMIT_STORE_MEMBASE_REG (ins, stw);
1328 case OP_LOADU1_MEMBASE:
1329 EMIT_LOAD_MEMBASE (ins, ldb);
1331 case OP_LOADI1_MEMBASE:
1332 EMIT_LOAD_MEMBASE (ins, ldb);
1333 hppa_extrs (code, ins->dreg, 31, 8, ins->dreg);
1335 case OP_LOADU2_MEMBASE:
1336 EMIT_LOAD_MEMBASE (ins, ldh);
1338 case OP_LOADI2_MEMBASE:
1339 EMIT_LOAD_MEMBASE (ins, ldh);
1340 hppa_extrs (code, ins->dreg, 31, 16, ins->dreg);
1342 case OP_LOAD_MEMBASE:
1343 case OP_LOADI4_MEMBASE:
1344 case OP_LOADU4_MEMBASE:
1345 EMIT_LOAD_MEMBASE (ins, ldw);
1348 hppa_extrs (code, ins->sreg1, 31, 8, ins->dreg);
1351 hppa_extrs (code, ins->sreg1, 31, 16, ins->dreg);
1354 hppa_extru (code, ins->sreg1, 31, 8, ins->dreg);
1357 hppa_extru (code, ins->sreg1, 31, 16, ins->dreg);
1364 if (ins->sreg1 != ins->dreg)
1365 hppa_copy (code, ins->sreg1, ins->dreg);
1368 hppa_copy (code, ins->sreg1 + 1, ins->dreg);
1369 hppa_copy (code, ins->sreg1, ins->dreg + 1);
1373 /* break 4,8 - this is what gdb normally uses... */
1374 *code++ = 0x00010004;
1378 hppa_add (code, ins->sreg1, ins->sreg2, ins->dreg);
1381 hppa_addc (code, ins->sreg1, ins->sreg2, ins->dreg);
1385 hppa_addi (code, ins->inst_imm, ins->sreg1, ins->dreg);
1388 hppa_set (code, ins->inst_imm, hppa_r1);
1389 hppa_addc (code, ins->sreg1, hppa_r1, ins->dreg);
1391 case OP_HPPA_ADD_OVF: {
1392 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1393 hppa_bl (code, 8, hppa_r2);
1394 hppa_depi (code, 0, 31, 2, hppa_r2);
1395 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1397 if (ins->backend.reg3 == CEE_ADD_OVF)
1398 hppa_add_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1400 hppa_add_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1402 ovfj->data.exception = "OverflowException";
1403 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1404 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1405 hppa_bl_n (code, 8, hppa_r0);
1408 case OP_HPPA_ADDC_OVF: {
1409 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1410 hppa_bl (code, 8, hppa_r2);
1411 hppa_depi (code, 0, 31, 2, hppa_r2);
1412 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1414 if (ins->backend.reg3 == OP_LADD_OVF)
1415 hppa_addc_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1417 hppa_addc_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1419 ovfj->data.exception = "OverflowException";
1420 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1421 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1422 hppa_bl_n (code, 8, hppa_r0);
1427 hppa_sub (code, ins->sreg1, ins->sreg2, ins->dreg);
1431 hppa_addi (code, -ins->inst_imm, ins->sreg1, ins->dreg);
1434 hppa_subb (code, ins->sreg1, ins->sreg2, ins->dreg);
1437 hppa_set (code, ins->inst_imm, hppa_r1);
1438 hppa_subb (code, ins->sreg1, hppa_r1, ins->dreg);
1440 case OP_HPPA_SUB_OVF: {
1441 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1442 hppa_bl (code, 8, hppa_r2);
1443 hppa_depi (code, 0, 31, 2, hppa_r2);
1444 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1445 hppa_sub_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1446 ovfj->data.exception = "OverflowException";
1447 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1448 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1449 hppa_bl_n (code, 8, hppa_r0);
1452 case OP_HPPA_SUBB_OVF: {
1453 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1454 hppa_bl (code, 8, hppa_r2);
1455 hppa_depi (code, 0, 31, 2, hppa_r2);
1456 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1458 hppa_subb_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1459 ovfj->data.exception = "OverflowException";
1460 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1461 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1462 hppa_bl_n (code, 8, hppa_r0);
1467 hppa_and (code, ins->sreg1, ins->sreg2, ins->dreg);
1470 hppa_set (code, ins->inst_imm, hppa_r1);
1471 hppa_and (code, ins->sreg1, hppa_r1, ins->dreg);
1475 hppa_or (code, ins->sreg1, ins->sreg2, ins->dreg);
1479 hppa_set (code, ins->inst_imm, hppa_r1);
1480 hppa_or (code, ins->sreg1, hppa_r1, ins->dreg);
1484 hppa_xor (code, ins->sreg1, ins->sreg2, ins->dreg);
1487 hppa_set (code, ins->inst_imm, hppa_r1);
1488 hppa_xor (code, ins->sreg1, hppa_r1, ins->dreg);
1491 if (ins->sreg1 != ins->dreg) {
1492 hppa_shl (code, ins->sreg1, ins->sreg2, ins->dreg);
1495 hppa_copy (code, ins->sreg1, hppa_r1);
1496 hppa_shl (code, hppa_r1, ins->sreg2, ins->dreg);
1501 g_assert (ins->inst_imm < 32);
1502 if (ins->sreg1 != ins->dreg) {
1503 hppa_zdep (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1506 hppa_copy (code, ins->sreg1, hppa_r1);
1507 hppa_zdep (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1511 if (ins->sreg1 != ins->dreg) {
1512 hppa_shr (code, ins->sreg1, ins->sreg2, ins->dreg);
1515 hppa_copy (code, ins->sreg1, hppa_r1);
1516 hppa_shr (code, hppa_r1, ins->sreg2, ins->dreg);
1520 g_assert (ins->inst_imm < 32);
1521 if (ins->sreg1 != ins->dreg) {
1522 hppa_extrs (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1525 hppa_copy (code, ins->sreg1, hppa_r1);
1526 hppa_extrs (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1530 g_assert (ins->inst_imm < 32);
1531 if (ins->sreg1 != ins->dreg) {
1532 hppa_extru (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1535 hppa_copy (code, ins->sreg1, hppa_r1);
1536 hppa_extru (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1540 if (ins->sreg1 != ins->dreg) {
1541 hppa_lshr (code, ins->sreg1, ins->sreg2, ins->dreg);
1544 hppa_copy (code, ins->sreg1, hppa_r1);
1545 hppa_lshr (code, hppa_r1, ins->sreg2, ins->dreg);
1549 hppa_not (code, ins->sreg1, ins->dreg);
1552 hppa_subi (code, 0, ins->sreg1, ins->dreg);
1557 /* Should have been rewritten using xmpyu */
1558 g_assert_not_reached ();
1561 if ((ins->inst_c0 > 0 && ins->inst_c0 >= (1 << 13)) ||
1562 (ins->inst_c0 < 0 && ins->inst_c0 < -(1 << 13))) {
1563 hppa_ldil (code, hppa_lsel (ins->inst_c0), ins->dreg);
1564 hppa_ldo (code, hppa_rsel (ins->inst_c0), ins->dreg, ins->dreg);
1566 hppa_ldo (code, ins->inst_c0, hppa_r0, ins->dreg);
1570 g_assert_not_reached ();
1572 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
1573 hppa_set_template (code, ins->dreg);
1575 g_warning ("unimplemented opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
1580 if (ins->sreg1 != ins->dreg)
1581 hppa_fcpy (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->dreg);
1584 case OP_HPPA_OUTARG_R4CONST:
1585 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1586 hppa_fldwx (code, hppa_r0, hppa_r1, ins->dreg, 0);
1589 case OP_HPPA_OUTARG_REGOFFSET:
1590 hppa_ldo (code, ins->inst_offset, ins->inst_basereg, ins->dreg);
1595 * Keep in sync with mono_arch_emit_epilog
1597 g_assert (!cfg->method->save_lmf);
1598 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
1599 hppa_bl (code, 8, hppa_r0);
1602 /* ensure ins->sreg1 is not NULL */
1603 hppa_ldw (code, 0, ins->sreg1, hppa_r1);
1612 call = (MonoCallInst*)ins;
1613 if (ins->flags & MONO_INST_HAS_METHOD)
1614 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
1616 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
1617 hppa_ldil (code, 0, hppa_r1);
1618 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1620 * We may have loaded an actual function address, or
1621 * it might be a plabel. Check to see if the plabel
1622 * bit is set, and load the actual fptr from it if
1625 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1626 hppa_depi (code, 0, 31, 2, hppa_r1);
1627 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1628 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1629 hppa_ble (code, 0, hppa_r1);
1630 hppa_copy (code, hppa_r31, hppa_r2);
1631 if (call->signature->ret->type == MONO_TYPE_R4)
1632 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1637 case OP_VOIDCALL_REG:
1639 call = (MonoCallInst*)ins;
1640 g_assert (!call->virtual);
1641 hppa_copy (code, ins->sreg1, hppa_r1);
1642 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1643 hppa_depi (code, 0, 31, 2, hppa_r1);
1644 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1645 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1646 hppa_ble (code, 0, hppa_r1);
1647 hppa_copy (code, hppa_r31, hppa_r2);
1648 if (call->signature->ret->type == MONO_TYPE_R4)
1649 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1651 case OP_FCALL_MEMBASE:
1652 case OP_LCALL_MEMBASE:
1653 case OP_VCALL_MEMBASE:
1654 case OP_VOIDCALL_MEMBASE:
1655 case OP_CALL_MEMBASE:
1656 call = (MonoCallInst*)ins;
1657 /* jump to ins->inst_sreg1 + ins->inst_offset */
1658 hppa_ldw (code, ins->inst_offset, ins->sreg1, hppa_r1);
1660 /* For virtual calls, emit a special token that can
1661 * be used by get_vcall_slot_addr
1664 hppa_ldo (code, 0x777, hppa_r0, hppa_r0);
1665 hppa_ble (code, 0, hppa_r1);
1666 hppa_copy (code, hppa_r31, hppa_r2);
1671 /* Keep alignment */
1672 hppa_ldo (code, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1, ins->dreg);
1673 hppa_depi (code, 0, 31, 6, ins->dreg);
1674 hppa_copy (code, hppa_sp, hppa_r1);
1675 hppa_addl (code, ins->dreg, hppa_sp, hppa_sp);
1676 hppa_copy (code, hppa_r1, ins->dreg);
1678 if (ins->flags & MONO_INST_INIT) {
1679 hppa_stw (code, hppa_r0, 0, hppa_r1);
1680 hppa_combt (code, hppa_r1, hppa_sp, HPPA_CMP_COND_ULT, -3);
1681 hppa_ldo (code, 4, hppa_r1, hppa_r1);
1687 hppa_copy (code, ins->sreg1, hppa_r26);
1688 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1689 (gpointer)"mono_arch_throw_exception");
1690 hppa_ldil (code, 0, hppa_r1);
1691 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1692 hppa_ble (code, 0, hppa_r1);
1693 hppa_copy (code, hppa_r31, hppa_r2);
1694 /* should never return */
1695 *code++ = 0xffeeddcc;
1698 hppa_copy (code, ins->sreg1, hppa_r26);
1699 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1700 (gpointer)"mono_arch_rethrow_exception");
1701 hppa_ldil (code, 0, hppa_r1);
1702 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1703 hppa_ble (code, 0, hppa_r1);
1704 hppa_copy (code, hppa_r31, hppa_r2);
1705 /* should never return */
1706 *code++ = 0xffeeddcc;
1708 case OP_START_HANDLER:
1709 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1710 hppa_stw (code, hppa_r2, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
1712 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1713 hppa_addl (code, ins->inst_left->inst_basereg, hppa_r1, hppa_r1);
1714 hppa_stw (code, hppa_r2, 0, hppa_r1);
1718 if (ins->sreg1 != hppa_r26)
1719 hppa_copy (code, ins->sreg1, hppa_r26);
1720 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1721 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r2);
1723 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1724 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r2);
1726 hppa_bv (code, hppa_r0, hppa_r2);
1730 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1731 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r1);
1733 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1734 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r1);
1736 hppa_bv (code, hppa_r0, hppa_r1);
1739 case OP_CALL_HANDLER:
1740 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1741 hppa_bl (code, 0, hppa_r2);
1745 ins->inst_c0 = (guint8*)code - cfg->native_code;
1749 DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins));
1750 if (ins->flags & MONO_INST_BRLABEL) {
1751 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
1753 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1755 hppa_bl (code, 8, hppa_r0);
1756 /* TODO: if the branch is too long, we may need to
1757 * use a long-branch sequence:
1758 * hppa_ldil (code, 0, hppa_r1);
1759 * hppa_ldo (code, 0, hppa_r1, hppa_r1);
1760 * hppa_bv (code, hppa_r0, hppa_r1);
1766 hppa_bv (code, hppa_r0, ins->sreg1);
1773 max_len += 8 * GPOINTER_TO_INT (ins->klass);
1774 if (offset > (cfg->code_size - max_len - 16)) {
1775 cfg->code_size += max_len;
1776 cfg->code_size *= 2;
1777 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1778 code = cfg->native_code + offset;
1779 code_start = (guint8*)code;
1781 hppa_blr (code, ins->sreg1, hppa_r0);
1783 for (i = 0; i < GPOINTER_TO_INT (ins->klass); ++i) {
1784 *code++ = 0xdeadbeef;
1785 *code++ = 0xdeadbeef;
1790 /* comclr is cool :-) */
1792 hppa_comclr_cond (code, HPPA_SUB_COND_NE, ins->sreg1, ins->sreg2, ins->dreg);
1793 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1797 hppa_comclr_cond (code, HPPA_SUB_COND_SGE, ins->sreg1, ins->sreg2, ins->dreg);
1798 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1801 case OP_HPPA_CLT_UN:
1802 hppa_comclr_cond (code, HPPA_SUB_COND_UGE, ins->sreg1, ins->sreg2, ins->dreg);
1803 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1807 hppa_comclr_cond (code, HPPA_SUB_COND_SLE, ins->sreg1, ins->sreg2, ins->dreg);
1808 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1811 case OP_HPPA_CGT_UN:
1812 hppa_comclr_cond (code, HPPA_SUB_COND_ULE, ins->sreg1, ins->sreg2, ins->dreg);
1813 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1821 case OP_COND_EXC_EQ:
1822 case OP_COND_EXC_NE_UN:
1823 case OP_COND_EXC_LT:
1824 case OP_COND_EXC_LT_UN:
1825 case OP_COND_EXC_GT:
1826 case OP_COND_EXC_GT_UN:
1827 case OP_COND_EXC_GE:
1828 case OP_COND_EXC_GE_UN:
1829 case OP_COND_EXC_LE:
1830 case OP_COND_EXC_LE_UN:
1831 case OP_COND_EXC_OV:
1832 case OP_COND_EXC_NO:
1834 case OP_COND_EXC_NC:
1835 case OP_COND_EXC_IOV:
1836 case OP_COND_EXC_IC:
1850 case OP_COMPARE_IMM:
1851 case OP_ICOMPARE_IMM:
1852 g_warning ("got opcode %s in %s(), should be reduced\n", mono_inst_name (ins->opcode), __FUNCTION__);
1853 g_assert_not_reached ();
1859 case OP_HPPA_BLT_UN:
1861 case OP_HPPA_BGT_UN:
1863 case OP_HPPA_BGE_UN:
1865 case OP_HPPA_BLE_UN:
1866 EMIT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_BEQ);
1869 case OP_HPPA_COND_EXC_EQ:
1870 case OP_HPPA_COND_EXC_GE:
1871 case OP_HPPA_COND_EXC_GT:
1872 case OP_HPPA_COND_EXC_LE:
1873 case OP_HPPA_COND_EXC_LT:
1874 case OP_HPPA_COND_EXC_NE_UN:
1875 case OP_HPPA_COND_EXC_GE_UN:
1876 case OP_HPPA_COND_EXC_GT_UN:
1877 case OP_HPPA_COND_EXC_LE_UN:
1878 case OP_HPPA_COND_EXC_LT_UN:
1879 EMIT_COND_SYSTEM_EXCEPTION (ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_COND_EXC_EQ, ins->inst_p1);
1882 case OP_HPPA_COND_EXC_OV:
1883 case OP_HPPA_COND_EXC_NO:
1884 case OP_HPPA_COND_EXC_C:
1885 case OP_HPPA_COND_EXC_NC:
1888 /* floating point opcodes */
1890 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1891 hppa_flddx (code, hppa_r0, hppa_r1, ins->dreg);
1894 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1895 hppa_fldwx (code, hppa_r0, hppa_r1, hppa_fr31, 0);
1896 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1898 case OP_STORER8_MEMBASE_REG:
1899 hppa_set (code, ins->inst_offset, hppa_r1);
1900 hppa_fstdx (code, ins->sreg1, hppa_r1, ins->inst_destbasereg);
1902 case OP_LOADR8_MEMBASE:
1903 hppa_set (code, ins->inst_offset, hppa_r1);
1904 hppa_flddx (code, hppa_r1, ins->inst_basereg, ins->dreg);
1906 case OP_STORER4_MEMBASE_REG:
1907 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, hppa_fr31);
1908 if (hppa_check_bits (ins->inst_offset, 5)) {
1909 hppa_fstws (code, hppa_fr31, 0, ins->inst_offset, ins->inst_destbasereg);
1911 hppa_set (code, ins->inst_offset, hppa_r1);
1912 hppa_fstwx (code, hppa_fr31, 0, hppa_r1, ins->inst_destbasereg);
1915 case OP_HPPA_STORER4_LEFT:
1916 case OP_HPPA_STORER4_RIGHT:
1917 if (hppa_check_bits (ins->inst_offset, 5)) {
1918 hppa_fstws (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), ins->inst_offset, ins->inst_destbasereg);
1920 hppa_set (code, ins->inst_offset, hppa_r1);
1921 hppa_fstwx (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), hppa_r1, ins->inst_destbasereg);
1924 case OP_LOADR4_MEMBASE:
1925 if (hppa_check_bits (ins->inst_offset, 5)) {
1926 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, hppa_fr31, 0);
1928 hppa_set (code, ins->inst_offset, hppa_r1);
1929 hppa_fldwx (code, hppa_r1, ins->inst_basereg, hppa_fr31, 0);
1931 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1933 case OP_HPPA_LOADR4_LEFT:
1934 case OP_HPPA_LOADR4_RIGHT:
1935 if (hppa_check_bits (ins->inst_offset, 5)) {
1936 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1938 hppa_set (code, ins->inst_offset, hppa_r1);
1939 hppa_fldwx (code, hppa_r1, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1944 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1945 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1946 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_SGL, hppa_fr31, ins->dreg);
1947 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1950 case OP_FCONV_TO_R4:
1951 /* reduce precision */
1952 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1953 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1956 case OP_HPPA_SETF4REG:
1957 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1960 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1961 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1962 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1965 case OP_FCONV_TO_I1:
1966 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
1968 case OP_FCONV_TO_U1:
1969 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
1971 case OP_FCONV_TO_I2:
1972 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
1974 case OP_FCONV_TO_U2:
1975 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
1977 case OP_FCONV_TO_I4:
1979 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
1981 case OP_FCONV_TO_U4:
1983 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
1986 case OP_FCONV_TO_I8:
1987 case OP_FCONV_TO_U8:
1988 g_assert_not_reached ();
1989 /* Implemented as helper calls */
1991 case OP_LCONV_TO_R_UN:
1992 g_assert_not_reached ();
1993 /* Implemented as helper calls */
1996 case OP_LCONV_TO_OVF_I:
2001 hppa_fadd (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
2004 hppa_fsub (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
2007 hppa_fmul (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
2010 hppa_fdiv (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
2017 g_assert_not_reached();
2025 hppa_fcmp (code, HPPA_FP_FMT_DBL, float_ceq_table [ins->opcode - OP_FCEQ], ins->sreg1, ins->sreg2);
2026 hppa_ftest (code, 0);
2027 hppa_bl (code, 12, hppa_r0);
2028 hppa_ldo (code, 1, hppa_r0, ins->dreg);
2029 hppa_ldo (code, 0, hppa_r0, ins->dreg);
2042 EMIT_FLOAT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_FBEQ);
2046 case OP_MEMORY_BARRIER:
2050 hppa_xmpyu (code, ins->sreg1, ins->sreg2, ins->dreg);
2054 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
2055 g_assert_not_reached ();
2058 if ((((guint8*)code) - code_start) > max_len) {
2059 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2060 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
2061 g_assert_not_reached ();
2071 cfg->code_len = (guint8*)code - cfg->native_code;
/* Register architecture-specific low-level (icall) helpers with the
 * runtime.  Only the signature is visible in this elided view; the body
 * (if any) is not shown. */
mono_arch_register_lowlevel_calls (void)
/*
 * mono_arch_patch_code:
 * Walk the jump-info list recorded during code generation and rewrite
 * each patch site in `code' to refer to its resolved target (absolute
 * address, basic block, switch table, ...).
 *
 * NOTE(review): braces, `break' statements and some case labels are
 * elided in this view; the comments below annotate only the visible
 * statements.
 */
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
MonoJumpInfo *patch_info;
/* FIXME: Move part of this to arch independent code */
for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* patch_info->ip.i is the byte offset of the patch site within the
 * method's native code */
unsigned char *ip = patch_info->ip.i + code;
target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
DEBUG (printf ("patch_info->type = %d, target = %p\n", patch_info->type, target));
switch (patch_info->type) {
/* these patch kinds require no instruction rewriting here */
case MONO_PATCH_INFO_NONE:
case MONO_PATCH_INFO_BB_OVF:
case MONO_PATCH_INFO_EXC_OVF:
/* IP patches record the address of the patch site itself */
case MONO_PATCH_INFO_IP:
hppa_patch ((guint32 *)ip, ip);
case MONO_PATCH_INFO_CLASS_INIT: {
case MONO_PATCH_INFO_METHOD_JUMP: {
case MONO_PATCH_INFO_SWITCH: {
gpointer *table = (gpointer *)target;
/* rewrite every switch-table slot into an ldil/be pair that
 * transfers control to the resolved case address */
for (i = 0; i < patch_info->data.table->table_size; i++) {
DEBUG (printf ("Patching switch table, table[%d] = %p\n", i, table[i]));
hppa_ldil (ip, hppa_lsel (table [i]), hppa_r1);
hppa_be_n (ip, hppa_rsel (table [i]), hppa_r1);
/* default case: point the patch site at the resolved target */
hppa_patch ((guint32 *)ip, target);
/*
 * mono_arch_instrument_prolog:
 * Emit a call to `func' (the enter-method trace hook) at the start of
 * the method.  `p' is the current code-emission cursor; the return
 * statement is elided from this view.
 */
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
guint32 *code = (guint32*)p;
/* first argument (r26) = the method being entered */
hppa_set (code, cfg->method, hppa_r26);
hppa_copy (code, hppa_r0, hppa_r25); /* NULL sp for now */
/* load `func': clear the two low bits and fetch the entry point
 * through it.
 * NOTE(review): unlike the call paths in the opcode emitter, there is
 * no plabel-bit test here -- presumably `func' is always a procedure
 * label; confirm. */
hppa_set (code, func, hppa_r1);
hppa_depi (code, 0, 31, 2, hppa_r1);
hppa_ldw (code, 0, hppa_r1, hppa_r1);
/* branch-and-link via r31, then move the return address into rp (r2) */
hppa_ble (code, 0, hppa_r1);
hppa_copy (code, hppa_r31, hppa_r2);
2158 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2160 guint32 *code = (guint32*)p;
2163 int save_mode = SAVE_NONE;
2164 MonoMethod *method = cfg->method;
2166 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
2167 case MONO_TYPE_VOID:
2168 /* special case string .ctor icall */
2169 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2170 save_mode = SAVE_ONE;
2172 save_mode = SAVE_NONE;
2177 save_mode = SAVE_ONE;
2179 save_mode = SAVE_TWO;
2184 save_mode = SAVE_FP;
2186 case MONO_TYPE_VALUETYPE:
2187 save_mode = SAVE_STRUCT;
2190 save_mode = SAVE_ONE;
2194 /* Save the result to the stack and also put it into the output registers */
2196 switch (save_mode) {
2199 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
2200 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
2201 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2202 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
2205 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
2206 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2210 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
2212 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
2213 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
2214 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
2219 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2221 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
2229 sparc_set (code, cfg->method, sparc_o0);
2231 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
2234 /* Restore result */
2236 switch (save_mode) {
2238 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
2239 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
2242 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
2245 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * The HPPA stack frame should look like this:
 *
 * ---------------------
 *  incoming params area
 * ---------------------
 *  linkage area           size = ARGS_OFFSET
 * --------------------- fp = psp
 *  HPPA_STACK_LMF_OFFSET
 * ---------------------
 *  MonoLMF structure or saved registers
 * ---------------------
 *  locals                 size = cfg->stack_offset - cfg->param_area
 * ---------------------
 *  params area            size = cfg->param_area - ARGS_OFFSET (aligned)
 * ---------------------
 *  callee linkage area    size = ARGS_OFFSET
 * --------------------- sp
 */
/*
 * mono_arch_emit_prolog:
 * Emit the method prolog: save the return pointer, build the stack
 * frame described in the layout comment above, save either the MonoLMF
 * or the used callee-saved registers, and move/store the incoming
 * arguments to the locations register allocation assigned them.
 *
 * NOTE(review): braces, case labels and some statements are elided in
 * this view; the comments below annotate only the visible lines.
 */
mono_arch_emit_prolog (MonoCompile *cfg)
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
int alloc_size, pos, max_offset, i;
/* body of this test elided -- presumably enables tracing */
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
sig = mono_method_signature (method);
/* initial code-buffer estimate; grown later by the epilog/exception
 * emitters when needed */
cfg->code_size = 512 + sig->param_count * 20;
code = cfg->native_code = g_malloc (cfg->code_size);
/* TODO: enable tail call optimization */
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
/* save the return pointer (rp/r2) in the frame-marker slot sp-20 */
hppa_stw (code, hppa_r2, -20, hppa_sp);
pos = HPPA_STACK_LMF_OFFSET;
/* figure out how much space we need for spilling */
if (!method->save_lmf) {
/* spill callee-save registers */
guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
for (i = 0; i < 32; i++) {
if ((1 << i) & mask)
pos += sizeof (gulong);
/* saving the LMF needs room for a full MonoLMF instead */
pos += sizeof (MonoLMF);
alloc_size = ALIGN_TO (pos + cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) == 0);
cfg->stack_usage = alloc_size;
/* frame setup: the old r3 is routed through r1 so stwm can push it
 * while advancing sp; r3 then becomes the frame pointer */
hppa_copy (code, hppa_r3, hppa_r1);
hppa_copy (code, hppa_sp, hppa_r3);
if (hppa_check_bits (alloc_size, 14))
hppa_stwm (code, hppa_r1, alloc_size, hppa_sp);
/* frame too large for a 14-bit displacement: push 8100 bytes with
 * stwm, then add the remainder via addil/ldo */
hppa_stwm (code, hppa_r1, 8100, hppa_sp);
hppa_addil (code, hppa_lsel (alloc_size - 8100), hppa_sp);
hppa_ldo (code, hppa_rsel (alloc_size - 8100), hppa_r1, hppa_sp);
/* compute max_offset in order to use short forward jumps
 * we always do it on hppa because the immediate displacement
 * for jumps is small */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
/* accumulate each opcode's worst-case emitted length */
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
DEBUG (printf ("Incoming arguments: \n"));
cinfo = get_call_info (sig, sig->pinvoke);
/* We do this first so that we don't have to worry about the LMF-
 * saving code clobbering r28 */
if (cinfo->struct_return)
hppa_stw (code, hppa_r28, cfg->ret->inst_offset, hppa_sp);
/* Save the LMF or the spilled registers */
pos = HPPA_STACK_LMF_OFFSET;
if (!method->save_lmf) {
/* spill callee-save registers */
guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
for (i = 0; i < 32; i++) {
if ((1 << i) & mask) {
/* r3 itself was pushed by stwm above: reload its old value
 * through r1 before storing it into the spill area */
hppa_ldw (code, 0, hppa_r3, hppa_r1);
hppa_stw (code, hppa_r1, pos, hppa_r3);
hppa_stw (code, i, pos, hppa_r3);
pos += sizeof (gulong);
/* save_lmf path: store every saved GREG/FREG into the MonoLMF */
int ofs = lmf_offset + G_STRUCT_OFFSET (MonoLMF, regs);
hppa_ldw (code, 0, hppa_r3, hppa_r1);
hppa_stw (code, hppa_r1, ofs, hppa_r3);
ofs += sizeof (gulong);
for (reg = 4; reg < 32; reg++) {
if (HPPA_IS_SAVED_GREG (reg)) {
hppa_stw (code, reg, ofs, hppa_r3);
ofs += sizeof (gulong);
/* We shouldn't need to save the FP regs.... */
ofs = ALIGN_TO (ofs, sizeof(double));
hppa_set (code, ofs, hppa_r1);
for (reg = 0; reg < 32; reg++) {
if (HPPA_IS_SAVED_FREG (reg)) {
hppa_fstdx (code, reg, hppa_r1, hppa_r3);
hppa_ldo (code, sizeof(double), hppa_r1, hppa_r1);
/* We also spill the arguments onto the stack, because
 * the call to hppa_get_lmf_addr below can clobber them.
 * This goes in the param area that is always allocated. */
/* NOTE(review): the `ofs' used below is updated by elided lines;
 * the reload further down reads from -36..-48(sp) -- confirm */
for (reg = hppa_r26; reg >= hppa_r23; reg--) {
hppa_stw (code, reg, ofs, hppa_sp);
/* keep a copy of the frame base for methods using alloca */
if (cfg->flags & MONO_CFG_HAS_ALLOCA)
hppa_copy (code, hppa_r30, hppa_r4);
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
/* attach the calling thread: mono_jit_thread_attach (domain),
 * called through a procedure label (plabel) */
hppa_set (code, cfg->domain, hppa_r26);
/* NOTE(review): other call sites cast `(guint8*)code' before
 * subtracting cfg->native_code (cf. mono_arch_emit_epilog);
 * confirm the declared type of `code' here */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
hppa_ldil (code, 0, hppa_r1);
hppa_ldo (code, 0, hppa_r1, hppa_r1);
hppa_depi (code, 0, 31, 2, hppa_r1);
hppa_ldw (code, 0, hppa_r1, hppa_r1);
hppa_ble (code, 0, hppa_r1);
hppa_copy (code, hppa_r31, hppa_r2);
if (method->save_lmf) {
/* call mono_get_lmf_addr (); the address comes back in r28 */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
hppa_ldil (code, 0, hppa_r1);
hppa_ldo (code, 0, hppa_r1, hppa_r1);
hppa_depi (code, 0, 31, 2, hppa_r1);
hppa_ldw (code, 0, hppa_r1, hppa_r1);
hppa_ble (code, 0, hppa_r1);
hppa_copy (code, hppa_r31, hppa_r2);
/* lmf_offset is the offset from the previous stack pointer.
 * The pointer to the struct is put in hppa_r22 (new_lmf).
 * The callee-saved registers are already in the MonoLMF. */
/* hppa_r22 = new_lmf (on the stack) */
hppa_ldo (code, lmf_offset, hppa_r3, hppa_r22);
/* new_lmf->lmf_addr = result of mono_get_lmf_addr (r28) */
hppa_stw (code, hppa_r28, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22);
/* new_lmf->previous_lmf = *lmf_addr */
hppa_ldw (code, 0, hppa_r28, hppa_r1);
hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22);
/* *(lmf_addr) = r22 */
hppa_stw (code, hppa_r22, 0, hppa_r28);
hppa_set (code, method, hppa_r1);
hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, method), hppa_r22);
hppa_stw (code, hppa_sp, G_STRUCT_OFFSET(MonoLMF, ebp), hppa_r22);
/* record the current IP into lmf->eip via an IP-type patch */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
hppa_ldil (code, 0, hppa_r1);
hppa_ldo (code, 0, hppa_r1, hppa_r1);
hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, eip), hppa_r22);
/* Now reload the arguments from the stack */
hppa_ldw (code, -36, hppa_sp, hppa_r26);
hppa_ldw (code, -40, hppa_sp, hppa_r25);
hppa_ldw (code, -44, hppa_sp, hppa_r24);
hppa_ldw (code, -48, hppa_sp, hppa_r23);
/* load arguments allocated to register from the stack */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
if (inst->opcode == OP_REGVAR) {
/* Want the argument in a register */
switch (ainfo->storage) {
if (ainfo->reg != inst->dreg)
hppa_copy (code, ainfo->reg, inst->dreg);
DEBUG (printf ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)));
/* two-word argument: copy both halves */
if (ainfo->reg != inst->dreg) {
hppa_copy (code, ainfo->reg, inst->dreg);
hppa_copy (code, ainfo->reg + 1, inst->dreg + 1);
DEBUG (printf ("Argument %d assigned to register %s, %s\n", pos, mono_arch_regname (inst->dreg), mono_arch_regname (inst->dreg + 1)));
if (ainfo->reg != inst->dreg)
hppa_fcpy (code, HPPA_FP_FMT_SGL, ainfo->reg, inst->dreg);
DEBUG (printf ("Argument %d assigned to single register %s\n", pos, mono_arch_fregname (inst->dreg)));
if (ainfo->reg != inst->dreg)
hppa_fcpy (code, HPPA_FP_FMT_DBL, ainfo->reg, inst->dreg);
DEBUG (printf ("Argument %d assigned to double register %s\n", pos, mono_arch_fregname (inst->dreg)));
/* argument arrived on the caller's stack: sized load off r3 */
switch (ainfo->size) {
hppa_ldb (code, ainfo->offset, hppa_r3, inst->dreg);
hppa_ldh (code, ainfo->offset, hppa_r3, inst->dreg);
hppa_ldw (code, ainfo->offset, hppa_r3, inst->dreg);
g_assert_not_reached ();
DEBUG (printf ("Argument %d loaded from the stack [%s - %d]\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset));
g_assert_not_reached ();
/* Want the argument on the stack */
switch (ainfo->storage)
DEBUG (printf ("Argument %d stored from register %s to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
if (hppa_check_bits (inst->inst_offset, 14)) {
off = inst->inst_offset;
reg = inst->inst_basereg;
/* offset too large for a 14-bit displacement: form the
 * effective address in r1 first */
hppa_set (code, inst->inst_offset, hppa_r1);
hppa_add (code, hppa_r1, inst->inst_basereg, hppa_r1);
/* sized store of the incoming register argument */
switch (ainfo->size)
hppa_stb (code, ainfo->reg, off, reg);
hppa_sth (code, ainfo->reg, off, reg);
hppa_stw (code, ainfo->reg, off, reg);
g_assert_not_reached ();
DEBUG (printf ("Argument %d stored from register (%s,%s) to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (ainfo->reg+1), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
/* two-word argument: store both halves */
if (hppa_check_bits (inst->inst_offset + 4, 14)) {
hppa_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
hppa_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
hppa_stw (code, ainfo->reg, 0, hppa_r1);
hppa_stw (code, ainfo->reg + 1, 4, hppa_r1);
DEBUG (printf ("Argument %d (float) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
hppa_fstwx (code, ainfo->reg, 0, hppa_r0, hppa_r1);
DEBUG (printf ("Argument %d (double) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
hppa_fstdx (code, ainfo->reg, hppa_r0, hppa_r1);
DEBUG (printf ("Argument %d copied from [%s - %d] to [%s + %d] (size=%d)\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset, mono_arch_regname (inst->inst_basereg), inst->inst_offset, ainfo->size));
/* by-value struct already on the stack: copy it into the slot
 * assigned to the local, unless it is already there */
if (inst->inst_offset != ainfo->offset ||
inst->inst_basereg != hppa_r3)
code = emit_memcpy (code, inst->inst_offset, inst->inst_basereg, ainfo->offset, hppa_r3, ainfo->size);
g_assert_not_reached ();
/* tracing hook: emit the enter-method instrumentation call */
code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* drop a gdb-style breakpoint (break 4,8) when HPPA_BREAK is set */
if (getenv("HPPA_BREAK")) {
*(guint32*)code = 0x00010004;
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog: restore the MonoLMF linkage or the spilled
 * callee-saved registers, reload a CEE_STOBJ-style return value into
 * r28 (and r29 for the two-word case), and tear down the frame built
 * by mono_arch_emit_prolog.
 *
 * NOTE(review): braces, case labels and some statements are elided in
 * this view.
 */
mono_arch_emit_epilog (MonoCompile *cfg)
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
int max_epilog_size = 16 + 20 * 4;
sig = mono_method_signature (cfg->method);
/* worst-case size estimate, used to grow the code buffer below */
if (cfg->method->save_lmf)
max_epilog_size += 128;
if (mono_jit_trace_calls != NULL)
max_epilog_size += 50;
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
max_epilog_size += 50;
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
mono_jit_stats.code_reallocs++;
code = (guint32*)(cfg->native_code + cfg->code_len);
/* tracing hook: emit the leave-method instrumentation call */
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
pos = HPPA_STACK_LMF_OFFSET;
if (cfg->method->save_lmf) {
/* r22 = &lmf; unlink it: *lmf_addr = lmf->previous_lmf */
hppa_ldo (code, pos, hppa_r3, hppa_r22);
hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22, hppa_r21);
hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22, hppa_r20);
hppa_stw (code, hppa_r21, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r20);
pos += G_STRUCT_OFFSET(MonoLMF, regs) + sizeof (gulong);
/* We skip the restore of r3 here, it is restored from the
 * stack anyway. This makes the code a bit easier. */
/* NOTE(review): the prolog saves with `reg < 32' but both loops
 * below stop at `reg < 31' -- harmless only if register 31 is never
 * in the saved sets; confirm HPPA_IS_SAVED_GREG/FREG (31) */
for (reg = 4; reg < 31; reg++) {
if (HPPA_IS_SAVED_GREG (reg)) {
hppa_ldw (code, pos, hppa_r3, reg);
pos += sizeof(gulong);
pos = ALIGN_TO (pos, sizeof (double));
hppa_set (code, pos, hppa_r1);
for (reg = 0; reg < 31; reg++) {
if (HPPA_IS_SAVED_FREG (reg)) {
hppa_flddx (code, hppa_r1, hppa_r3, reg);
hppa_ldo (code, sizeof (double), hppa_r1, hppa_r1);
pos += sizeof (double);
/* no LMF: reload the callee-saved registers the prolog spilled */
guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
for (i = 0; i < 32; i++) {
if ((1 << i) & mask) {
hppa_ldw (code, pos, hppa_r3, i);
pos += sizeof (gulong);
/* value returned through a hidden pointer: reload it into the
 * return registers (r28, and r29 for the two-word case) */
if (sig->ret->type != MONO_TYPE_VOID &&
mono_type_to_stind (sig->ret) == CEE_STOBJ) {
CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
switch (cinfo->ret.storage) {
hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
hppa_ldw (code, 0, hppa_r28, hppa_r28);
hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
hppa_ldw (code, 4, hppa_r28, hppa_r29);
hppa_ldw (code, 0, hppa_r28, hppa_r28);
g_assert_not_reached ();
/* frame teardown: reload rp from the frame marker, restore sp and
 * the saved r3, and return through rp */
if (1 || cfg->flags & MONO_CFG_HAS_CALLS)
hppa_ldw (code, -20, hppa_r3, hppa_r2);
hppa_ldo (code, 64, hppa_r3, hppa_sp);
hppa_bv (code, hppa_r0, hppa_r2);
hppa_ldwm (code, -64, hppa_sp, hppa_r3);
cfg->code_len = (guint8*)code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
2723 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Maps a CLR exception class name (short name, without namespace) to its
 * MONO_EXC_* intrinsic index, used to deduplicate emitted throw stubs.
 * Aborts via g_error on any name not in the table.
 */
2725 exception_id_by_name (const char *name)
2727 if (strcmp (name, "IndexOutOfRangeException") == 0)
2728 return MONO_EXC_INDEX_OUT_OF_RANGE;
2729 if (strcmp (name, "OverflowException") == 0)
2730 return MONO_EXC_OVERFLOW;
2731 if (strcmp (name, "ArithmeticException") == 0)
2732 return MONO_EXC_ARITHMETIC;
2733 if (strcmp (name, "DivideByZeroException") == 0)
2734 return MONO_EXC_DIVIDE_BY_ZERO;
2735 if (strcmp (name, "InvalidCastException") == 0)
2736 return MONO_EXC_INVALID_CAST;
2737 if (strcmp (name, "NullReferenceException") == 0)
2738 return MONO_EXC_NULL_REF;
2739 if (strcmp (name, "ArrayTypeMismatchException") == 0)
2740 return MONO_EXC_ARRAY_TYPE_MISMATCH;
2741 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Emits the out-of-line exception-raising code for a compiled method.
 * First pass sizes the needed space (deduplicating identical exception
 * types), then the buffer is grown and a second pass emits stubs and
 * patches the in-line branch sites to point at them.
 * NOTE(review): some original lines (braces, case labels) are elided in
 * this view; comments describe only the visible code.
 */
2746 mono_arch_emit_exceptions (MonoCompile *cfg)
2748 MonoJumpInfo *patch_info;
/* Per-exception-type bookkeeping: where each throw stub was emitted, and
 * whether space for it has already been counted. */
2751 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
2752 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
2753 int max_epilog_size = 50;
2757 /* count the number of exception infos */
2760 * make sure we have enough space for exceptions
2762 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2763 switch (patch_info->type) {
2764 case MONO_PATCH_INFO_BB_OVF:
2765 g_assert_not_reached ();
2768 case MONO_PATCH_INFO_EXC_OVF: {
2769 const MonoOvfJump *ovfj = patch_info->data.target;
2770 max_epilog_size += 8;
/* Only the first occurrence of each exception type needs the full stub. */
2771 i = exception_id_by_name (ovfj->data.exception);
2772 if (!exc_throw_found [i]) {
2773 max_epilog_size += 24;
2774 exc_throw_found [i] = TRUE;
2779 case MONO_PATCH_INFO_EXC:
2780 i = exception_id_by_name (patch_info->data.target);
2781 if (!exc_throw_found [i]) {
2782 max_epilog_size += 24;
2783 exc_throw_found [i] = TRUE;
/* Grow the native code buffer until the exception code is sure to fit. */
2792 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
2793 cfg->code_size *= 2;
2794 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2795 mono_jit_stats.code_reallocs++;
2798 code = cfg->native_code + cfg->code_len;
2800 /* add code to raise exceptions */
2801 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2802 switch (patch_info->type) {
2803 case MONO_PATCH_INFO_BB_OVF: {
2807 case MONO_PATCH_INFO_EXC_OVF: {
2808 const MonoOvfJump *ovfj = patch_info->data.target;
2809 MonoJumpInfo *newji;
2810 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2811 unsigned char *stub = code;
2813 /* Patch original call, point it at the stub */
2814 hppa_patch ((guint32 *)ip, code);
2816 /* Write the stub */
2817 /* SUBTLE: this has to be PIC, because the code block
2820 hppa_bl_n (code, 8, hppa_r0);
2823 /* Add a patch info to patch the stub to point to the exception code */
2824 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
2825 newji->type = MONO_PATCH_INFO_EXC;
2826 newji->ip.i = stub - cfg->native_code;
2827 newji->data.target = ovfj->data.exception;
/* Splice the new EXC patch in right after the current one so it is
 * resolved later in this same loop. */
2828 newji->next = patch_info->next;
2829 patch_info->next = newji;
2832 case MONO_PATCH_INFO_EXC: {
2833 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2834 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted throw stub for this exception type. */
2835 if (exc_throw_pos [i]) {
2836 hppa_patch ((guint32 *)ip, exc_throw_pos [i]);
2837 patch_info->type = MONO_PATCH_INFO_NONE;
2840 exc_throw_pos [i] = code;
2842 hppa_patch ((guint32 *)ip, code);
/* Exception name string goes in r26 (first argument register). */
2843 hppa_set (code, patch_info->data.target, hppa_r26);
/* Repurpose this patch: the ldil/ldo pair below will be fixed up to
 * hold the address of mono_arch_throw_exception_by_name. */
2844 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2845 patch_info->data.name = "mono_arch_throw_exception_by_name";
2846 patch_info->ip.i = code - cfg->native_code;
2848 /* Assume the caller has set r2, we can't set it
2849 * here based on ip, because the caller may
2850 * be relocated (also the "ip" may be from an overflow
/* Placeholder long-immediate + indirect branch, patched later. */
2853 hppa_ldil (code, 0, hppa_r1);
2854 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2855 hppa_bv (code, hppa_r0, hppa_r1);
2865 cfg->code_len = code - cfg->native_code;
2867 g_assert (cfg->code_len < cfg->code_size);
2871 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2873 #error "--with-sigaltstack=yes not supported on hppa"
/* Per-thread JIT TLS setup hook (body elided from this view; presumably
 * little or no HPPA-specific setup — confirm against the full file). */
2878 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Counterpart to mono_arch_setup_jit_tls_data; frees any per-thread JIT
 * TLS state (body elided from this view — confirm against the full file). */
2883 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 * Adds the implicit arguments to a call: the 'this' pointer (moved into
 * hppa_r26, the first argument register) and, for valuetype returns, the
 * hidden return-buffer pointer (moved into hppa_r28).  Both are emitted
 * as OP_SETREG instructions bound to fresh virtual registers.
 */
2888 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
2890 /* add the this argument */
2891 if (this_reg != -1) {
2893 MONO_INST_NEW (cfg, this, OP_SETREG);
2894 this->type = this_type;
2895 this->sreg1 = this_reg;
2896 this->dreg = mono_regstate_next_int (cfg->rs);
2897 mono_bblock_add_inst (cfg->cbb, this);
2898 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, hppa_r26, FALSE);
/* Valuetype return: pass the return buffer address in r28. */
2903 MONO_INST_NEW (cfg, vtarg, OP_SETREG);
2904 vtarg->type = STACK_MP;
2905 vtarg->sreg1 = vt_reg;
2906 vtarg->dreg = mono_regstate_next_int (cfg->rs);
2907 mono_bblock_add_inst (cfg->cbb, vtarg);
2908 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, hppa_r28, FALSE);
/*
 * mono_arch_get_inst_for_method:
 * Architecture-specific intrinsic replacement hook.  The only method the
 * visible code recognizes is Thread.MemoryBarrier, which is replaced by a
 * single OP_MEMORY_BARRIER instruction; returns NULL otherwise.
 */
2914 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
2916 MonoInst *ins = NULL;
2921 if (cmethod->klass == mono_defaults.thread_class &&
2922 strcmp (cmethod->name, "MemoryBarrier") == 0) {
2924 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
2932 * mono_arch_get_argument_info:
2933 * @csig: a method signature
2934 * @param_count: the number of parameters to consider
2935 * @arg_info: an array to store the result infos
2937 * Gathers information on parameters such as size, alignment and
2938 * padding. arg_info should be large enough to hold param_count + 1 entries.
2940 * Returns the size of the activation frame.
2943 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
/* Delegate the actual classification to the backend's call-info builder. */
2950 cinfo = get_call_info (csig, FALSE);
/* Slot 0 describes the implicit 'this' argument when present. */
2952 if (csig->hasthis) {
2953 ainfo = &cinfo->args [0];
2954 arg_info [0].offset = ainfo->offset;
/* Slots 1..param_count describe the explicit parameters; offsets come
 * from the call info, sizes from the generic type-size helper. */
2957 for (k = 0; k < param_count; k++) {
2958 ainfo = &cinfo->args [k + csig->hasthis];
2960 arg_info [k + 1].offset = ainfo->offset;
2961 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/* Arch-specific pretty-printing hook for instruction trees (body elided
 * from this view; typically returns 0 when nothing arch-specific prints). */
2969 mono_arch_print_tree (MonoInst *tree, int arity)
/* Returns an inlined instruction sequence for fetching the current
 * MonoDomain, or NULL when the backend has none (body elided from view). */
2974 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
2979 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)