1 /* src/vm/jit/sparc64/codegen.h - code generation macros and
2 definitions for SPARC64
4 Copyright (C) 1996-2005, 2006, 2007, 2008
5 CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
7 This file is part of CACAO.
9 This program is free software; you can redistribute it and/or
10 modify it under the terms of the GNU General Public License as
11 published by the Free Software Foundation; either version 2, or (at
12 your option) any later version.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
32 #include "vm/jit/jit.hpp"
34 #include "md-abi.h" /* for INT_NATARG_CNT */
38 /* debug defines **************************************************************/
40 # define PASS13BIT(imm) ((((s4)(imm)&0x1fff)<<19)>>19)
42 # define PASS13BIT(imm) imm
/* Map of native integer argument registers (INT_NATARG_CNT entries).
 * NOTE(review): this is a tentative *definition* in a header -- every
 * translation unit that includes this file emits the array and the build
 * relies on common-symbol merging at link time.  Consider declaring it
 * 'extern' here with a single definition in one .c file -- verify build. */
s4 nat_argintregs[INT_NATARG_CNT];
49 /* branch defines *************************************************************/
58 /* patcher defines ************************************************************/
#define PATCHER_CALL_INSTRUCTIONS    2     /* number of instructions          */
#define PATCHER_CALL_SIZE            (2 * 4) /* size in bytes of a patcher call */
#define EXCEPTION_CHECK_INSTRUCTIONS 3     /* number of instructions          */
#define EXCEPTION_CHECK_SIZE         (3 * 4) /* byte size of an exception check */
66 #define PATCHER_NOPS \
73 /* additional functions and macros to generate code ***************************/
76 /* MCODECHECK(icnt) */
78 #define MCODECHECK(icnt) \
80 if ((cd->mcodeptr + (icnt) * 4) > cd->mcodeend) \
81 codegen_increase(cd); \
85 #define ALIGNCODENOP \
86 if ((s4) ((ptrint) cd->mcodeptr & 7)) { \
90 #define ALIGN_STACK_SLOTS(slots) \
#define M_COPY(s,d) emit_copy(jd, iptr, (s), (d)) /* copy operand s to d (delegates to emit_copy) */
#define ICONST(d,c) emit_iconst(cd, (d), (c)) /* load int constant c into register d */
#define LCONST(d,c) emit_lconst(cd, (d), (c)) /* load long constant c into register d */
102 /********************** instruction formats ***********************************/
107 /* 3-address-operations: M_OP3
109 * op3 ..... operation
110 * rs1 ..... register number source 1
111 * rs2 ..... register number or constant integer source 2
112 * rd ..... register number destination
113 * imm ..... switch to use rs2 as constant 13bit integer
114 * (REG means: use b as register number)
115 * (IMM means: use b as signed immediate value)
117 #define M_OP3(op,op3,rd,rs1,rs2,imm) \
119 assert(check_13bit_imm(rs2)); \
120 *((u4 *) cd->mcodeptr) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | (imm?((rs2)&0x1fff):(rs2)) ); \
#define M_OP3_GET_RD(x)  (0x1f & ((x) >> 25))  /* extract rd field: bits 29..25 */
#define M_OP3_GET_RS(x)  (0x1f & ((x) >> 14))  /* extract rs1 field: bits 18..14 */
#define M_OP3_GET_IMM(x) (0x1fff & (x))        /* extract simm13 field: bits 12..0 (not sign-extended) */
129 /* 3-address-operations: M_OP3C
130 * rcond ... condition opcode
131 * rs2 ..... register number or 10bit signed immediate
134 #define M_OP3C(op,op3,rcond,rd,rs1,rs2,imm) \
136 *((u4 *) cd->mcodeptr) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | \
137 ((rcond) << 10) | (imm?((rs2)&0x3ff):(rs2)) ); \
145 * rs1 .... source reg 1
146 * rs2 .... source reg 2 or immediate shift count (5 or 6 bits long depending whether 32 or 64 bit shift)
148 * imm .... switch for constant
149 * x ...... 0 => 32, 1 => 64 bit shift
151 #define M_SHFT(op,op3,rs1,rs2,rd,imm,x) \
153 *((u4 *) cd->mcodeptr) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((rs1) << 14) | ((imm) << 13) | \
154 ((x) << 12) | (((imm) && (x))?((rs2) & 0x3f):((rs2) & 0x1f)) ); \
162 * cond ... condition opcode
163 * rs2 .... source 2 or signed 11-bit constant
165 * imm .... switch for constant
166 * cc{0-2} 32-bit 64-bit or fp condition
168 #define M_FMT4(op,op3,rd,rs2,cond,cc2,cc1,cc0,imm) \
170 *((u4 *) cd->mcodeptr) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((cc2) << 18) | ((cond) << 14) | \
171 ((imm) << 13) | ((cc1) << 12) | ((cc0) << 11) | ((rs2) & 0x7ff) ); \
#define FR_X(r) ((((r) << 1) | 1)) /* floats live in the odd (upper) half of a double word: r -> 2r+1 */
#define DR_X(r) ((((r) << 1) | (((r) << 1) >> 5)) & 0x1f) /* doubles: fold bit 5 of 2r into bit 0, see SPARC spec. */
179 /* 3-address-floating-point-operation
181 * op3,opf .... function-number
183 * rs1 ... source reg (-1 signals unused)
188 #define M_FOP3(op,op3,opf,rd,rs1,rs2) \
190 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:(rs1)) << 14) | \
191 ((opf)<<5) | (rs2) ); \
194 /* float addressing */
195 #define M_FOP3_FX(op,op3,opf,rd,rs1,rs2) \
197 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | (FR_X(rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:FR_X(rs1)) << 14) | \
198 ((opf)<<5) | FR_X(rs2) ); \
201 /* double addressing */
202 #define M_FOP3_DX(op,op3,opf,rd,rs1,rs2) \
204 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | (DR_X(rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:DR_X(rs1)) << 14) | \
205 ((opf)<<5) | DR_X(rs2) ); \
209 /* 3-address-floating-point-compare-operation
211 * op3,opf .... function-number
212 * fcc ... floating point condition code (fcc0 - fcc3)
218 #define M_FCMP_DX(op,op3,opf,fcc,rs1,rs2) \
220 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((fcc)<<25) | ((op3)<<19) | (DR_X(rs1) << 14) | \
221 ((opf)<<5) | DR_X(rs2) ); \
225 #define M_FCMP_FX(op,op3,opf,fcc,rs1,rs2) \
227 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((fcc)<<25) | ((op3)<<19) | (FR_X(rs1) << 14) | \
228 ((opf)<<5) | FR_X(rs2) ); \
232 /**** format 2 operations ********/
234 /* branch on integer reg instruction
236 rcond ...... condition to be tested
237 disp16 ... 16-bit relative address to be jumped to (divided by 4)
238 rs1 ..... register to be tested
239 p ..... prediction bit
240 anul .... annullment bit
242 #define M_BRAREG(op,rcond,rs1,disp16,p,anul) \
244 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((anul)<<29) | (0<<28) | ((rcond)<<25) | (3<<22) | \
245 ( ((disp16)& 0xC000) << 6 ) | (p << 19) | ((rs1) << 14) | ((disp16)&0x3fff) ); \
250 /* branch on integer reg instruction
252 cond ...... condition to be tested
253 disp19 ... 19-bit relative address to be jumped to (divided by 4)
254 ccx ..... 32(0) or 64(2) bit test
255 p ..... prediction bit
256 anul .... annullment bit
258 #define M_BRACC(op,op2,cond,disp19,ccx,p,anul) \
260 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((anul)<<29) | ((cond)<<25) | (op2<<22) | (ccx<<20) | \
261 (p << 19 ) | ((disp19) & 0x007ffff) ); \
266 /************** end-user instructions (see a SPARC asm manual) ***************/
268 #define M_SETHI(imm22, rd) \
270 *((u4 *) cd->mcodeptr) = ((((s4)(0x00)) << 30) | ((rd) << 25) | ((0x04)<<22) | ((imm22)&0x3FFFFF) ); \
274 #define M_NOP M_SETHI(0,0) /* nop */
/* Bitwise logic (arithmetic group op=0x02).  REG variants take a register
 * rs2, IMM variants a signed 13-bit immediate.  The CC variants also set
 * the icc/xcc condition codes (per the "cc" op3 encodings). */
#define M_AND(rs1,rs2,rd) M_OP3(0x02,0x01,rd,rs1,rs2,REG) /* 64b c = a & b */
#define M_AND_IMM(rs1,rs2,rd) M_OP3(0x02,0x01,rd,rs1,rs2,IMM) /* 64b rd = rs1 & simm13 */
#define M_ANDCC(rs1,rs2,rd) M_OP3(0x02,0x11,rd,rs1,rs2,REG) /* and, sets icc/xcc */
#define M_ANDCC_IMM(rs1,rs2,rd) M_OP3(0x02,0x11,rd,rs1,rs2,IMM) /* and imm, sets icc/xcc */
#define M_OR(rs1,rs2,rd) M_OP3(0x02,0x02,rd,rs1,rs2,REG) /* rd = rs1 | rs2 */
#define M_OR_IMM(rs1,rs2,rd) M_OP3(0x02,0x02,rd,rs1,rs2,IMM) /* rd = rs1 | simm13 */
#define M_XOR(rs1,rs2,rd) M_OP3(0x02,0x03,rd,rs1,rs2,REG) /* rd = rs1 ^ rs2 */
#define M_XOR_IMM(rs1,rs2,rd) M_OP3(0x02,0x03,rd,rs1,rs2,IMM) /* rd = rs1 ^ simm13 */
#define M_MOV(rs,rd) M_OR(REG_ZERO, rs, rd) /* rd = rs */
#define M_CLR(rd) M_OR(REG_ZERO,REG_ZERO,rd) /* rd = 0 */
291 #define M_SLLX(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,REG,1) /* 64b rd = rs << rs2 */
292 #define M_SLLX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,IMM,1)
293 #define M_SLL(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,REG,0) /* 32b rd = rs << rs2 */
294 #define M_SLL_IMM(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,IMM,0)
295 #define M_SRLX(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,REG,1) /* 64b rd = rs >>>rs2 */
296 #define M_SRLX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,1)
297 #define M_SRL(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,REG,0) /* 32b rd = rs >>>rs2 */
298 #define M_SRL_IMM(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,0)
299 #define M_SRAX(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,REG,1) /* 64b rd = rs >> rs2 */
300 #define M_SRAX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,1)
301 #define M_SRA(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,REG,0) /* 32b rd = rs >> rs2 */
302 #define M_SRA_IMM(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,0)
304 #define M_ISEXT(rs,rd) M_SRA(rs,REG_ZERO,rd) /* sign extend 32 bits*/
/* Integer arithmetic (64-bit).  IMM variants take a signed 13-bit immediate. */
#define M_ADD(rs1,rs2,rd) M_OP3(0x02,0x00,rd,rs1,rs2,REG) /* 64b rd = rs1 + rs2 */
#define M_ADD_IMM(rs1,rs2,rd) M_OP3(0x02,0x00,rd,rs1,rs2,IMM) /* 64b rd = rs1 + simm13 */
#define M_SUB(rs1,rs2,rd) M_OP3(0x02,0x04,rd,rs1,rs2,REG) /* 64b rd = rs1 - rs2 */
#define M_SUB_IMM(rs1,rs2,rd) M_OP3(0x02,0x04,rd,rs1,rs2,IMM) /* 64b rd = rs1 - simm13 */
#define M_MULX(rs1,rs2,rd) M_OP3(0x02,0x09,rd,rs1,rs2,REG) /* 64b rd = rs1 * rs2 */
#define M_MULX_IMM(rs1,rs2,rd) M_OP3(0x02,0x09,rd,rs1,rs2,IMM) /* 64b rd = rs1 * simm13 */
#define M_DIVX(rs1,rs2,rd) M_OP3(0x02,0x2d,rd,rs1,rs2,REG) /* 64b rd = rs1 / rs2 (sdivx, signed) */
#define M_SUBcc(rs1,rs2,rd) M_OP3(0x02,0x14,rd,rs1,rs2,REG) /* sets xcc and icc */
#define M_SUBcc_IMM(rs1,rs2,rd) M_OP3(0x02,0x14,rd,rs1,rs2,IMM) /* sets xcc and icc */
/**** compare and conditional ALU operations ***********/
#define M_CMP(rs1,rs2) M_SUBcc(rs1,rs2,REG_ZERO) /* compare: subtract, discard result */
#define M_CMP_IMM(rs1,rs2) M_SUBcc_IMM(rs1,rs2,REG_ZERO) /* compare against simm13 */
326 /* move integer register on (64-bit) condition */
328 #define M_XCMOVEQ(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,REG) /* a==b ? rd=rs */
329 #define M_XCMOVNE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,REG) /* a!=b ? rd=rs */
330 #define M_XCMOVLT(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,REG) /* a<b ? rd=rs */
331 #define M_XCMOVGE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,REG) /* a>=b ? rd=rs */
332 #define M_XCMOVLE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,REG) /* a<=b ? rd=rs */
333 #define M_XCMOVGT(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,REG) /* a>b ? rd=rs */
334 #define M_XCMOVULE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,1,1,0,REG) /* a<=b ? rd=rs (u)*/
336 #define M_XCMOVEQ_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,IMM) /* a==b ? rd=rs */
337 #define M_XCMOVNE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,IMM) /* a!=b ? rd=rs */
338 #define M_XCMOVLT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,IMM) /* a<b ? rd=rs */
339 #define M_XCMOVGE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,IMM) /* a>=b ? rd=rs */
340 #define M_XCMOVLE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,IMM) /* a<=b ? rd=rs */
341 #define M_XCMOVGT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,IMM) /* a>b ? rd=rs */
342 #define M_XCMOVULE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,1,1,0,IMM) /* a<=b ? rd=rs (u)*/
344 /* move integer register on (fcc0) floating point condition */
346 #define M_CMOVFGT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x6,0,0,0,IMM) /* fa>fb ? rd=rs */
347 #define M_CMOVFLT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,0,0,0,IMM) /* fa<fb ? rd=rs */
348 #define M_CMOVFEQ_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,0,0,0,IMM) /* fa==fb ? rd=rs */
350 /* move integer register on (32-bit) condition */
354 /* move integer register on register condition */
356 #define M_CMOVREQ(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,REG) /* rs1==0 ? rd=rs2 */
357 #define M_CMOVRNE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,REG) /* rs1!=0 ? rd=rs2 */
358 #define M_CMOVRLE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,REG) /* rs1<=0 ? rd=rs2 */
359 #define M_CMOVRLT(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,REG) /* rs1<0 ? rd=rs2 */
360 #define M_CMOVRGT(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,REG) /* rs1>0 ? rd=rs2 */
361 #define M_CMOVRGE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,REG) /* rs1>=0 ? rd=rs2 */
363 #define M_CMOVREQ_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,IMM) /* rs1==0 ? rd=rs2 */
364 #define M_CMOVRNE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,IMM) /* rs1!=0 ? rd=rs2 */
365 #define M_CMOVRLE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,IMM) /* rs1<=0 ? rd=rs2 */
366 #define M_CMOVRLT_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,IMM) /* rs1<0 ? rd=rs2 */
367 #define M_CMOVRGT_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,IMM) /* rs1>0 ? rd=rs2 */
368 #define M_CMOVRGE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,IMM) /* rs1>=0 ? rd=rs2 */
371 /**** big constant helpers *********/
373 /* #define FITS_13BIT_IMM(x) ((x >= -4096) && (x <= 4095)) */
bool fits_13(s4 disp);       /* true iff disp fits in a signed 13-bit immediate */
s4 get_lopart_disp(s4 disp); /* low displacement part, paired with DO_SETHI_PART (see M_LDX/M_ILD below) */
/* Absolute value for signed integer operands.  Guarded so that a platform
 * header which already defines an abs macro does not trigger a
 * redefinition warning.
 * NOTE: evaluates x twice -- never pass an expression with side effects. */
#ifdef abs
#undef abs
#endif
#define abs(x) ((x) < 0 ? (-(x)) : (x))
#define sethi_part(x) ((x)>>10)     /* upper 22 bits for SETHI; arithmetic shift on negative x (sign bits flow into imm22) */
#define setlo_part(x) ((x) & 0x3ff) /* low 10 bits, merged in after the SETHI (see DO_SETHI_REG) */
383 #define DO_SETHI_REG(c,rd) \
386 M_SETHI(sethi_part(c), rd); \
387 if (setlo_part(c)) { \
388 M_OR_IMM(rd, setlo_part(c), rd); \
392 M_SETHI(sethi_part(~c), rd); \
393 M_XOR_IMM(rd, PASS13BIT(setlo_part(c) | 0xffffffffffff1c00), rd); \
397 #define DO_SETHI_PART(c,rs,rd) \
400 M_SETHI(sethi_part(c), rd); \
404 M_SETHI(sethi_part(-c), rd); \
406 assert(sethi_part(c) != 0xf); \
413 #define M_LDA(rd,rs,disp) \
415 if (fits_13(disp)) { \
416 M_AADD_IMM(rs,disp,rd); \
419 DO_SETHI_REG(disp,rd); \
424 /**** load/store operations ********/
/* These *_INTERN-style loads require disp to fit a signed 13-bit
 * immediate (enforced by the assert in M_OP3). */
#define M_SLDU(rd,rs,disp) M_OP3(0x03,0x02,rd,rs,disp,IMM) /* 16-bit load, uns*/
#define M_SLDS(rd,rs,disp) M_OP3(0x03,0x0a,rd,rs,disp,IMM) /* 16-bit load, sig*/
#define M_BLDS(rd,rs,disp) M_OP3(0x03,0x09,rd,rs,disp,IMM) /* 8-bit load, sig */
#define M_LDX_INTERN(rd,rs,disp) M_OP3(0x03,0x0b,rd,rs,disp,IMM) /* 64-bit load, sig*/
432 #define M_LDX(rd,rs,disp) \
434 if (fits_13(disp)) { \
435 M_LDX_INTERN(rd,rs,disp); \
438 DO_SETHI_PART(disp,rs,rd); \
439 M_LDX_INTERN(rd,rd,PASS13BIT(get_lopart_disp(disp))); \
443 #define M_ILD_INTERN(rd,rs,disp) M_OP3(0x03,0x08,rd,rs,disp,IMM) /* 32-bit load, sig */
444 #define M_ILD(rd,rs,disp) \
446 if (fits_13(disp)) { \
447 M_ILD_INTERN(rd,rs,disp); \
450 DO_SETHI_PART(disp,rs,rd); \
451 M_ILD_INTERN(rd,rd,PASS13BIT(get_lopart_disp(disp))); \
#define M_SST(rd,rs,disp) M_OP3(0x03,0x06,rd,rs,disp,IMM) /* 16-bit store */
#define M_BST(rd,rs,disp) M_OP3(0x03,0x05,rd,rs,disp,IMM) /* 8-bit store */
/* For stores the 'rd' argument is the register being stored (SPARC encodes
 * the store source in the rd field); 'rs' + disp form the address. */
/* Stores with displacement overflow should only happen with PUTFIELD or on */
/* the stack. The PUTFIELD instruction does not use REG_ITMP3 and a */
/* reg_of_var call should not use REG_ITMP3!!! */
#define M_STX_INTERN(rd,rs,disp) M_OP3(0x03,0x0e,rd,rs,disp,IMM) /* 64-bit store */
465 #define M_STX(rd,rs,disp) \
467 if (fits_13(disp)) { \
468 M_STX_INTERN(rd,rs,disp); \
471 DO_SETHI_PART(disp,rs,REG_ITMP3); \
472 M_STX_INTERN(rd,REG_ITMP3,PASS13BIT(get_lopart_disp(disp))); \
477 #define M_IST_INTERN(rd,rs,disp) M_OP3(0x03,0x04,rd,rs,disp,IMM) /* 32-bit store */
478 #define M_IST(rd,rs,disp) \
480 if (fits_13(disp)) { \
481 M_IST_INTERN(rd,rs,disp); \
484 DO_SETHI_PART(disp,rs,REG_ITMP3); \
485 M_IST_INTERN(rd,REG_ITMP3,PASS13BIT(get_lopart_disp(disp))); \
490 /**** branch operations ********/
491 /* XXX prediction and annul bits currently set to defaults, but could be used for optimizations */
493 /* branch on integer register */
495 #define M_BEQZ(r,disp) M_BRAREG(0x0,0x1,r,disp,1,0) /* br r == 0 */
496 #define M_BLEZ(r,disp) M_BRAREG(0x0,0x2,r,disp,1,0) /* br r <= 0 */
497 #define M_BLTZ(r,disp) M_BRAREG(0x0,0x3,r,disp,1,0) /* br r < 0 */
498 #define M_BNEZ(r,disp) M_BRAREG(0x0,0x5,r,disp,1,0) /* br r != 0 */
499 #define M_BGTZ(r,disp) M_BRAREG(0x0,0x6,r,disp,1,0) /* br r > 0 */
500 #define M_BGEZ(r,disp) M_BRAREG(0x0,0x7,r,disp,1,0) /* br r >= 0 */
503 /* branch on (64-bit) integer condition codes */
505 #define M_XBEQ(disp) M_BRACC(0x00,0x1,0x1,disp,2,1,0) /* branch a==b */
506 #define M_XBNE(disp) M_BRACC(0x00,0x1,0x9,disp,2,1,0) /* branch a!=b */
507 #define M_XBGT(disp) M_BRACC(0x00,0x1,0xa,disp,2,1,0) /* branch a>b */
508 #define M_XBLT(disp) M_BRACC(0x00,0x1,0x3,disp,2,1,0) /* branch a<b */
509 #define M_XBGE(disp) M_BRACC(0x00,0x1,0xb,disp,2,1,0) /* branch a>=b */
510 #define M_XBLE(disp) M_BRACC(0x00,0x1,0x2,disp,2,1,0) /* branch a<=b */
511 #define M_XBUGE(disp) M_BRACC(0x00,0x1,0xd,disp,2,1,0) /* br uns a>=b */
512 #define M_XBUGT(disp) M_BRACC(0x00,0x1,0xc,disp,2,1,0) /* br uns a>b */
513 #define M_XBULT(disp) M_BRACC(0x00,0x1,0x5,disp,2,1,0) /* br uns a<b */
515 /* branch on (32-bit) integer condition codes */
517 #define M_BR(disp) M_BRACC(0x00,0x1,0x8,disp,0,1,0) /* branch */
518 #define M_BEQ(disp) M_BRACC(0x00,0x1,0x1,disp,0,1,0) /* branch a==b */
519 #define M_BNE(disp) M_BRACC(0x00,0x1,0x9,disp,0,1,0) /* branch a!=b */
520 #define M_BGT(disp) M_BRACC(0x00,0x1,0xa,disp,0,1,0) /* branch a>b */
521 #define M_BLT(disp) M_BRACC(0x00,0x1,0x3,disp,0,1,0) /* branch a<b */
522 #define M_BGE(disp) M_BRACC(0x00,0x1,0xb,disp,0,1,0) /* branch a>=b */
523 #define M_BLE(disp) M_BRACC(0x00,0x1,0x2,disp,0,1,0) /* branch a<=b */
524 #define M_BULE(disp) M_BRACC(0x00,0x1,0x4,disp,0,1,0) /* br uns a<=b */
525 #define M_BUGT(disp) M_BRACC(0x00,0x1,0xc,disp,0,1,0) /* br uns a>b */
526 #define M_BULT(disp) M_BRACC(0x00,0x1,0x5,disp,0,1,0) /* br uns a<b */
528 /* branch on (fcc0) floating point condition codes */
530 #define M_FBR(disp) M_BRACC(0x00,0x5,0x8,disp,0,1,0) /* branch */
531 #define M_FBU(disp) M_BRACC(0x00,0x5,0x7,disp,0,1,0) /* unordered */
532 #define M_FBG(disp) M_BRACC(0x00,0x5,0x6,disp,0,1,0) /* branch a>b */
533 #define M_FBL(disp) M_BRACC(0x00,0x5,0x4,disp,0,1,0) /* branch a<b */
534 #define M_FBO(disp) M_BRACC(0x00,0x5,0xf,disp,0,1,0) /* br ordered */
/* Register-window and control-transfer operations. */
#define M_SAVE(rs1,rs2,rd) M_OP3(0x02,0x3c,rd,rs1,rs2,IMM) /* new register window, rd = rs1 + simm13 */
#define M_SAVE_REG(rs1,rs2,rd) M_OP3(0x02,0x3c,rd,rs1,rs2,REG) /* save with register rs2 */
#define M_RESTORE(rs1,rs2,rd) M_OP3(0x02,0x3d,rd,rs1,rs2,IMM) /* restore previous register window */
#define M_JMP(rd,rs1,rs2) M_OP3(0x02,0x38,rd, rs1,rs2,REG) /* jump to rs1+rs2, adr of instr. saved to rd */
#define M_JMP_IMM(rd,rs1,rs2) M_OP3(0x02,0x38,rd, rs1,rs2,IMM) /* jump to rs1+simm13, adr saved to rd */
#define M_RET(rs1,imm) M_OP3(0x02,0x38,REG_ZERO,rs1,imm,IMM) /* a jump which discards the current pc */
#define M_RETURN(rs1,imm) M_OP3(0x02,0x39,0,rs1,imm,IMM) /* like ret, but does window restore */
550 /**** floating point operations **/
553 #define M_DMOV(rs,rd) M_FOP3_DX(0x02,0x34,0x02,rd,-1,rs) /* rd = rs */
554 #define M_FMOV(rs,rd) M_FOP3_FX(0x02,0x34,0x01,rd,-1,rs) /* rd = rs */
556 #define M_FMOV_INTERN(rs,rd) M_FOP3(0x02,0x34,0x01,rd,-1,rs) /* rd = rs */
558 #define M_FNEG(rs,rd) M_FOP3_FX(0x02,0x34,0x05,rd,-1,rs) /* rd = -rs */
559 #define M_DNEG(rs,rd) M_FOP3_DX(0x02,0x34,0x06,rd,-1,rs) /* rd = -rs */
561 #define M_FADD(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x41,rd,rs1,rs2) /* float add */
562 #define M_DADD(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x42,rd,rs1,rs2) /* double add */
563 #define M_FSUB(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x045,rd,rs1,rs2) /* float sub */
564 #define M_DSUB(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x046,rd,rs1,rs2) /* double sub */
565 #define M_FMUL(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x049,rd,rs1,rs2) /* float mul */
566 #define M_DMUL(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x04a,rd,rs1,rs2) /* double mul */
567 #define M_FDIV(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x04d,rd,rs1,rs2) /* float div */
568 #define M_DDIV(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x04e,rd,rs1,rs2) /* double div */
571 /**** compare and conditional FPU operations ***********/
573 /* rd field 0 ==> fcc target unit is fcc0 */
574 #define M_FCMP(rs1,rs2) M_FCMP_FX(0x02,0x35,0x051,0,rs1,rs2) /* compare flt */
575 #define M_DCMP(rs1,rs2) M_FCMP_DX(0x02,0x35,0x052,0,rs1,rs2) /* compare dbl */
577 /* conversion functions */
579 #define M_CVTIF(rs,rd) M_FOP3_FX(0x02,0x34,0x0c4,rd,-1,rs)/* int2flt */
580 #define M_CVTID(rs,rd) M_FOP3(0x02,0x34,0x0c8,DR_X(rd),-1,FR_X(rs)) /* int2dbl */
581 #define M_CVTLF(rs,rd) M_FOP3(0x02,0x34,0x084,FR_X(rd),-1,DR_X(rs)) /* long2flt */
582 #define M_CVTLD(rs,rd) M_FOP3_DX(0x02,0x34,0x088,rd,-1,rs) /* long2dbl */
584 #define M_CVTFI(rs,rd) M_FOP3_FX(0x02,0x34,0x0d1,rd,-1,rs) /* flt2int */
585 #define M_CVTDI(rs,rd) M_FOP3(0x02,0x34,0x0d2,FR_X(rd),-1,DR_X(rs)) /* dbl2int */
586 #define M_CVTFL(rs,rd) M_FOP3(0x02,0x34,0x081,DR_X(rd),-1,FR_X(rs)) /* flt2long */
587 #define M_CVTDL(rs,rd) M_FOP3_DX(0x02,0x34,0x082,rd,-1,rs) /* dbl2long */
589 #define M_CVTFD(rs,rd) M_FOP3(0x02,0x34,0x0c9,DR_X(rd),-1,FR_X(rs)) /* flt2dbl */
590 #define M_CVTDF(rs,rd) M_FOP3(0x02,0x34,0x0c6,FR_X(rd),-1,DR_X(rs)) /* dbl2float */
594 #define M_DLD_INTERN(rd,rs1,disp) M_OP3(0x03,0x23,DR_X(rd),rs1,disp,IMM) /* double (64-bit) load */
595 #define M_DLD(rd,rs,disp) \
597 s4 lo = (short) (disp); \
598 s4 hi = (short) (((disp) - lo) >> 13); \
600 M_DLD_INTERN(rd,rs,lo); \
602 M_SETHI(hi&0x3ffff8,rd); \
604 M_DLD_INTERN(rd,rd,PASS13BIT(lo)); \
607 /* Note for SETHI: sethi has a 22bit imm, only set upper 19 bits */
609 #define M_FLD_INTERN(rd,rs1,disp) M_OP3(0x03,0x20,FR_X(rd),rs1,disp,IMM) /* float (32-bit) load */
610 #define M_FLD(rd,rs,disp) \
612 s4 lo = (short) (disp); \
613 s4 hi = (short) (((disp) - lo) >> 13); \
615 M_FLD_INTERN(rd,rs,lo); \
617 M_SETHI(hi&0x3ffff8,rd); \
619 M_FLD_INTERN(rd,rd,PASS13BIT(lo)); \
624 #define M_FST_INTERN(rd,rs,disp) M_OP3(0x03,0x24,FR_X(rd),rs,disp,IMM) /* float (32-bit) store */
625 #define M_FST(rd,rs,disp) \
627 s4 lo = (short) (disp); \
628 s4 hi = (short) (((disp) - lo) >> 13); \
630 M_FST_INTERN(rd,rs,lo); \
632 M_SETHI(hi&0x3ffff8,REG_ITMP3); \
633 M_AADD(rs,REG_ITMP3,REG_ITMP3); \
634 M_FST_INTERN(rd,REG_ITMP3,PASS13BIT(lo)); \
639 #define M_DST_INTERN(rd,rs1,disp) M_OP3(0x03,0x27,DR_X(rd),rs1,disp,IMM) /* double (64-bit) store */
640 #define M_DST(rd,rs,disp) \
642 s4 lo = (short) (disp); \
643 s4 hi = (short) (((disp) - lo) >> 13); \
645 M_DST_INTERN(rd,rs,lo); \
647 M_SETHI(hi&0x3ffff8,REG_ITMP3); \
648 M_AADD(rs,REG_ITMP3,REG_ITMP3); \
649 M_DST_INTERN(rd,REG_ITMP3,PASS13BIT(lo)); \
656 * Address pseudo instruction
#define POINTERSHIFT 3 /* x8 */
#define M_ALD_INTERN(a,b,disp) M_LDX_INTERN(a,b,disp) /* address load, 13-bit disp only */
#define M_ALD(rd,rs,disp) M_LDX(rd,rs,disp) /* address load, arbitrary disp */
#define M_AST_INTERN(a,b,disp) M_STX_INTERN(a,b,disp) /* address store, 13-bit disp only */
#define M_AST(a,b,disp) M_STX(a,b,disp) /* address store, arbitrary disp */
#define M_AADD(a,b,c) M_ADD(a,b,c) /* address add (pointers are 64-bit, hence the X forms) */
#define M_AADD_IMM(a,b,c) M_ADD_IMM(a,b,c) /* address add immediate */
#define M_ASUB_IMM(a,b,c) M_SUB_IMM(a,b,c) /* address subtract immediate */
#define M_ASLL_IMM(a,b,c) M_SLLX_IMM(a,b,c) /* address shift left (index scaling) */
#define M_ACMP(a,b) M_CMP(a,b) /* address compare */
#define M_ICMP(a,b) M_CMP(a,b) /* int compare */
674 #endif /* _CODEGEN_H */