1 /* vm/jit/sparc64/codegen.h - code generation macros and definitions for Sparc
3 Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 Contact: cacao@cacaojvm.org
27 Authors: Andreas Krall
33 $Id: codegen.h 4722 2006-04-03 15:36:00Z twisti $
43 #include "vm/jit/jit.h"
46 /* additional functions and macros to generate code ***************************/
/* gen_nullptr_check(objreg): registers a NullPointerException reference with the codegen data cd (the test/branch lines are not visible in this chunk). */
#define gen_nullptr_check(objreg) \
	codegen_add_nullpointerexception_ref(cd); \
/* gen_bound_check: loads the array length into REG_ITMP3, compares it with index s2 and registers an ArrayIndexOutOfBoundsException reference. */
#define gen_bound_check \
	M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size)); \
	M_CMP(s2, REG_ITMP3); \
	codegen_add_arrayindexoutofboundsexception_ref(cd, s2); \
/* gen_div_check(r): registers an ArithmeticException reference for division by zero on r (test/branch lines not visible in this chunk). */
#define gen_div_check(r) \
	codegen_add_arithmeticexception_ref(cd); \
/* MCODECHECK(icnt) */
/* Grow the code buffer if fewer than icnt instruction slots (4 bytes each) remain. */
#define MCODECHECK(icnt) \
	if ((cd->mcodeptr + (icnt) * 4) > cd->mcodeend) \
		codegen_increase(cd); \
/* ALIGNCODENOP: pad with a nop when mcodeptr is not 8-byte aligned (body continues beyond this chunk). */
#define ALIGNCODENOP \
	if ((s4) ((ptrint) mcodeptr & 7)) { \
87 generates an integer-move from register rs to rd.
88 if rs and rd are the same int-register, no code will be generated.
91 #define M_INTMOVE(rs,rd) if (rs != rd) { M_MOV(rs, rd); }
95 generates a double floating-point-move from register (pair) rs to rd.
96 if rs and rd are the same double-register, no code will be generated
99 #define M_DBLMOVE(rs, rd) if (rs != rd) { M_DMOV (rs, rd); }
103 generates a double floating-point-move from pseudo register rs to rd.
104 (ie. lower register of double rs pair to lower register of double rd pair)
105 if rs and rd are the same double-register, no code will be generated
107 #define M_FLTMOVE(rs, rd) if (rs != rd) { M_FMOV (rs, rd); }
111 #define M_COPY(s,d) emit_copy(jd, iptr, (s), (d))
116 /********************** instruction formats ***********************************/
121 /* 3-address-operations: M_OP3
123 * op3 ..... operation
124 * rs1 ..... register number source 1
125 * rs2 ..... register number or constant integer source 2
126 * rd ..... register number destination
127 * imm ..... switch to use rs2 as constant 13bit integer
128 * (REG means: use b as register number)
129 * (IMM means: use b as signed immediate value)
/* M_OP3: emit one format-3 instruction word into *mcodeptr.  When imm is
 * IMM, rs2 is treated as a signed 13-bit immediate (masked to 13 bits here),
 * otherwise as a register number.  Note the operand order: rd before rs1/rs2. */
#define M_OP3(op,op3,rd,rs1,rs2,imm) \
	*(mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | (imm?((rs2)&0x1fff):(rs2)) )
135 /* 3-address-operations: M_OP3C
136 * rcond ... condition opcode
137 * rs2 ..... register number or 10bit signed immediate
/* M_OP3C: format-3 word carrying a register-condition field (rcond) at bits
 * 12:10; with IMM, rs2 is a signed 10-bit immediate (masked here). */
#define M_OP3C(op,op3,rcond,rd,rs1,rs2,imm) \
	*(mcodeptr++) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | \
	((rcond) << 10) | (imm?((rs2)&0x3ff):(rs2)) )
151 * rs2 .... source 2 or constant
153 * imm .... switch for constant
154 * x ...... 0 => 32, 1 => 64 bit shift
/* M_SHFT: shift instruction word; x selects 32-bit (0) or 64-bit (1) shift,
 * imm selects the immediate form of rs2.  NOTE: operand order here is
 * (rs1,rs2,rd), unlike M_OP3 which takes rd first. */
#define M_SHFT(op,op3,rs1,rs2,rd,imm,x) \
	*(mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((rs1) << 14) | ((rs2) << 0) | \
	((imm) << 13) | ((x) << 12) )
163 * cond ... condition opcode
164 * rs2 .... source 2 or signed 11-bit constant
166 * imm .... switch for constant
167 * cc{0-2} 32-bit 64-bit or fp condition
/* M_FMT4: conditional-move style instruction word; cc2/cc1/cc0 select the
 * condition-code source (32-bit, 64-bit or fp), imm the immediate form of rs2. */
#define M_FMT4(op,op3,rd,rs2,cond,cc2,cc1,cc0,imm) \
	*(mcodeptr++) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((cc2) << 18) | ((cond) << 14) | \
	((imm) << 13) | ((cc1) << 12) | ((cc0) << 11) | ((rs2) << 0) )
177 /* 3-address-floating-point-operation
179 op3,opf .... function-number
/* M_FOP3: floating-point format-3 word; opf (placed at bits 13:5) selects
 * the fp operation. */
#define M_FOP3(op,op3,opf,rd,rs1,rs2) \
	*(mcodeptr++) = ( (((s4)(op))<<30) | ((rd)<<25) | ((op3)<<19) | ((rs1) << 14) | ((opf)<<5) | (rs2) )
186 /**** format 2 operations ********/
188 /* branch on integer reg instruction
190 rcond ...... condition to be tested
191 disp16 ... 16-bit relative address to be jumped to (divided by 4)
192 rs1 ..... register to be tested
193 p ..... prediction bit
194 anul .... annullment bit
/* M_BRAREG: branch on the contents of integer register rs1; the two high
 * bits of disp16 (mask 0xC000) are relocated to bits 21:20 of the word, the
 * low 14 bits stay at bits 13:0. */
#define M_BRAREG(op,rcond,rs1,disp16,p,anul) \
	*(mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | (0<<28) | ((rcond)<<25) | (3<<22) | \
	( ((disp16)& 0xC000) << 6 ) | (p << 19) | ((rs1) << 14) | ((disp16)&0x3fff) )
/* branch on integer condition codes instruction
202 cond ...... condition to be tested
203 disp19 ... 19-bit relative address to be jumped to (divided by 4)
204 ccx ..... 32(0) or 64(2) bit test
205 p ..... prediction bit
206 anul .... annullment bit
/* M_BRACC: branch on integer condition codes; ccx selects 32-bit (0) or
 * 64-bit (2) codes, disp19 is the word displacement (not masked here --
 * callers must pass a value that fits 19 bits). */
#define M_BRACC(op,op2,cond,disp19,ccx,p,anul) \
	*(mcodeptr++) = ( (((s4)(op))<<30) | ((anul)<<29) | ((cond)<<25) | (op2<<22) | (ccx<<20) | \
	(p << 19 ) | (disp19) )
212 /************** end-user instructions (try to follow asm style) ***************/
/* M_SETHI: emit sethi, placing imm22 (masked to 22 bits) in rd. */
#define M_SETHI(imm22, rd) \
	*(mcodeptr++) = ((((s4)(0x00)) << 30) | ((rd) << 25) | ((0x04)<<22) | ((imm22)&0x3FFFFF) )
/* sethi 0,%g0 is the canonical SPARC nop encoding. */
#define M_NOP (M_SETHI(0,0)) /* nop */
/* 64-bit logical operations; the *CC variants also update the condition codes. */
#define M_AND(rs1,rs2,rd)	M_OP3(0x02,0x01,rd,rs1,rs2,REG)	/* 64b c = a & b */
#define M_AND_IMM(rs1,rs2,rd)	M_OP3(0x02,0x01,rd,rs1,rs2,IMM)
#define M_ANDCC(rs1,rs2,rd)	M_OP3(0x02,0x11,rd,rs1,rs2,REG)
#define M_ANDCC_IMM(rs1,rs2,rd)	M_OP3(0x02,0x11,rd,rs1,rs2,IMM)
#define M_OR(rs1,rs2,rd)	M_OP3(0x02,0x02,rd,rs1,rs2,REG)	/* rd = rs1 | rs2 */
#define M_OR_IMM(rs1,rs2,rd)	M_OP3(0x02,0x02,rd,rs1,rs2,IMM)
/* rd = rs1 ^ rs2.  Fixed: the operands were previously forwarded to M_OP3 as
 * (rs1,rs2,rd), but M_OP3 is declared (op,op3,rd,rs1,rs2,imm) -- every
 * sibling macro (M_AND, M_OR, M_ADD, ...) passes rd first, so the old form
 * encoded a bogus instruction (rs1 in the rd field, rd in the rs2 field). */
#define M_XOR(rs1,rs2,rd)	M_OP3(0x02,0x03,rd,rs1,rs2,REG)	/* rd = rs1 ^ rs2 */
#define M_XOR_IMM(rs1,rs2,rd)	M_OP3(0x02,0x03,rd,rs1,rs2,IMM)
/* register move via or with %g0 */
#define M_MOV(rs,rd)	M_OR(REG_ZERO, rs, rd)	/* rd = rs */

/* shift operations -- M_SHFT takes operands as (rs1,rs2,rd); last argument x=1 selects the 64-bit shift form */
#define M_SLLX(rs1,rs2,rd)	M_SHFT(0x02,0x25,rs1,rs2,rd,REG,1)	/* 64b rd = rs << rs2 */
#define M_SLLX_IMM(rs1,rs2,rd)	M_SHFT(0x02,0x25,rs1,rs2,rd,IMM,1)
#define M_SRLX(rs1,rs2,rd)	M_SHFT(0x02,0x26,rs1,rs2,rd,REG,1)	/* 64b rd = rs >>>rs2 */
#define M_SRLX_IMM(rs1,rs2,rd)	M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,1)
#define M_SRL(rs1,rs2,rd)	M_SHFT(0x02,0x26,rs1,rs2,rd,REG,0)	/* 32b rd = rs >>>rs2 */
#define M_SRL_IMM(rs1,rs2,rd)	M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,0)
#define M_SRAX(rs1,rs2,rd)	M_SHFT(0x02,0x27,rs1,rs2,rd,REG,1)	/* 64b rd = rs >> rs2 */
#define M_SRAX_IMM(rs1,rs2,rd)	M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,1)
#define M_SRA(rs1,rs2,rd)	M_SHFT(0x02,0x27,rs1,rs2,rd,REG,0)	/* 32b rd = rs >> rs2 */
#define M_SRA_IMM(rs1,rs2,rd)	M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,0)

/* 32-bit arithmetic right shift by 0 sign-extends the low word */
#define M_ISEXT(rs,rd)	M_SRA_IMM(rs,0,rd)	/* sign extend 32 bits*/

#define M_ADD(rs1,rs2,rd)	M_OP3(0x02,0x00,rd,rs1,rs2,REG)	/* 64b rd = rs1 + rs2 */
#define M_ADD_IMM(rs1,rs2,rd)	M_OP3(0x02,0x00,rd,rs1,rs2,IMM)
#define M_SUB(rs1,rs2,rd)	M_OP3(0x02,0x04,rd,rs1,rs2,REG)	/* 64b rd = rs1 - rs2 */
#define M_SUB_IMM(rs1,rs2,rd)	M_OP3(0x02,0x04,rd,rs1,rs2,IMM)
#define M_MULX(rs1,rs2,rd)	M_OP3(0x02,0x09,rd,rs1,rs2,REG)	/* 64b rd = rs1 * rs2 */
#define M_MULX_IMM(rs1,rs2,rd)	M_OP3(0x02,0x09,rd,rs1,rs2,IMM)
#define M_DIVX(rs1,rs2,rd)	M_OP3(0x02,0x2d,rd,rs1,rs2,REG)	/* 64b rd = rs1 / rs2 */

/**** compare and conditional ALU operations ***********/

/* a subtract into %g0: result is discarded, only the condition codes remain */
#define M_CMP(rs1,rs2)	M_SUB(rs1,rs2,REG_ZERO)	/* sets xcc and icc */
#define M_CMP_IMM(rs1,rs2)	M_SUB_IMM(rs1,rs2,REG_ZERO)
/* move integer register on (64-bit) condition */

/* MOVcc on xcc (cc2=1,cc1=1,cc0=0); the comparison is established by a preceding M_CMP */
#define M_XCMOVEQ(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,REG)	/* a==b ? rd=rs */
#define M_XCMOVNE(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,REG)	/* a!=b ? rd=rs */
#define M_XCMOVLT(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,REG)	/* a<b ? rd=rs */
#define M_XCMOVGE(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,REG)	/* a>=b ? rd=rs */
#define M_XCMOVLE(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,REG)	/* a<=b ? rd=rs */
#define M_XCMOVGT(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,REG)	/* a>b ? rd=rs */

/* immediate forms: rs is an 11-bit signed constant */
#define M_XCMOVEQ_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,IMM)	/* a==b ? rd=rs */
#define M_XCMOVNE_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,IMM)	/* a!=b ? rd=rs */
#define M_XCMOVLT_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,IMM)	/* a<b ? rd=rs */
#define M_XCMOVGE_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,IMM)	/* a>=b ? rd=rs */
#define M_XCMOVLE_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,IMM)	/* a<=b ? rd=rs */
#define M_XCMOVGT_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,IMM)	/* a>b ? rd=rs */

/* move integer register on (fcc0) floating point condition */

#define M_CMOVFGT_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x6,0,0,0,IMM)	/* fa>fb ? rd=rs */
#define M_CMOVFLT_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x4,0,0,0,IMM)	/* fa<fb ? rd=rs */
#define M_CMOVFEQ_IMM(rs,rd)	M_FMT4(0x2,0x2c,rd,rs,0x9,0,0,0,IMM)	/* fa==fb ? rd=rs */

/* move integer register on (32-bit) condition */

/* move integer register on register condition */

/* MOVr: rd=rs2 if rs1 satisfies rcond -- no prior compare needed */
#define M_CMOVREQ(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,REG)	/* rs1==0 ? rd=rs2 */
#define M_CMOVRNE(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,REG)	/* rs1!=0 ? rd=rs2 */
#define M_CMOVRLE(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,REG)	/* rs1<=0 ? rd=rs2 */
#define M_CMOVRLT(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,REG)	/* rs1<0 ? rd=rs2 */
#define M_CMOVRGT(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,REG)	/* rs1>0 ? rd=rs2 */
#define M_CMOVRGE(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,REG)	/* rs1>=0 ? rd=rs2 */

/* immediate forms: rs2 is a 10-bit signed constant (see M_OP3C) */
#define M_CMOVREQ_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,IMM)	/* rs1==0 ? rd=rs2 */
#define M_CMOVRNE_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,IMM)	/* rs1!=0 ? rd=rs2 */
#define M_CMOVRLE_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,IMM)	/* rs1<=0 ? rd=rs2 */
#define M_CMOVRLT_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,IMM)	/* rs1<0 ? rd=rs2 */
#define M_CMOVRGT_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,IMM)	/* rs1>0 ? rd=rs2 */
#define M_CMOVRGE_IMM(rs1,rs2,rd)	M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,IMM)	/* rs1>=0 ? rd=rs2 */
307 /**** load/store operations ********/
/* short/byte loads: signed 13-bit displacement forms only */
#define M_SLDU(rd,rs,disp)	M_OP3(0x03,0x02,rd,rs,disp,IMM)	/* 16-bit load, uns*/
#define M_SLDS(rd,rs,disp)	M_OP3(0x03,0x0a,rd,rs,disp,IMM)	/* 16-bit load, sig*/
#define M_BLDS(rd,rs,disp)	M_OP3(0x03,0x09,rd,rs,disp,IMM)	/* 8-bit load, sig */

#define M_LDX_INTERN(rd,rs,disp)	M_OP3(0x03,0x0b,rd,rs,disp,IMM)	/* 64-bit load, sig*/
/* M_LDX: 64-bit load whose displacement may exceed the 13-bit immediate; split into sethi(hi) + lo parts (the selecting branch/brace lines are not visible in this chunk) */
#define M_LDX(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_LDX_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,rd); \
	M_LDX_INTERN(rd,rd,lo); \
#define M_ILD_INTERN(rd,rs,disp)	M_OP3(0x03,0x08,rd,rs,disp,IMM)	/* 32-bit load, sig */
/* M_ILD: 32-bit load, same lo/hi displacement split as M_LDX */
#define M_ILD(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_ILD_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,rd); \
	M_ILD_INTERN(rd,rd,lo); \
#define M_SST(rd,rs,disp)	M_OP3(0x03,0x06,rd,rs,disp,IMM)	/* 16-bit store */
#define M_BST(rd,rs,disp)	M_OP3(0x03,0x05,rd,rs,disp,IMM)	/* 8-bit store */

/* Stores with displacement overflow should only happen with PUTFIELD or on */
/* the stack. The PUTFIELD instruction does not use REG_ITMP3 and a */
/* reg_of_var call should not use REG_ITMP3!!! */

#define M_STX_INTERN(rd,rs,disp)	M_OP3(0x03,0x0e,rd,rs,disp,IMM)	/* 64-bit store */
/* M_STX: 64-bit store; large displacements build the address in REG_ITMP3 (see the note above on who may use REG_ITMP3) */
#define M_STX(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_STX_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,REG_ITMP3); /* sethi has a 22bit imm, only set upper 19 bits */ \
	M_AADD(rs,REG_ITMP3,REG_ITMP3); \
	M_STX_INTERN(rd,REG_ITMP3,lo); \
#define M_IST_INTERN(rd,rs,disp)	M_OP3(0x03,0x04,rd,rs,disp,IMM)	/* 32-bit store */
/* M_IST: 32-bit store, same scheme as M_STX */
#define M_IST(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_IST_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,REG_ITMP3); /* sethi has a 22bit imm, only set upper 19 bits */ \
	M_AADD(rs,REG_ITMP3,REG_ITMP3); \
	M_IST_INTERN(rd,REG_ITMP3,lo); \
381 /**** branch operations ********/
382 /* XXX prediction and annul bits currently set to defaults, but could be used for optimizations */
384 /* branch on integer register */
/* branch on the contents of integer register r (via M_BRAREG) */
#define M_BEQZ(r,disp)	M_BRAREG(0x0,0x1,r,disp,1,0)	/* br r == 0 */
#define M_BLEZ(r,disp)	M_BRAREG(0x0,0x2,r,disp,1,0)	/* br r <= 0 */
#define M_BLTZ(r,disp)	M_BRAREG(0x0,0x3,r,disp,1,0)	/* br r < 0 */
#define M_BNEZ(r,disp)	M_BRAREG(0x0,0x5,r,disp,1,0)	/* br r != 0 */
#define M_BGTZ(r,disp)	M_BRAREG(0x0,0x6,r,disp,1,0)	/* br r > 0 */
#define M_BGEZ(r,disp)	M_BRAREG(0x0,0x7,r,disp,1,0)	/* br r >= 0 */

/* branch on (64-bit) integer condition codes (ccx argument 2 selects xcc) */
#define M_XBEQ(disp)	M_BRACC(0x00,0x1,0x1,disp,2,1,0)	/* branch a==b */
#define M_XBNEQ(disp)	M_BRACC(0x00,0x1,0x9,disp,2,1,0)	/* branch a!=b */
#define M_XBGT(disp)	M_BRACC(0x00,0x1,0xa,disp,2,1,0)	/* branch a>b */
#define M_XBLT(disp)	M_BRACC(0x00,0x1,0x3,disp,2,1,0)	/* branch a<b */
#define M_XBGE(disp)	M_BRACC(0x00,0x1,0xb,disp,2,1,0)	/* branch a>=b */
#define M_XBLE(disp)	M_BRACC(0x00,0x1,0x2,disp,2,1,0)	/* branch a<=b */
#define M_XBUGE(disp)	M_BRACC(0x00,0x1,0xd,disp,2,1,0)	/* br uns a>=b */

/* branch on (32-bit) integer condition codes (ccx argument 0 selects icc) */
#define M_BR(disp)	M_BRACC(0x00,0x1,0x8,disp,0,1,0)	/* branch */
#define M_BEQ(disp)	M_BRACC(0x00,0x1,0x1,disp,0,1,0)	/* branch a==b */
#define M_BNEQ(disp)	M_BRACC(0x00,0x1,0x9,disp,0,1,0)	/* branch a!=b */
#define M_BGT(disp)	M_BRACC(0x00,0x1,0xa,disp,0,1,0)	/* branch a>b */
#define M_BLT(disp)	M_BRACC(0x00,0x1,0x3,disp,0,1,0)	/* branch a<b */
#define M_BGE(disp)	M_BRACC(0x00,0x1,0xb,disp,0,1,0)	/* branch a>=b */
#define M_BLE(disp)	M_BRACC(0x00,0x1,0x2,disp,0,1,0)	/* branch a<=b */

/* register-indirect jumps; M_JMP stores the jump instruction's address in rd */
#define M_JMP(rd,rs1,rs2)	M_OP3(0x02,0x38,rd, rs1,rs2,REG)	/* jump to rs1+rs2, adr of instr. saved to rd */
#define M_JMP_IMM(rd,rs1,rs2)	M_OP3(0x02,0x38,rd, rs1,rs2,IMM)
#define M_RET(rs)	M_OP3(0x02,0x38,REG_ZERO,rs,REG_ZERO,REG)
#define M_RETURN(rs)	M_OP3(0x02,0x39,0,rs,REG_ZERO,REG)	/* like ret, does window restore */
426 /**** floating point operations **/
/* NOTE(review): M_FMOV, M_FCMP and the single-precision conversions below
   translate register numbers with *2 (logical double index -> %f register),
   but M_FNEG and M_FADD..M_FDIV do not -- verify against the callers in the
   full file which convention each group expects. */
#define M_DMOV(rs,rd)	M_FOP3(0x02,0x34,0x02,rd,0,rs)	/* rd = rs */
#define M_FMOV(rs,rd)	M_FOP3(0x02,0x34,0x01,rd*2,0,rs*2)	/* rd = rs */

#define M_FNEG(rs,rd)	M_FOP3(0x02,0x34,0x05,rd,0,rs)	/* rd = -rs */
#define M_DNEG(rs,rd)	M_FOP3(0x02,0x34,0x06,rd,0,rs)	/* rd = -rs */

#define M_FADD(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x41,rd,rs1,rs2)	/* float add */
#define M_DADD(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x42,rd,rs1,rs2)	/* double add */
#define M_FSUB(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x045,rd,rs1,rs2)	/* float sub */
#define M_DSUB(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x046,rd,rs1,rs2)	/* double sub */
#define M_FMUL(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x049,rd,rs1,rs2)	/* float mul */
#define M_DMUL(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x04a,rd,rs1,rs2)	/* double mul */
#define M_FDIV(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x04d,rd,rs1,rs2)	/* float div */
#define M_DDIV(rs1,rs2,rd)	M_FOP3(0x02,0x34,0x04e,rd,rs1,rs2)	/* double div */

/**** compare and conditional FPU operations ***********/

/* rd field 0 ==> fcc target unit is fcc0 */
#define M_FCMP(rs1,rs2)	M_FOP3(0x02,0x35,0x051,0,rs1*2,rs2*2)	/* set fcc flt */
#define M_DCMP(rs1,rs2)	M_FOP3(0x02,0x35,0x052,0,rs1,rs2)	/* set fcc dbl */

/* conversion functions */
#define M_CVTIF(rs,rd)	M_FOP3(0x02,0x34,0x0c4,rd*2,0,rs*2)	/* int2flt */
#define M_CVTID(rs,rd)	M_FOP3(0x02,0x34,0x0c8,rd,0,rs*2)	/* int2dbl */
#define M_CVTLF(rs,rd)	M_FOP3(0x02,0x34,0x084,rd*2,0,rs)	/* long2flt */
#define M_CVTLD(rs,rd)	M_FOP3(0x02,0x34,0x088,rd,0,rs)	/* long2dbl */

#define M_CVTFI(rs,rd)	M_FOP3(0x02,0x34,0x0d1,rd*2,0,rs*2)	/* flt2int */
#define M_CVTDI(rs,rd)	M_FOP3(0x02,0x34,0x0d2,rd*2,0,rs)	/* dbl2int */
#define M_CVTFL(rs,rd)	M_FOP3(0x02,0x34,0x081,rd,0,rs*2)	/* flt2long */
#define M_CVTDL(rs,rd)	M_FOP3(0x02,0x34,0x082,rd,0,rs)	/* dbl2long */

#define M_CVTFD(rs,rd)	M_FOP3(0x02,0x34,0x0c9,rd,0,rs*2)	/* flt2dbl */
#define M_CVTDF(rs,rd)	M_FOP3(0x02,0x34,0x0c6,rd*2,0,rs)	/* dbl2float */
/* translate logical double register index to float index. (e.g. %d1 -> %f2, %d2 -> %f4, etc.) */
/* we don't have to pack the 6-bit register number, since we are not using the upper 16 doubles */
/* floats reside in lower register of a double pair, use same translation as above */
#define M_DLD_INTERN(rd,rs1,disp)	M_OP3(0x03,0x23,rd*2,rs1,disp,IMM)	/* double (64-bit) load */
/* M_DLD: double load with lo/hi displacement split (selecting branch/brace
   lines not visible in this chunk).  NOTE(review): the long path sethi's
   into rd, which here is a float-register index, unlike M_FST/M_DST below
   which use REG_ITMP3 -- verify against the full file. */
#define M_DLD(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_DLD_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,rd); \
	M_DLD_INTERN(rd,rd,lo); \
/* Note for SETHI: sethi has a 22bit imm, only set upper 19 bits */
#define M_FLD_INTERN(rd,rs1,disp)	M_OP3(0x03,0x20,rd*2,rs1,disp,IMM)	/* float (32-bit) load */
/* M_FLD: float load, same scheme (and same NOTE) as M_DLD */
#define M_FLD(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_FLD_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,rd); \
	M_FLD_INTERN(rd,rd,lo); \
#define M_FST_INTERN(rd,rs,disp)	M_OP3(0x03,0x24,rd*2,rs,disp,IMM)	/* float (32-bit) store */
/* M_FST: float store; large displacements build the address in REG_ITMP3 */
#define M_FST(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_FST_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,REG_ITMP3); \
	M_AADD(rs,REG_ITMP3,REG_ITMP3); \
	M_FST_INTERN(rd,REG_ITMP3,lo); \
#define M_DST_INTERN(rd,rs1,disp)	M_OP3(0x03,0x27,rd,rs1,disp,IMM)	/* double (64-bit) store */
/* M_DST: double store, same scheme as M_FST */
#define M_DST(rd,rs,disp) \
	s4 lo = (short) (disp); \
	s4 hi = (short) (((disp) - lo) >> 13); \
	M_DST_INTERN(rd,rs,lo); \
	M_SETHI(hi&0x3ffff8,REG_ITMP3); \
	M_AADD(rs,REG_ITMP3,REG_ITMP3); \
	M_DST_INTERN(rd,REG_ITMP3,lo); \
532 * Address pseudo instruction
/* addresses are 64-bit on sparc64, so the A* pseudo ops alias the X (64-bit) integer operations */
#define POINTERSHIFT 3 /* x8 */

#define M_ALD_INTERN(a,b,disp)	M_LDX_INTERN(a,b,disp)
#define M_ALD(a,b,disp)	M_LDX(a,b,disp)
#define M_AST_INTERN(a,b,disp)	M_STX_INTERN(a,b,disp)
#define M_AST(a,b,disp)	M_STX(a,b,disp)
#define M_AADD(a,b,c)	M_ADD(a,b,c)
#define M_AADD_IMM(a,b,c)	M_ADD_IMM(a,b,c)
#define M_ASUB_IMM(a,b,c)	M_SUB_IMM(a,b,c)
#define M_ASLL_IMM(a,b,c)	M_SLLX_IMM(a,b,c)
550 /* var_to_reg_xxx **************************************************************
552 This function generates code to fetch data from a pseudo-register
553 into a real register. If the pseudo-register has actually been
554 assigned to a real register, no code will be emitted, since
555 following operations can use this register directly.
557 v: pseudoregister to be fetched from
558 tempregnum: temporary register to be used if v is actually spilled to ram
560 return: the register number, where the operand can be found after
               fetching (this will be either tempregnum or the register
               number already given to v)
564 *******************************************************************************/
/* var_to_reg_int: if pseudo-register v was spilled (INMEMORY), load it from the stack into tempnr; the remaining branch lines of this macro are not visible in this chunk. */
#define var_to_reg_int(regnr,v,tempnr) \
	if ((v)->flags & INMEMORY) { \
		M_LDX(tempnr, REG_SP, (v)->regoff * 8); \
		regnr = (v)->regoff; \
578 /* gen_resolvebranch ***********************************************************
580 * backpatches a branch instruction
581 * On Sparc all there is to do, is replace the 22bit disp at the end of the
583 * THIS APPLIES TO THE (V8) BICC INSTRUCTION ONLY.
585 * parameters: ip ... pointer to instruction after branch (void*)
586 * so ... offset of instruction after branch (s4)
587 * to ... offset of branch target (s4)
589 *******************************************************************************/
/* gen_resolvebranch: backpatch the word displacement of the (V8) Bicc
 * instruction immediately before ip.
 *   ip ... pointer to the instruction after the branch (void *)
 *   so ... byte offset of the instruction after the branch (s4)
 *   to ... byte offset of the branch target (s4)
 * Fixed: the comment above states the disp field is 22 bits wide, but the
 * old mask 0x1fffff kept only 21 bits, destroying the sign bit of backward
 * (negative-displacement) branches; mask with 0x3fffff instead.
 * Note: |= assumes the disp field of the patched word is still zero. */
#define gen_resolvebranch(ip,so,to) \
	((s4 *) (ip))[-1] |= ((s4) (to) - (so)) >> 2 & 0x3fffff
598 #endif /* _CODEGEN_H */