1 /* jit/alpha/codegen.h - code generation macros and definitions for alpha
3 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
4 Institut f. Computersprachen, TU Wien
5 R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
6 M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
7 P. Tomsich, J. Wenninger
9 This file is part of CACAO.
11 This program is free software; you can redistribute it and/or
12 modify it under the terms of the GNU General Public License as
13 published by the Free Software Foundation; either version 2, or (at
14 your option) any later version.
16 This program is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
26 Contact: cacao@complang.tuwien.ac.at
28 Authors: Andreas Krall
31 $Id: codegen.h 594 2003-11-09 19:54:50Z twisti $
42 /* see also file calling.doc for explanation of calling conventions */
/* preallocated registers *****************************************************/

/* integer registers */

#define REG_RESULT 0 /* to deliver method results */

#define REG_RA 26 /* return address */
#define REG_PV 27 /* procedure vector, must be provided by caller */
#define REG_METHODPTR 28 /* pointer to the place from where the procedure */
/* vector has been fetched */
#define REG_ITMP1 25 /* temporary register */
#define REG_ITMP2 28 /* temporary register and method pointer */
#define REG_ITMP3 29 /* temporary register */

/* NOTE(review): integer register 28 is shared by REG_METHODPTR, REG_ITMP2
   and REG_ITMP2_XPC — presumably safe because their live ranges never
   overlap; verify against the code generator before changing any of them. */

#define REG_ITMP1_XPTR 25 /* exception pointer = temporary register 1 */
#define REG_ITMP2_XPC 28 /* exception pc = temporary register 2 */

#define REG_SP 30 /* stack pointer */
#define REG_ZERO 31 /* always zero */

/* floating point registers (separate register file — numbers may coincide
   with integer register numbers without conflict) */

#define REG_FRESULT 0 /* to deliver floating point method results */
#define REG_FTMP1 28 /* temporary floating point register */
#define REG_FTMP2 29 /* temporary floating point register */
#define REG_FTMP3 30 /* temporary floating point register */

#define REG_IFTMP 28 /* temporary integer and floating point register */

/* register-set sizes used by the register allocator */

#define INT_SAV_CNT 7 /* number of int callee saved registers */
#define INT_ARG_CNT 6 /* number of int argument registers */

#define FLT_SAV_CNT 8 /* number of flt callee saved registers */
#define FLT_ARG_CNT 6 /* number of flt argument registers */
/* macros to create code ******************************************************/

/* 3-address-operations: M_OP3

   op ..... 6-bit opcode (placed at instruction bits 31..26)
   fu ..... function-number (bits 11..5)
   a ...... register number source 1 (bits 25..21)
   b ...... register number or constant integer source 2
   c ...... register number destination (bits 4..0)
   const .. switch to use b as constant integer
            (REG means: use b as register number, at bits 20..16)
            (CONST means: use b as constant 8-bit-integer, at bits 20..13,
             with the literal flag set at bit 12)

   NOTE: the macro parameter named `const` shadows the C keyword; this is
   legal in the preprocessor but means callers must pass a plain 0 or 1.
   The shift amount (16-3*(const)) selects the register (16) or literal (13)
   position for operand b; mcodeptr is assumed to be an s4* code pointer
   provided by the including code generator.                                  */

#define M_OP3(op,fu,a,b,c,const) \
*(mcodeptr++) = ( (((s4)(op))<<26)|((a)<<21)|((b)<<(16-3*(const)))| \
((const)<<12)|((fu)<<5)|((c)) )
/* 3-address-floating-point-operation: M_FOP3

   op ..... 6-bit opcode (bits 31..26)
   fu ..... 11-bit function-number (bits 15..5)
   a,b ... source floating-point registers (bits 25..21 and 20..16)
   c ..... destination register (bits 4..0)                                   */

#define M_FOP3(op,fu,a,b,c) \
*(mcodeptr++) = ( (((s4)(op))<<26)|((a)<<21)|((b)<<16)|((fu)<<5)|(c) )
/* branch instructions: M_BRA

   a ...... register to be tested (bits 25..21)
   disp ... relative address to be jumped to (divided by 4), encoded as a
            21-bit signed displacement — the mask truncates it to 21 bits    */

#define M_BRA(op,a,disp) \
*(mcodeptr++) = ( (((s4)(op))<<26)|((a)<<21)|((disp)&0x1fffff) )
/* memory operations: M_MEM

   a ...... source/target register for memory access (bits 25..21)
   b ...... base register (bits 20..16)
   disp ... displacement (16 bit signed) to be added to b                     */

#define M_MEM(op,a,b,disp) \
*(mcodeptr++) = ( (((s4)(op))<<26)|((a)<<21)|((b)<<16)|((disp)&0xffff) )
/* macros for all used commands (see an Alpha-manual for description) *********/

#define M_LDA(a,b,disp) M_MEM (0x08,a,b,disp) /* low const */
#define M_LDAH(a,b,disp) M_MEM (0x09,a,b,disp) /* high const */
#define M_BLDU(a,b,disp) M_MEM (0x0a,a,b,disp) /* 8 load */
#define M_SLDU(a,b,disp) M_MEM (0x0c,a,b,disp) /* 16 load */
#define M_ILD(a,b,disp) M_MEM (0x28,a,b,disp) /* 32 load */
#define M_LLD(a,b,disp) M_MEM (0x29,a,b,disp) /* 64 load */
/* M_ALD/M_AST deliberately reuse the 64-bit LDQ/STQ opcodes:
   pointers are 64 bit on Alpha */
#define M_ALD(a,b,disp) M_MEM (0x29,a,b,disp) /* addr load */
#define M_BST(a,b,disp) M_MEM (0x0e,a,b,disp) /* 8 store */
#define M_SST(a,b,disp) M_MEM (0x0d,a,b,disp) /* 16 store */
#define M_IST(a,b,disp) M_MEM (0x2c,a,b,disp) /* 32 store */
#define M_LST(a,b,disp) M_MEM (0x2d,a,b,disp) /* 64 store */
#define M_AST(a,b,disp) M_MEM (0x2d,a,b,disp) /* addr store */
/* sign extension — NOTE(review): SEXTB/SEXTW (opcode 0x1c) belong to the
   BWX extension (EV56 and later); presumably all targeted CPUs have it —
   verify if older Alphas must be supported */
#define M_BSEXT(b,c) M_OP3 (0x1c,0x0,REG_ZERO,b,c,0) /* 8 signext */
#define M_SSEXT(b,c) M_OP3 (0x1c,0x1,REG_ZERO,b,c,0) /* 16 signext */

/* conditional and unconditional branches (pc-relative, 21-bit disp) */
#define M_BR(disp) M_BRA (0x30,REG_ZERO,disp) /* branch */
#define M_BSR(ra,disp) M_BRA (0x34,ra,disp) /* branch sbr */
#define M_BEQZ(a,disp) M_BRA (0x39,a,disp) /* br a == 0 */
#define M_BLTZ(a,disp) M_BRA (0x3a,a,disp) /* br a < 0 */
#define M_BLEZ(a,disp) M_BRA (0x3b,a,disp) /* br a <= 0 */
#define M_BNEZ(a,disp) M_BRA (0x3d,a,disp) /* br a != 0 */
#define M_BGEZ(a,disp) M_BRA (0x3e,a,disp) /* br a >= 0 */
#define M_BGTZ(a,disp) M_BRA (0x3f,a,disp) /* br a > 0 */

/* indirect jumps: all use memory-format opcode 0x1a; bits 15..14 of the
   displacement field select the jump kind (JMP/JSR/RET) */
#define M_JMP(a,b) M_MEM (0x1a,a,b,0x0000) /* jump */
#define M_JSR(a,b) M_MEM (0x1a,a,b,0x4000) /* call sbr */
#define M_RET(a,b) M_MEM (0x1a,a,b,0x8000) /* return */
/* integer add/sub/mul; the _IMM variants take an 8-bit unsigned literal as
   operand b (see M_OP3's const switch) */
#define M_IADD(a,b,c) M_OP3 (0x10,0x0, a,b,c,0) /* 32 add */
#define M_LADD(a,b,c) M_OP3 (0x10,0x20, a,b,c,0) /* 64 add */
#define M_ISUB(a,b,c) M_OP3 (0x10,0x09, a,b,c,0) /* 32 sub */
#define M_LSUB(a,b,c) M_OP3 (0x10,0x29, a,b,c,0) /* 64 sub */
#define M_IMUL(a,b,c) M_OP3 (0x13,0x00, a,b,c,0) /* 32 mul */
#define M_LMUL(a,b,c) M_OP3 (0x13,0x20, a,b,c,0) /* 64 mul */

#define M_IADD_IMM(a,b,c) M_OP3 (0x10,0x0, a,b,c,1) /* 32 add */
#define M_LADD_IMM(a,b,c) M_OP3 (0x10,0x20, a,b,c,1) /* 64 add */
#define M_ISUB_IMM(a,b,c) M_OP3 (0x10,0x09, a,b,c,1) /* 32 sub */
#define M_LSUB_IMM(a,b,c) M_OP3 (0x10,0x29, a,b,c,1) /* 64 sub */
#define M_IMUL_IMM(a,b,c) M_OP3 (0x13,0x00, a,b,c,1) /* 32 mul */
#define M_LMUL_IMM(a,b,c) M_OP3 (0x13,0x20, a,b,c,1) /* 64 mul */
/* signed compares */
#define M_CMPEQ(a,b,c) M_OP3 (0x10,0x2d, a,b,c,0) /* c = a == b */
#define M_CMPLT(a,b,c) M_OP3 (0x10,0x4d, a,b,c,0) /* c = a < b */
#define M_CMPLE(a,b,c) M_OP3 (0x10,0x6d, a,b,c,0) /* c = a <= b */

/* unsigned compares */
#define M_CMPULE(a,b,c) M_OP3 (0x10,0x3d, a,b,c,0) /* c = a <= b (unsigned) */
#define M_CMPULT(a,b,c) M_OP3 (0x10,0x1d, a,b,c,0) /* c = a < b (unsigned) */

#define M_CMPEQ_IMM(a,b,c) M_OP3 (0x10,0x2d, a,b,c,1) /* c = a == b */
#define M_CMPLT_IMM(a,b,c) M_OP3 (0x10,0x4d, a,b,c,1) /* c = a < b */
#define M_CMPLE_IMM(a,b,c) M_OP3 (0x10,0x6d, a,b,c,1) /* c = a <= b */

#define M_CMPULE_IMM(a,b,c) M_OP3 (0x10,0x3d, a,b,c,1) /* c = a <= b (unsigned) */
#define M_CMPULT_IMM(a,b,c) M_OP3 (0x10,0x1d, a,b,c,1) /* c = a < b (unsigned) */
/* bitwise logical operations */
#define M_AND(a,b,c) M_OP3 (0x11,0x00, a,b,c,0) /* c = a & b */
#define M_OR( a,b,c) M_OP3 (0x11,0x20, a,b,c,0) /* c = a | b */
#define M_XOR(a,b,c) M_OP3 (0x11,0x40, a,b,c,0) /* c = a ^ b */

#define M_AND_IMM(a,b,c) M_OP3 (0x11,0x00, a,b,c,1) /* c = a & b */
#define M_OR_IMM( a,b,c) M_OP3 (0x11,0x20, a,b,c,1) /* c = a | b */
#define M_XOR_IMM(a,b,c) M_OP3 (0x11,0x40, a,b,c,1) /* c = a ^ b */

/* pseudo instructions built from OR with the zero register (31) */
#define M_MOV(a,c) M_OR (a,a,c) /* c = a */
#define M_CLR(c) M_OR (31,31,c) /* c = 0 */
#define M_NOP M_OR (31,31,31) /* ; */

/* shifts: SRA is arithmetic (sign-propagating), SRL is logical */
#define M_SLL(a,b,c) M_OP3 (0x12,0x39, a,b,c,0) /* c = a << b */
#define M_SRA(a,b,c) M_OP3 (0x12,0x3c, a,b,c,0) /* c = a >> b */
#define M_SRL(a,b,c) M_OP3 (0x12,0x34, a,b,c,0) /* c = a >>>b */

#define M_SLL_IMM(a,b,c) M_OP3 (0x12,0x39, a,b,c,1) /* c = a << b */
#define M_SRA_IMM(a,b,c) M_OP3 (0x12,0x3c, a,b,c,1) /* c = a >> b */
#define M_SRL_IMM(a,b,c) M_OP3 (0x12,0x34, a,b,c,1) /* c = a >>>b */
/* floating point load/store (IEEE S = flt, IEEE T = dbl) */
#define M_FLD(a,b,disp) M_MEM (0x22,a,b,disp) /* load flt */
#define M_DLD(a,b,disp) M_MEM (0x23,a,b,disp) /* load dbl */
#define M_FST(a,b,disp) M_MEM (0x26,a,b,disp) /* store flt */
#define M_DST(a,b,disp) M_MEM (0x27,a,b,disp) /* store dbl */

/* floating point arithmetic */
#define M_FADD(a,b,c) M_FOP3 (0x16, 0x080, a,b,c) /* flt add */
#define M_DADD(a,b,c) M_FOP3 (0x16, 0x0a0, a,b,c) /* dbl add */
#define M_FSUB(a,b,c) M_FOP3 (0x16, 0x081, a,b,c) /* flt sub */
#define M_DSUB(a,b,c) M_FOP3 (0x16, 0x0a1, a,b,c) /* dbl sub */
#define M_FMUL(a,b,c) M_FOP3 (0x16, 0x082, a,b,c) /* flt mul */
#define M_DMUL(a,b,c) M_FOP3 (0x16, 0x0a2, a,b,c) /* dbl mul */
#define M_FDIV(a,b,c) M_FOP3 (0x16, 0x083, a,b,c) /* flt div */
#define M_DDIV(a,b,c) M_FOP3 (0x16, 0x0a3, a,b,c) /* dbl div */

/* ...S variants: same operations with the 0x500 function-code bits set —
   presumably the /SU (software completion) qualifier; verify against the
   Alpha architecture manual before relying on trap behavior */
#define M_FADDS(a,b,c) M_FOP3 (0x16, 0x580, a,b,c) /* flt add */
#define M_DADDS(a,b,c) M_FOP3 (0x16, 0x5a0, a,b,c) /* dbl add */
#define M_FSUBS(a,b,c) M_FOP3 (0x16, 0x581, a,b,c) /* flt sub */
#define M_DSUBS(a,b,c) M_FOP3 (0x16, 0x5a1, a,b,c) /* dbl sub */
#define M_FMULS(a,b,c) M_FOP3 (0x16, 0x582, a,b,c) /* flt mul */
#define M_DMULS(a,b,c) M_FOP3 (0x16, 0x5a2, a,b,c) /* dbl mul */
#define M_FDIVS(a,b,c) M_FOP3 (0x16, 0x583, a,b,c) /* flt div */
#define M_DDIVS(a,b,c) M_FOP3 (0x16, 0x5a3, a,b,c) /* dbl div */
/* floating point / integer conversions; source operand is register b,
   the a field is the zero register (31) */
#define M_CVTDF(b,c) M_FOP3 (0x16, 0x0ac, 31,b,c) /* dbl2flt */
#define M_CVTLF(b,c) M_FOP3 (0x16, 0x0bc, 31,b,c) /* long2flt */
#define M_CVTLD(b,c) M_FOP3 (0x16, 0x0be, 31,b,c) /* long2dbl */
#define M_CVTDL(b,c) M_FOP3 (0x16, 0x1af, 31,b,c) /* dbl2long */
#define M_CVTDL_C(b,c) M_FOP3 (0x16, 0x12f, 31,b,c) /* dbl2long, chopped */
#define M_CVTLI(b,c) M_FOP3 (0x17, 0x130, 31,b,c) /* long2int */

/* ...S variants with software-completion function-code bits (see above) */
#define M_CVTDFS(b,c) M_FOP3 (0x16, 0x5ac, 31,b,c) /* dbl2flt */
#define M_CVTDLS(b,c) M_FOP3 (0x16, 0x5af, 31,b,c) /* dbl2long */
#define M_CVTDL_CS(b,c) M_FOP3 (0x16, 0x52f, 31,b,c) /* dbl2long, chopped */
#define M_CVTLIS(b,c) M_FOP3 (0x17, 0x530, 31,b,c) /* long2int */
/* floating point compares — result is a flt register holding 2.0 (true)
   or 0.0 (false) per the Alpha convention; tested with M_FBEQZ below */
#define M_FCMPEQ(a,b,c) M_FOP3 (0x16, 0x0a5, a,b,c) /* c = a==b */
#define M_FCMPLT(a,b,c) M_FOP3 (0x16, 0x0a6, a,b,c) /* c = a<b */

#define M_FCMPEQS(a,b,c) M_FOP3 (0x16, 0x5a5, a,b,c) /* c = a==b */
#define M_FCMPLTS(a,b,c) M_FOP3 (0x16, 0x5a6, a,b,c) /* c = a<b */

/* register moves via CPYS (copy sign), with both sources the same register */
#define M_FMOV(fa,fb) M_FOP3 (0x17, 0x020, fa,fa,fb) /* b = a */
#define M_FMOVN(fa,fb) M_FOP3 (0x17, 0x021, fa,fa,fb) /* b = -a */

#define M_FNOP M_FMOV (31,31)

#define M_FBEQZ(fa,disp) M_BRA (0x31,fa,disp) /* br a == 0.0*/
/* macros for special commands (see an Alpha-manual for description) **********/

#define M_TRAPB M_MEM (0x18,0,0,0x0000) /* trap barrier*/

/* scaled add/sub: operand a is multiplied by 4 (S4) or 8 (S8) before the
   add/sub; L = 32-bit result, Q = 64-bit result */
#define M_S4ADDL(a,b,c) M_OP3 (0x10,0x02, a,b,c,0) /* c = a*4 + b */
#define M_S4ADDQ(a,b,c) M_OP3 (0x10,0x22, a,b,c,0) /* c = a*4 + b */
#define M_S4SUBL(a,b,c) M_OP3 (0x10,0x0b, a,b,c,0) /* c = a*4 - b */
#define M_S4SUBQ(a,b,c) M_OP3 (0x10,0x2b, a,b,c,0) /* c = a*4 - b */
#define M_S8ADDL(a,b,c) M_OP3 (0x10,0x12, a,b,c,0) /* c = a*8 + b */
#define M_S8ADDQ(a,b,c) M_OP3 (0x10,0x32, a,b,c,0) /* c = a*8 + b */
#define M_S8SUBL(a,b,c) M_OP3 (0x10,0x1b, a,b,c,0) /* c = a*8 - b */
#define M_S8SUBQ(a,b,c) M_OP3 (0x10,0x3b, a,b,c,0) /* c = a*8 - b */
/* scaled-address add: alias for S8ADDQ since addresses are 8 bytes wide */
#define M_SAADDQ(a,b,c) M_S8ADDQ(a,b,c) /* c = a*8 + b */

#define M_S4ADDL_IMM(a,b,c) M_OP3 (0x10,0x02, a,b,c,1) /* c = a*4 + b */
#define M_S4ADDQ_IMM(a,b,c) M_OP3 (0x10,0x22, a,b,c,1) /* c = a*4 + b */
#define M_S4SUBL_IMM(a,b,c) M_OP3 (0x10,0x0b, a,b,c,1) /* c = a*4 - b */
#define M_S4SUBQ_IMM(a,b,c) M_OP3 (0x10,0x2b, a,b,c,1) /* c = a*4 - b */
#define M_S8ADDL_IMM(a,b,c) M_OP3 (0x10,0x12, a,b,c,1) /* c = a*8 + b */
#define M_S8ADDQ_IMM(a,b,c) M_OP3 (0x10,0x32, a,b,c,1) /* c = a*8 + b */
#define M_S8SUBL_IMM(a,b,c) M_OP3 (0x10,0x1b, a,b,c,1) /* c = a*8 - b */
#define M_S8SUBQ_IMM(a,b,c) M_OP3 (0x10,0x3b, a,b,c,1) /* c = a*8 - b */
/* unaligned 64-bit load/store (LDQ_U/STQ_U: low 3 address bits ignored) */
#define M_LLD_U(a,b,disp) M_MEM (0x0b,a,b,disp) /* unalign ld */
#define M_LST_U(a,b,disp) M_MEM (0x0f,a,b,disp) /* unalign st */

/* ZAP/ZAPNOT: clear (keep) the bytes of a selected by the bitmask in b */
#define M_ZAP(a,b,c) M_OP3 (0x12,0x30, a,b,c,0)
#define M_ZAPNOT(a,b,c) M_OP3 (0x12,0x31, a,b,c,0)

#define M_ZAP_IMM(a,b,c) M_OP3 (0x12,0x30, a,b,c,1)
#define M_ZAPNOT_IMM(a,b,c) M_OP3 (0x12,0x31, a,b,c,1)

/* zero extension built on ZAPNOT: the immediate byte-mask keeps the low
   1, 2 or 4 bytes respectively */
#define M_BZEXT(a,b) M_ZAPNOT_IMM(a, 0x01, b) /* 8 zeroext */
#define M_CZEXT(a,b) M_ZAPNOT_IMM(a, 0x03, b) /* 16 zeroext */
#define M_IZEXT(a,b) M_ZAPNOT_IMM(a, 0x0f, b) /* 32 zeroext */
/* byte-manipulation instructions (EXTract/INSert/MaSK, Low/High halves,
   Byte/Word/Longword/Quadword) used for sub-word memory access sequences;
   operand b selects the byte position */
#define M_EXTBL(a,b,c) M_OP3 (0x12,0x06, a,b,c,0)
#define M_EXTWL(a,b,c) M_OP3 (0x12,0x16, a,b,c,0)
#define M_EXTLL(a,b,c) M_OP3 (0x12,0x26, a,b,c,0)
#define M_EXTQL(a,b,c) M_OP3 (0x12,0x36, a,b,c,0)
#define M_EXTWH(a,b,c) M_OP3 (0x12,0x5a, a,b,c,0)
#define M_EXTLH(a,b,c) M_OP3 (0x12,0x6a, a,b,c,0)
#define M_EXTQH(a,b,c) M_OP3 (0x12,0x7a, a,b,c,0)
#define M_INSBL(a,b,c) M_OP3 (0x12,0x0b, a,b,c,0)
#define M_INSWL(a,b,c) M_OP3 (0x12,0x1b, a,b,c,0)
#define M_INSLL(a,b,c) M_OP3 (0x12,0x2b, a,b,c,0)
#define M_INSQL(a,b,c) M_OP3 (0x12,0x3b, a,b,c,0)
#define M_INSWH(a,b,c) M_OP3 (0x12,0x57, a,b,c,0)
#define M_INSLH(a,b,c) M_OP3 (0x12,0x67, a,b,c,0)
#define M_INSQH(a,b,c) M_OP3 (0x12,0x77, a,b,c,0)
#define M_MSKBL(a,b,c) M_OP3 (0x12,0x02, a,b,c,0)
#define M_MSKWL(a,b,c) M_OP3 (0x12,0x12, a,b,c,0)
#define M_MSKLL(a,b,c) M_OP3 (0x12,0x22, a,b,c,0)
#define M_MSKQL(a,b,c) M_OP3 (0x12,0x32, a,b,c,0)
#define M_MSKWH(a,b,c) M_OP3 (0x12,0x52, a,b,c,0)
#define M_MSKLH(a,b,c) M_OP3 (0x12,0x62, a,b,c,0)
#define M_MSKQH(a,b,c) M_OP3 (0x12,0x72, a,b,c,0)

/* same, with b as 8-bit literal byte position */
#define M_EXTBL_IMM(a,b,c) M_OP3 (0x12,0x06, a,b,c,1)
#define M_EXTWL_IMM(a,b,c) M_OP3 (0x12,0x16, a,b,c,1)
#define M_EXTLL_IMM(a,b,c) M_OP3 (0x12,0x26, a,b,c,1)
#define M_EXTQL_IMM(a,b,c) M_OP3 (0x12,0x36, a,b,c,1)
#define M_EXTWH_IMM(a,b,c) M_OP3 (0x12,0x5a, a,b,c,1)
#define M_EXTLH_IMM(a,b,c) M_OP3 (0x12,0x6a, a,b,c,1)
#define M_EXTQH_IMM(a,b,c) M_OP3 (0x12,0x7a, a,b,c,1)
#define M_INSBL_IMM(a,b,c) M_OP3 (0x12,0x0b, a,b,c,1)
#define M_INSWL_IMM(a,b,c) M_OP3 (0x12,0x1b, a,b,c,1)
#define M_INSLL_IMM(a,b,c) M_OP3 (0x12,0x2b, a,b,c,1)
#define M_INSQL_IMM(a,b,c) M_OP3 (0x12,0x3b, a,b,c,1)
#define M_INSWH_IMM(a,b,c) M_OP3 (0x12,0x57, a,b,c,1)
#define M_INSLH_IMM(a,b,c) M_OP3 (0x12,0x67, a,b,c,1)
#define M_INSQH_IMM(a,b,c) M_OP3 (0x12,0x77, a,b,c,1)
#define M_MSKBL_IMM(a,b,c) M_OP3 (0x12,0x02, a,b,c,1)
#define M_MSKWL_IMM(a,b,c) M_OP3 (0x12,0x12, a,b,c,1)
#define M_MSKLL_IMM(a,b,c) M_OP3 (0x12,0x22, a,b,c,1)
#define M_MSKQL_IMM(a,b,c) M_OP3 (0x12,0x32, a,b,c,1)
#define M_MSKWH_IMM(a,b,c) M_OP3 (0x12,0x52, a,b,c,1)
#define M_MSKLH_IMM(a,b,c) M_OP3 (0x12,0x62, a,b,c,1)
#define M_MSKQH_IMM(a,b,c) M_OP3 (0x12,0x72, a,b,c,1)
/* unsigned multiply, high 64 bits of the 128-bit product */
#define M_UMULH(a,b,c) M_OP3 (0x13,0x30, a,b,c,0) /* 64 umulh */

#define M_UMULH_IMM(a,b,c) M_OP3 (0x13,0x30, a,b,c,1) /* 64 umulh */

/* conditional moves: copy b into c when a satisfies the condition */
#define M_CMOVEQ(a,b,c) M_OP3 (0x11,0x24, a,b,c,0) /* a==0 ? c=b */
#define M_CMOVNE(a,b,c) M_OP3 (0x11,0x26, a,b,c,0) /* a!=0 ? c=b */
#define M_CMOVLT(a,b,c) M_OP3 (0x11,0x44, a,b,c,0) /* a< 0 ? c=b */
#define M_CMOVGE(a,b,c) M_OP3 (0x11,0x46, a,b,c,0) /* a>=0 ? c=b */
#define M_CMOVLE(a,b,c) M_OP3 (0x11,0x64, a,b,c,0) /* a<=0 ? c=b */
#define M_CMOVGT(a,b,c) M_OP3 (0x11,0x66, a,b,c,0) /* a> 0 ? c=b */

#define M_CMOVEQ_IMM(a,b,c) M_OP3 (0x11,0x24, a,b,c,1) /* a==0 ? c=b */
#define M_CMOVNE_IMM(a,b,c) M_OP3 (0x11,0x26, a,b,c,1) /* a!=0 ? c=b */
#define M_CMOVLT_IMM(a,b,c) M_OP3 (0x11,0x44, a,b,c,1) /* a< 0 ? c=b */
#define M_CMOVGE_IMM(a,b,c) M_OP3 (0x11,0x46, a,b,c,1) /* a>=0 ? c=b */
#define M_CMOVLE_IMM(a,b,c) M_OP3 (0x11,0x64, a,b,c,1) /* a<=0 ? c=b */
#define M_CMOVGT_IMM(a,b,c) M_OP3 (0x11,0x66, a,b,c,1) /* a> 0 ? c=b */
/* macros for unused commands (see an Alpha-manual for description) ***********/

/* these take the const switch explicitly instead of having _IMM variants */
#define M_ANDNOT(a,b,c,const) M_OP3 (0x11,0x08, a,b,c,const) /* c = a &~ b */
#define M_ORNOT(a,b,c,const) M_OP3 (0x11,0x28, a,b,c,const) /* c = a |~ b */
#define M_XORNOT(a,b,c,const) M_OP3 (0x11,0x48, a,b,c,const) /* c = a ^~ b */

#define M_CMPBGE(a,b,c,const) M_OP3 (0x10,0x0f, a,b,c,const)

#define M_FCMPUN(a,b,c) M_FOP3 (0x16, 0x0a4, a,b,c) /* unordered */
#define M_FCMPLE(a,b,c) M_FOP3 (0x16, 0x0a7, a,b,c) /* c = a<=b */

#define M_FCMPUNS(a,b,c) M_FOP3 (0x16, 0x5a4, a,b,c) /* unordered */
#define M_FCMPLES(a,b,c) M_FOP3 (0x16, 0x5a7, a,b,c) /* c = a<=b */

#define M_FBNEZ(fa,disp) M_BRA (0x35,fa,disp)
#define M_FBLEZ(fa,disp) M_BRA (0x33,fa,disp)

#define M_JMP_CO(a,b) M_MEM (0x1a,a,b,0xc000) /* call cosub */
/* function gen_resolvebranch **************************************************

   backpatches a branch instruction; Alpha branch instructions are very
   regular, so it is only necessary to overwrite some fixed bits in the
   instruction word (the 21-bit displacement field).

   parameters: ip ... pointer to instruction after branch (void*)
               so ... offset of instruction after branch (s4)
               to ... offset of branch target (s4)

   NOTE(review): the |= relies on the displacement field of the emitted
   branch being zero; also, (to-so) may be negative and >> on a negative
   signed value is implementation-defined in C — presumably arithmetic
   shift on all supported compilers, verify if porting.

*******************************************************************************/

#define gen_resolvebranch(ip,so,to) ((s4*)(ip))[-1]|=((s4)(to)-(so))>>2&0x1fffff
#define SOFTNULLPTRCHECK /* soft null pointer check supported as option */
389 #endif /* _CODEGEN_H */
393 * These are local overrides for various environment variables in Emacs.
394 * Please do not remove this and leave it at the end of the file, where
395 * Emacs will automagically detect them.
396 * ---------------------------------------------------------------------
399 * indent-tabs-mode: t