1 /* vm/jit/alpha/codegen.h - code generation macros and definitions for alpha
3 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
4 Institut f. Computersprachen, TU Wien
5 R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
6 M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
7 P. Tomsich, J. Wenninger
9 This file is part of CACAO.
11 This program is free software; you can redistribute it and/or
12 modify it under the terms of the GNU General Public License as
13 published by the Free Software Foundation; either version 2, or (at
14 your option) any later version.
16 This program is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 Contact: cacao@complang.tuwien.ac.at
28 Authors: Andreas Krall
31 $Id: codegen.h 1624 2004-11-30 14:49:45Z twisti $
/* additional functions and macros to generate code ***************************/

/* #define BlockPtrOfPC(pc) block+block_index[pc] */
/* Map a bytecode PC to its basicblock.  The pc argument is ignored: the
   resolved basicblock pointer is already stored in the current
   instruction's target field (iptr). */
#define BlockPtrOfPC(pc) ((basicblock *) iptr->target)

/* Bump the global spill counter (compiler statistics). */
#define COUNT_SPILLS count_spills++
/* gen_nullptr_check(objreg)

   Emits a null-pointer check on objreg: a BEQZ with a placeholder
   displacement, plus a patch-list entry (codegen_addxnullrefs) so the
   branch is later resolved to the exception-throwing stub.
   Wrapped in do/while(0) so the two statements behave as one statement
   (safe inside an unbraced if/else at the use site). */
#define gen_nullptr_check(objreg) \
    do { \
        M_BEQZ((objreg), 0); \
        codegen_addxnullrefs(cd, mcodeptr); \
    } while (0)
/* gen_bound_check

   Emits an array-bounds check: loads the array length, compares the
   index (s2) unsigned-less-than the length, and branches to the
   ArrayIndexOutOfBounds patch target when the check fails.
   Relies on s1 (array register) and s2 (index register) being in scope
   at the expansion site.  do/while(0) makes the sequence a single
   statement. */
#define gen_bound_check \
    do { \
        M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size)); \
        M_CMPULT(s2, REG_ITMP3, REG_ITMP3); \
        M_BEQZ(REG_ITMP3, 0); \
        codegen_addxboundrefs(cd, mcodeptr, s2); \
    } while (0)
/* MCODECHECK(icnt)

   Ensures there is room for at least icnt more machine-code words in the
   code buffer; grows the buffer via codegen_increase otherwise.
   do/while(0) avoids the dangling-else hazard of a bare if-macro. */
#define MCODECHECK(icnt) \
    do { \
        if ((mcodeptr + (icnt)) > cd->mcodeend) \
            mcodeptr = codegen_increase(cd, (u1 *) mcodeptr); \
    } while (0)
/* M_INTMOVE:
   generates an integer-move from register a to b.
   If a and b are the same int-register, no code will be generated.
   Arguments are parenthesized and the body is wrapped in do/while(0)
   so the macro is safe inside unbraced if/else. */
#define M_INTMOVE(a,b) \
    do { \
        if ((a) != (b)) { \
            M_MOV(a, b); \
        } \
    } while (0)
/* M_FLTMOVE:
   generates a floating-point-move from register a to b.
   If a and b are the same float-register, no code will be generated.
   Arguments are parenthesized and the body is wrapped in do/while(0)
   so the macro is safe inside unbraced if/else. */
#define M_FLTMOVE(a,b) \
    do { \
        if ((a) != (b)) { \
            M_FMOV(a, b); \
        } \
    } while (0)
/* var_to_reg_int:
   this macro generates code to fetch data from a pseudo-register into
   a real register.

   If the pseudo-register has actually been assigned to a real
   register, no code will be emitted, since following operations
   can use this register directly.

   regnr:      (output) register number where the operand can be found
               after fetching — either tempnr, or the register already
               allocated to v
   v:          pseudoregister to be fetched from
   tempnr:     temporary register to be used if v is spilled to RAM

   Note: regnr must be assigned in BOTH branches; the spilled case loads
   the value into tempnr and reports tempnr back to the caller. */
#define var_to_reg_int(regnr, v, tempnr) \
    do { \
        if ((v)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_LLD(tempnr, REG_SP, 8 * (v)->regoff); \
            regnr = tempnr; \
        } else { \
            regnr = (v)->regoff; \
        } \
    } while (0)
/* var_to_reg_flt:
   floating-point counterpart of var_to_reg_int — fetches pseudo-register
   v into a float register, loading from the stack slot (8 bytes per
   slot) only when v is spilled.  regnr receives the register that holds
   the value afterwards. */
#define var_to_reg_flt(regnr, v, tempnr) \
    do { \
        if ((v)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_DLD(tempnr, REG_SP, 8 * (v)->regoff); \
            regnr = tempnr; \
        } else { \
            regnr = (v)->regoff; \
        } \
    } while (0)
/* store_reg_to_var_xxx:
   This macro generates the code to store the result of an operation
   back into a spilled pseudo-variable.
   If the pseudo-variable has not been spilled in the first place, this
   macro will generate nothing.

   sptr ......... pseudovariable
   tempregnum ... number of the temporary register holding the result
                  (as returned by reg_of_var) */
#define store_reg_to_var_int(sptr, tempregnum) \
    do { \
        if ((sptr)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_LST(tempregnum, REG_SP, 8 * (sptr)->regoff); \
        } \
    } while (0)
/* store_reg_to_var_flt:
   floating-point counterpart of store_reg_to_var_int — writes the
   double register back to the pseudo-variable's stack slot only when it
   is spilled. */
#define store_reg_to_var_flt(sptr, tempregnum) \
    do { \
        if ((sptr)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_DST(tempregnum, REG_SP, 8 * (sptr)->regoff); \
        } \
    } while (0)
/* M_COPY:
   copies stack slot 'from' to stack slot 'to'.  Allocates a destination
   register d, and only emits code when source and destination differ in
   register number or in memory/register residence.  The value is fetched
   (var_to_reg_*), moved into d if needed, and stored back (store_reg_*),
   dispatching on the slot's type between the float and integer paths. */
#define M_COPY(from, to) \
    do { \
        d = reg_of_var(rd, to, REG_IFTMP); \
        if ((from->regoff != to->regoff) || \
            ((from->flags ^ to->flags) & INMEMORY)) { \
            if (IS_FLT_DBL_TYPE(from->type)) { \
                var_to_reg_flt(s1, from, d); \
                M_FLTMOVE(s1, d); \
                store_reg_to_var_flt(to, d); \
            } else { \
                var_to_reg_int(s1, from, d); \
                M_INTMOVE(s1, d); \
                store_reg_to_var_int(to, d); \
            } \
        } \
    } while (0)
/* ICONST(r,c):
   loads the 32-bit constant c into register r.  Constants that fit the
   16-bit signed LDA immediate are materialized inline; anything larger
   goes through a data-segment entry loaded PC-relative via REG_PV. */
#define ICONST(r,c) \
    do { \
        if ((c) >= -32768 && (c) <= 32767) { \
            M_LDA((r), REG_ZERO, (c)); \
        } else { \
            a = dseg_adds4(cd, (c)); \
            M_ILD((r), REG_PV, a); \
        } \
    } while (0)
/* LCONST(r,c):
   loads the 64-bit constant c into register r.  Same strategy as ICONST,
   but with an 8-byte data-segment entry and a 64-bit load for constants
   outside the 16-bit signed LDA range. */
#define LCONST(r,c) \
    do { \
        if ((c) >= -32768 && (c) <= 32767) { \
            M_LDA((r), REG_ZERO, (c)); \
        } else { \
            a = dseg_adds8(cd, (c)); \
            M_LLD((r), REG_PV, a); \
        } \
    } while (0)
/* macros to create code ******************************************************/

/* 3-address-operations: M_OP3

   op ..... opcode (bits 26..31)
   fu ..... function-number (bits 5..11)
   a ...... register number source 1 (bits 21..25)
   b ...... register number OR 8-bit literal source 2
   c ...... register number destination (bits 0..4)
   imm .... 0: use b as register number (bits 16..20)
            1: use b as 8-bit literal (bits 13..20, literal flag bit 12)

   The parameter was renamed from 'const' (a C keyword) to 'imm'.
   All shifting is done in unsigned arithmetic: left-shifting into the
   sign bit of a signed int is undefined behavior in C. */
#define M_OP3(op,fu,a,b,c,imm) \
    *(mcodeptr++) = (s4) ( ((unsigned int) (op) << 26) | \
                           ((unsigned int) (a) << 21) | \
                           ((unsigned int) (b) << (16 - 3 * (imm))) | \
                           ((unsigned int) (imm) << 12) | \
                           ((unsigned int) (fu) << 5) | \
                           (unsigned int) (c) )
/* 3-address-floating-point-operation: M_FOP3

   op ..... opcode (bits 26..31)
   fu ..... function-number (bits 5..15)
   a,b .... source floating-point registers
   c ...... destination register

   Unsigned arithmetic avoids signed-shift undefined behavior. */
#define M_FOP3(op,fu,a,b,c) \
    *(mcodeptr++) = (s4) ( ((unsigned int) (op) << 26) | \
                           ((unsigned int) (a) << 21) | \
                           ((unsigned int) (b) << 16) | \
                           ((unsigned int) (fu) << 5) | \
                           (unsigned int) (c) )
/* branch instructions: M_BRA

   op ..... opcode (bits 26..31)
   a ...... register to be tested
   disp ... relative displacement in instruction words (i.e. byte offset
            divided by 4), masked to the 21-bit signed field

   Branch opcodes are 0x30..0x3f; (op << 26) overflows a signed 32-bit
   int for those values, so the encoding is built in unsigned arithmetic
   and converted back to s4 only when stored. */
#define M_BRA(op,a,disp) \
    *(mcodeptr++) = (s4) ( ((unsigned int) (op) << 26) | \
                           ((unsigned int) (a) << 21) | \
                           ((unsigned int) (disp) & 0x1fffff) )
/* memory operations: M_MEM

   op ..... opcode (bits 26..31)
   a ...... source/target register for memory access
   b ...... base register
   disp ... displacement (16 bit signed) to be added to b

   Store opcodes (e.g. 0x2d STQ) are >= 0x20, for which (op << 26)
   overflows a signed 32-bit int; the encoding therefore uses unsigned
   arithmetic throughout. */
#define M_MEM(op,a,b,disp) \
    *(mcodeptr++) = (s4) ( ((unsigned int) (op) << 26) | \
                           ((unsigned int) (a) << 21) | \
                           ((unsigned int) (b) << 16) | \
                           ((unsigned int) (disp) & 0xffff) )
/* macros for all used commands (see an Alpha-manual for description) *********/

/* Memory access: a = value register, b = base register, disp = signed
   16-bit byte displacement.  M_ALD/M_AST reuse the 64-bit opcodes
   (0x29/0x2d) because addresses are 64 bits wide on Alpha. */
#define M_LDA(a,b,disp) M_MEM (0x08,a,b,disp) /* low const */
#define M_LDAH(a,b,disp) M_MEM (0x09,a,b,disp) /* high const */
#define M_BLDU(a,b,disp) M_MEM (0x0a,a,b,disp) /* 8 load */
#define M_SLDU(a,b,disp) M_MEM (0x0c,a,b,disp) /* 16 load */
#define M_ILD(a,b,disp) M_MEM (0x28,a,b,disp) /* 32 load */
#define M_LLD(a,b,disp) M_MEM (0x29,a,b,disp) /* 64 load */
#define M_ALD(a,b,disp) M_MEM (0x29,a,b,disp) /* addr load */
#define M_BST(a,b,disp) M_MEM (0x0e,a,b,disp) /* 8 store */
#define M_SST(a,b,disp) M_MEM (0x0d,a,b,disp) /* 16 store */
#define M_IST(a,b,disp) M_MEM (0x2c,a,b,disp) /* 32 store */
#define M_LST(a,b,disp) M_MEM (0x2d,a,b,disp) /* 64 store */
#define M_AST(a,b,disp) M_MEM (0x2d,a,b,disp) /* addr store */

/* Sign extension (opcode 0x1c).  NOTE(review): SEXTB/SEXTW are part of
   the BWX extension — presumably only emitted on CPUs supporting it;
   confirm against the runtime's CPU checks. */
#define M_BSEXT(b,c) M_OP3 (0x1c,0x0,REG_ZERO,b,c,0) /* 8 signext */
#define M_SSEXT(b,c) M_OP3 (0x1c,0x1,REG_ZERO,b,c,0) /* 16 signext */

/* Branches: disp is in instruction words; placeholder displacements are
   patched later (see gen_resolvebranch below). */
#define M_BR(disp) M_BRA (0x30,REG_ZERO,disp) /* branch */
#define M_BSR(ra,disp) M_BRA (0x34,ra,disp) /* branch sbr */
#define M_BEQZ(a,disp) M_BRA (0x39,a,disp) /* br a == 0 */
#define M_BLTZ(a,disp) M_BRA (0x3a,a,disp) /* br a < 0 */
#define M_BLEZ(a,disp) M_BRA (0x3b,a,disp) /* br a <= 0 */
#define M_BNEZ(a,disp) M_BRA (0x3d,a,disp) /* br a != 0 */
#define M_BGEZ(a,disp) M_BRA (0x3e,a,disp) /* br a >= 0 */
#define M_BGTZ(a,disp) M_BRA (0x3f,a,disp) /* br a > 0 */

/* Indirect jumps (memory format, opcode 0x1a): the displacement field
   carries the jump subtype (JMP/JSR/RET), not an offset. */
#define M_JMP(a,b) M_MEM (0x1a,a,b,0x0000) /* jump */
#define M_JSR(a,b) M_MEM (0x1a,a,b,0x4000) /* call sbr */
#define M_RET(a,b) M_MEM (0x1a,a,b,0x8000) /* return */
/* Integer arithmetic (operate format, opcode 0x10/0x13).  The 32-bit
   ("longword") forms sign-extend their result to 64 bits per the Alpha
   architecture. */
#define M_IADD(a,b,c) M_OP3 (0x10,0x0, a,b,c,0) /* 32 add */
#define M_LADD(a,b,c) M_OP3 (0x10,0x20, a,b,c,0) /* 64 add */
#define M_ISUB(a,b,c) M_OP3 (0x10,0x09, a,b,c,0) /* 32 sub */
#define M_LSUB(a,b,c) M_OP3 (0x10,0x29, a,b,c,0) /* 64 sub */
#define M_IMUL(a,b,c) M_OP3 (0x13,0x00, a,b,c,0) /* 32 mul */
#define M_LMUL(a,b,c) M_OP3 (0x13,0x20, a,b,c,0) /* 64 mul */

/* _IMM variants: operand b is an 8-bit unsigned literal. */
#define M_IADD_IMM(a,b,c) M_OP3 (0x10,0x0, a,b,c,1) /* 32 add */
#define M_LADD_IMM(a,b,c) M_OP3 (0x10,0x20, a,b,c,1) /* 64 add */
#define M_ISUB_IMM(a,b,c) M_OP3 (0x10,0x09, a,b,c,1) /* 32 sub */
#define M_LSUB_IMM(a,b,c) M_OP3 (0x10,0x29, a,b,c,1) /* 64 sub */
#define M_IMUL_IMM(a,b,c) M_OP3 (0x13,0x00, a,b,c,1) /* 32 mul */
#define M_LMUL_IMM(a,b,c) M_OP3 (0x13,0x20, a,b,c,1) /* 64 mul */

/* Compares: c receives 0 or 1. */
#define M_CMPEQ(a,b,c) M_OP3 (0x10,0x2d, a,b,c,0) /* c = a == b */
#define M_CMPLT(a,b,c) M_OP3 (0x10,0x4d, a,b,c,0) /* c = a < b */
#define M_CMPLE(a,b,c) M_OP3 (0x10,0x6d, a,b,c,0) /* c = a <= b */

#define M_CMPULE(a,b,c) M_OP3 (0x10,0x3d, a,b,c,0) /* c = a <= b (unsigned) */
#define M_CMPULT(a,b,c) M_OP3 (0x10,0x1d, a,b,c,0) /* c = a < b (unsigned) */

#define M_CMPEQ_IMM(a,b,c) M_OP3 (0x10,0x2d, a,b,c,1) /* c = a == b */
#define M_CMPLT_IMM(a,b,c) M_OP3 (0x10,0x4d, a,b,c,1) /* c = a < b */
#define M_CMPLE_IMM(a,b,c) M_OP3 (0x10,0x6d, a,b,c,1) /* c = a <= b */

#define M_CMPULE_IMM(a,b,c) M_OP3 (0x10,0x3d, a,b,c,1) /* c = a <= b (unsigned) */
#define M_CMPULT_IMM(a,b,c) M_OP3 (0x10,0x1d, a,b,c,1) /* c = a < b (unsigned) */
/* Bitwise logic (opcode 0x11). */
#define M_AND(a,b,c) M_OP3 (0x11,0x00, a,b,c,0) /* c = a & b */
#define M_OR( a,b,c) M_OP3 (0x11,0x20, a,b,c,0) /* c = a | b */
#define M_XOR(a,b,c) M_OP3 (0x11,0x40, a,b,c,0) /* c = a ^ b */

#define M_AND_IMM(a,b,c) M_OP3 (0x11,0x00, a,b,c,1) /* c = a & b */
#define M_OR_IMM( a,b,c) M_OP3 (0x11,0x20, a,b,c,1) /* c = a | b */
#define M_XOR_IMM(a,b,c) M_OP3 (0x11,0x40, a,b,c,1) /* c = a ^ b */

/* Register moves built on OR; register 31 (REG_ZERO) always reads 0,
   so OR $31,$31,$31 is the canonical integer NOP. */
#define M_MOV(a,c) M_OR (a,a,c) /* c = a */
#define M_CLR(c) M_OR (31,31,c) /* c = 0 */
#define M_NOP M_OR (31,31,31) /* ; */

/* 64-bit shifts (opcode 0x12); only the low 6 bits of b are used. */
#define M_SLL(a,b,c) M_OP3 (0x12,0x39, a,b,c,0) /* c = a << b */
#define M_SRA(a,b,c) M_OP3 (0x12,0x3c, a,b,c,0) /* c = a >> b */
#define M_SRL(a,b,c) M_OP3 (0x12,0x34, a,b,c,0) /* c = a >>>b */

#define M_SLL_IMM(a,b,c) M_OP3 (0x12,0x39, a,b,c,1) /* c = a << b */
#define M_SRA_IMM(a,b,c) M_OP3 (0x12,0x3c, a,b,c,1) /* c = a >> b */
#define M_SRL_IMM(a,b,c) M_OP3 (0x12,0x34, a,b,c,1) /* c = a >>>b */

/* Floating-point loads/stores (S_floating / T_floating). */
#define M_FLD(a,b,disp) M_MEM (0x22,a,b,disp) /* load flt */
#define M_DLD(a,b,disp) M_MEM (0x23,a,b,disp) /* load dbl */
#define M_FST(a,b,disp) M_MEM (0x26,a,b,disp) /* store flt */
#define M_DST(a,b,disp) M_MEM (0x27,a,b,disp) /* store dbl */
/* IEEE floating-point arithmetic (opcode 0x16).  The ...S-suffixed
   variants differ only in the function-code qualifier bits —
   NOTE(review): presumably the /SU software-completion forms; confirm
   against the Alpha manual's qualifier encoding. */
#define M_FADD(a,b,c) M_FOP3 (0x16, 0x080, a,b,c) /* flt add */
#define M_DADD(a,b,c) M_FOP3 (0x16, 0x0a0, a,b,c) /* dbl add */
#define M_FSUB(a,b,c) M_FOP3 (0x16, 0x081, a,b,c) /* flt sub */
#define M_DSUB(a,b,c) M_FOP3 (0x16, 0x0a1, a,b,c) /* dbl sub */
#define M_FMUL(a,b,c) M_FOP3 (0x16, 0x082, a,b,c) /* flt mul */
#define M_DMUL(a,b,c) M_FOP3 (0x16, 0x0a2, a,b,c) /* dbl mul */
#define M_FDIV(a,b,c) M_FOP3 (0x16, 0x083, a,b,c) /* flt div */
#define M_DDIV(a,b,c) M_FOP3 (0x16, 0x0a3, a,b,c) /* dbl div */

#define M_FADDS(a,b,c) M_FOP3 (0x16, 0x580, a,b,c) /* flt add */
#define M_DADDS(a,b,c) M_FOP3 (0x16, 0x5a0, a,b,c) /* dbl add */
#define M_FSUBS(a,b,c) M_FOP3 (0x16, 0x581, a,b,c) /* flt sub */
#define M_DSUBS(a,b,c) M_FOP3 (0x16, 0x5a1, a,b,c) /* dbl sub */
#define M_FMULS(a,b,c) M_FOP3 (0x16, 0x582, a,b,c) /* flt mul */
#define M_DMULS(a,b,c) M_FOP3 (0x16, 0x5a2, a,b,c) /* dbl mul */
#define M_FDIVS(a,b,c) M_FOP3 (0x16, 0x583, a,b,c) /* flt div */
#define M_DDIVS(a,b,c) M_FOP3 (0x16, 0x5a3, a,b,c) /* dbl div */

/* Conversions: source in b, result in c; register 31 fills the unused
   a-field.  _C forms use chopped (truncating) rounding. */
#define M_CVTDF(b,c) M_FOP3 (0x16, 0x0ac, 31,b,c) /* dbl2flt */
#define M_CVTLF(b,c) M_FOP3 (0x16, 0x0bc, 31,b,c) /* long2flt */
#define M_CVTLD(b,c) M_FOP3 (0x16, 0x0be, 31,b,c) /* long2dbl */
#define M_CVTDL(b,c) M_FOP3 (0x16, 0x1af, 31,b,c) /* dbl2long */
#define M_CVTDL_C(b,c) M_FOP3 (0x16, 0x12f, 31,b,c) /* dbl2long */
#define M_CVTLI(b,c) M_FOP3 (0x17, 0x130, 31,b,c) /* long2int */

#define M_CVTDFS(b,c) M_FOP3 (0x16, 0x5ac, 31,b,c) /* dbl2flt */
#define M_CVTFDS(b,c) M_FOP3 (0x16, 0x6ac, 31,b,c) /* flt2dbl */
#define M_CVTDLS(b,c) M_FOP3 (0x16, 0x5af, 31,b,c) /* dbl2long */
#define M_CVTDL_CS(b,c) M_FOP3 (0x16, 0x52f, 31,b,c) /* dbl2long */
#define M_CVTLIS(b,c) M_FOP3 (0x17, 0x530, 31,b,c) /* long2int */

/* Floating-point compares: c receives a non-zero FP value on true. */
#define M_FCMPEQ(a,b,c) M_FOP3 (0x16, 0x0a5, a,b,c) /* c = a==b */
#define M_FCMPLT(a,b,c) M_FOP3 (0x16, 0x0a6, a,b,c) /* c = a<b */

#define M_FCMPEQS(a,b,c) M_FOP3 (0x16, 0x5a5, a,b,c) /* c = a==b */
#define M_FCMPLTS(a,b,c) M_FOP3 (0x16, 0x5a6, a,b,c) /* c = a<b */

/* FP register move / negate (CPYS with both source fields = fa). */
#define M_FMOV(fa,fb) M_FOP3 (0x17, 0x020, fa,fa,fb) /* b = a */
#define M_FMOVN(fa,fb) M_FOP3 (0x17, 0x021, fa,fa,fb) /* b = -a */

#define M_FNOP M_FMOV (31,31)

#define M_FBEQZ(fa,disp) M_BRA (0x31,fa,disp) /* br a == 0.0*/
/* macros for special commands (see an Alpha-manual for description) **********/

#define M_TRAPB M_MEM (0x18,0,0,0x0000) /* trap barrier*/

/* Scaled add/sub: a is scaled by 4 or 8 before the add/sub — used for
   array indexing.  M_SAADDQ is the "scale-by-address-size" alias. */
#define M_S4ADDL(a,b,c) M_OP3 (0x10,0x02, a,b,c,0) /* c = a*4 + b */
#define M_S4ADDQ(a,b,c) M_OP3 (0x10,0x22, a,b,c,0) /* c = a*4 + b */
#define M_S4SUBL(a,b,c) M_OP3 (0x10,0x0b, a,b,c,0) /* c = a*4 - b */
#define M_S4SUBQ(a,b,c) M_OP3 (0x10,0x2b, a,b,c,0) /* c = a*4 - b */
#define M_S8ADDL(a,b,c) M_OP3 (0x10,0x12, a,b,c,0) /* c = a*8 + b */
#define M_S8ADDQ(a,b,c) M_OP3 (0x10,0x32, a,b,c,0) /* c = a*8 + b */
#define M_S8SUBL(a,b,c) M_OP3 (0x10,0x1b, a,b,c,0) /* c = a*8 - b */
#define M_S8SUBQ(a,b,c) M_OP3 (0x10,0x3b, a,b,c,0) /* c = a*8 - b */
#define M_SAADDQ(a,b,c) M_S8ADDQ(a,b,c) /* c = a*8 + b */

#define M_S4ADDL_IMM(a,b,c) M_OP3 (0x10,0x02, a,b,c,1) /* c = a*4 + b */
#define M_S4ADDQ_IMM(a,b,c) M_OP3 (0x10,0x22, a,b,c,1) /* c = a*4 + b */
#define M_S4SUBL_IMM(a,b,c) M_OP3 (0x10,0x0b, a,b,c,1) /* c = a*4 - b */
#define M_S4SUBQ_IMM(a,b,c) M_OP3 (0x10,0x2b, a,b,c,1) /* c = a*4 - b */
#define M_S8ADDL_IMM(a,b,c) M_OP3 (0x10,0x12, a,b,c,1) /* c = a*8 + b */
#define M_S8ADDQ_IMM(a,b,c) M_OP3 (0x10,0x32, a,b,c,1) /* c = a*8 + b */
#define M_S8SUBL_IMM(a,b,c) M_OP3 (0x10,0x1b, a,b,c,1) /* c = a*8 - b */
#define M_S8SUBQ_IMM(a,b,c) M_OP3 (0x10,0x3b, a,b,c,1) /* c = a*8 - b */

/* Unaligned 64-bit access (LDQ_U/STQ_U): the low 3 address bits are
   ignored by the hardware. */
#define M_LLD_U(a,b,disp) M_MEM (0x0b,a,b,disp) /* unalign ld */
#define M_LST_U(a,b,disp) M_MEM (0x0f,a,b,disp) /* unalign st */

/* ZAP/ZAPNOT: zero bytes of a selected by (ZAP) / not selected by
   (ZAPNOT) the byte mask in b. */
#define M_ZAP(a,b,c) M_OP3 (0x12,0x30, a,b,c,0)
#define M_ZAPNOT(a,b,c) M_OP3 (0x12,0x31, a,b,c,0)

#define M_ZAP_IMM(a,b,c) M_OP3 (0x12,0x30, a,b,c,1)
#define M_ZAPNOT_IMM(a,b,c) M_OP3 (0x12,0x31, a,b,c,1)

/* Zero extension via ZAPNOT with a byte-mask literal keeping the low
   1, 2 or 4 bytes. */
#define M_BZEXT(a,b) M_ZAPNOT_IMM(a, 0x01, b) /* 8 zeroext */
#define M_CZEXT(a,b) M_ZAPNOT_IMM(a, 0x03, b) /* 16 zeroext */
#define M_IZEXT(a,b) M_ZAPNOT_IMM(a, 0x0f, b) /* 32 zeroext */
/* Byte-manipulation instructions (opcode 0x12): EXTxL/EXTxH extract,
   INSxL/INSxH insert, and MSKxL/MSKxH mask a byte/word/long/quad field
   of a at the byte position given by b — used to synthesize sub-word
   loads/stores on pre-BWX CPUs. */
#define M_EXTBL(a,b,c) M_OP3 (0x12,0x06, a,b,c,0)
#define M_EXTWL(a,b,c) M_OP3 (0x12,0x16, a,b,c,0)
#define M_EXTLL(a,b,c) M_OP3 (0x12,0x26, a,b,c,0)
#define M_EXTQL(a,b,c) M_OP3 (0x12,0x36, a,b,c,0)
#define M_EXTWH(a,b,c) M_OP3 (0x12,0x5a, a,b,c,0)
#define M_EXTLH(a,b,c) M_OP3 (0x12,0x6a, a,b,c,0)
#define M_EXTQH(a,b,c) M_OP3 (0x12,0x7a, a,b,c,0)
#define M_INSBL(a,b,c) M_OP3 (0x12,0x0b, a,b,c,0)
#define M_INSWL(a,b,c) M_OP3 (0x12,0x1b, a,b,c,0)
#define M_INSLL(a,b,c) M_OP3 (0x12,0x2b, a,b,c,0)
#define M_INSQL(a,b,c) M_OP3 (0x12,0x3b, a,b,c,0)
#define M_INSWH(a,b,c) M_OP3 (0x12,0x57, a,b,c,0)
#define M_INSLH(a,b,c) M_OP3 (0x12,0x67, a,b,c,0)
#define M_INSQH(a,b,c) M_OP3 (0x12,0x77, a,b,c,0)
#define M_MSKBL(a,b,c) M_OP3 (0x12,0x02, a,b,c,0)
#define M_MSKWL(a,b,c) M_OP3 (0x12,0x12, a,b,c,0)
#define M_MSKLL(a,b,c) M_OP3 (0x12,0x22, a,b,c,0)
#define M_MSKQL(a,b,c) M_OP3 (0x12,0x32, a,b,c,0)
#define M_MSKWH(a,b,c) M_OP3 (0x12,0x52, a,b,c,0)
#define M_MSKLH(a,b,c) M_OP3 (0x12,0x62, a,b,c,0)
#define M_MSKQH(a,b,c) M_OP3 (0x12,0x72, a,b,c,0)

/* Same instructions with b as an 8-bit literal byte position. */
#define M_EXTBL_IMM(a,b,c) M_OP3 (0x12,0x06, a,b,c,1)
#define M_EXTWL_IMM(a,b,c) M_OP3 (0x12,0x16, a,b,c,1)
#define M_EXTLL_IMM(a,b,c) M_OP3 (0x12,0x26, a,b,c,1)
#define M_EXTQL_IMM(a,b,c) M_OP3 (0x12,0x36, a,b,c,1)
#define M_EXTWH_IMM(a,b,c) M_OP3 (0x12,0x5a, a,b,c,1)
#define M_EXTLH_IMM(a,b,c) M_OP3 (0x12,0x6a, a,b,c,1)
#define M_EXTQH_IMM(a,b,c) M_OP3 (0x12,0x7a, a,b,c,1)
#define M_INSBL_IMM(a,b,c) M_OP3 (0x12,0x0b, a,b,c,1)
#define M_INSWL_IMM(a,b,c) M_OP3 (0x12,0x1b, a,b,c,1)
#define M_INSLL_IMM(a,b,c) M_OP3 (0x12,0x2b, a,b,c,1)
#define M_INSQL_IMM(a,b,c) M_OP3 (0x12,0x3b, a,b,c,1)
#define M_INSWH_IMM(a,b,c) M_OP3 (0x12,0x57, a,b,c,1)
#define M_INSLH_IMM(a,b,c) M_OP3 (0x12,0x67, a,b,c,1)
#define M_INSQH_IMM(a,b,c) M_OP3 (0x12,0x77, a,b,c,1)
#define M_MSKBL_IMM(a,b,c) M_OP3 (0x12,0x02, a,b,c,1)
#define M_MSKWL_IMM(a,b,c) M_OP3 (0x12,0x12, a,b,c,1)
#define M_MSKLL_IMM(a,b,c) M_OP3 (0x12,0x22, a,b,c,1)
#define M_MSKQL_IMM(a,b,c) M_OP3 (0x12,0x32, a,b,c,1)
#define M_MSKWH_IMM(a,b,c) M_OP3 (0x12,0x52, a,b,c,1)
#define M_MSKLH_IMM(a,b,c) M_OP3 (0x12,0x62, a,b,c,1)
#define M_MSKQH_IMM(a,b,c) M_OP3 (0x12,0x72, a,b,c,1)

/* UMULH: high 64 bits of the unsigned 128-bit product. */
#define M_UMULH(a,b,c) M_OP3 (0x13,0x30, a,b,c,0) /* 64 umulh */

#define M_UMULH_IMM(a,b,c) M_OP3 (0x13,0x30, a,b,c,1) /* 64 umulh */
/* Conditional moves: c = b if the condition on a holds, else c is
   unchanged. */
#define M_CMOVEQ(a,b,c) M_OP3 (0x11,0x24, a,b,c,0) /* a==0 ? c=b */
#define M_CMOVNE(a,b,c) M_OP3 (0x11,0x26, a,b,c,0) /* a!=0 ? c=b */
#define M_CMOVLT(a,b,c) M_OP3 (0x11,0x44, a,b,c,0) /* a< 0 ? c=b */
#define M_CMOVGE(a,b,c) M_OP3 (0x11,0x46, a,b,c,0) /* a>=0 ? c=b */
#define M_CMOVLE(a,b,c) M_OP3 (0x11,0x64, a,b,c,0) /* a<=0 ? c=b */
#define M_CMOVGT(a,b,c) M_OP3 (0x11,0x66, a,b,c,0) /* a> 0 ? c=b */

#define M_CMOVEQ_IMM(a,b,c) M_OP3 (0x11,0x24, a,b,c,1) /* a==0 ? c=b */
#define M_CMOVNE_IMM(a,b,c) M_OP3 (0x11,0x26, a,b,c,1) /* a!=0 ? c=b */
#define M_CMOVLT_IMM(a,b,c) M_OP3 (0x11,0x44, a,b,c,1) /* a< 0 ? c=b */
#define M_CMOVGE_IMM(a,b,c) M_OP3 (0x11,0x46, a,b,c,1) /* a>=0 ? c=b */
#define M_CMOVLE_IMM(a,b,c) M_OP3 (0x11,0x64, a,b,c,1) /* a<=0 ? c=b */
#define M_CMOVGT_IMM(a,b,c) M_OP3 (0x11,0x66, a,b,c,1) /* a> 0 ? c=b */
/* macros for unused commands (see an Alpha-manual for description) ***********/
/* Kept for completeness; not emitted by the current code generator. */

#define M_ANDNOT(a,b,c,const) M_OP3 (0x11,0x08, a,b,c,const) /* c = a &~ b */
#define M_ORNOT(a,b,c,const) M_OP3 (0x11,0x28, a,b,c,const) /* c = a |~ b */
#define M_XORNOT(a,b,c,const) M_OP3 (0x11,0x48, a,b,c,const) /* c = a ^~ b */

#define M_CMPBGE(a,b,c,const) M_OP3 (0x10,0x0f, a,b,c,const)

#define M_FCMPUN(a,b,c) M_FOP3 (0x16, 0x0a4, a,b,c) /* unordered */
#define M_FCMPLE(a,b,c) M_FOP3 (0x16, 0x0a7, a,b,c) /* c = a<=b */

#define M_FCMPUNS(a,b,c) M_FOP3 (0x16, 0x5a4, a,b,c) /* unordered */
#define M_FCMPLES(a,b,c) M_FOP3 (0x16, 0x5a7, a,b,c) /* c = a<=b */

#define M_FBNEZ(fa,disp) M_BRA (0x35,fa,disp)
#define M_FBLEZ(fa,disp) M_BRA (0x33,fa,disp)

#define M_JMP_CO(a,b) M_MEM (0x1a,a,b,0xc000) /* call cosub */
/* function gen_resolvebranch **************************************************

   backpatches a branch instruction; Alpha branch instructions are very
   regular, so it is only necessary to overwrite some fixed bits in the
   instruction word.

   parameters: ip ... pointer to instruction after branch (void*)
   so ... offset of instruction after branch (s4)
   to ... offset of branch target (s4)

*******************************************************************************/

/* NOTE(review): >> binds tighter than &, so the 21-bit mask is applied
   to the already-shifted word distance (intended).  The right shift of a
   negative distance (backward branch) relies on arithmetic shift, which
   is implementation-defined in C but holds on all supported compilers.
   Assumes the displacement field of the branch at ip[-1] is still 0
   (placeholder), since the bits are OR-ed in. */
#define gen_resolvebranch(ip,so,to) \
    ((s4 *) (ip))[-1] |= ((s4) (to) - (so)) >> 2 & 0x1fffff
/* function prototypes */

/* NOTE(review): implemented by the threads subsystem — presumably
   restarts a thread that was interrupted inside a codegen-registered
   critical section; takes the interrupted thread's signal context.
   Confirm against the threads implementation. */
void thread_restartcriticalsection(ucontext_t *uc);
500 #endif /* _CODEGEN_H */
504 * These are local overrides for various environment variables in Emacs.
505 * Please do not remove this and leave it at the end of the file, where
506 * Emacs will automagically detect them.
507 * ---------------------------------------------------------------------
510 * indent-tabs-mode: t