1 /* vm/jit/alpha/codegen.h - code generation macros and definitions for Alpha
3 Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
4 C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
5 E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
6 J. Wenninger, Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 Contact: cacao@cacaojvm.org
27 Authors: Andreas Krall
30 Changes: Christian Thalinger
32 $Id: codegen.h 4626 2006-03-16 12:03:47Z twisti $
43 #include "vm/jit/jit.h"
46 /* additional functions and macros to generate code ***************************/
/* gen_nullptr_check(objreg) ***************************************************

   Emit a null-pointer check on register `objreg`: BEQZ with a zero
   displacement that is later backpatched to the exception stub registered
   via codegen_add_nullpointerexception_ref.

   NOTE(review): the extraction dropped the continuation lines carrying the
   `checknull` guard and the closing brace; they are restored here so the
   macro is balanced again — confirm against the upstream file.

*******************************************************************************/

#define gen_nullptr_check(objreg) \
    if (checknull) { \
        M_BEQZ((objreg), 0); \
        codegen_add_nullpointerexception_ref(cd, mcodeptr); \
    }
/* gen_bound_check *************************************************************

   Emit an array-bounds check: load the array length, compare the index
   (s2) unsigned-less-than the length, and branch (to be backpatched) to
   the ArrayIndexOutOfBounds stub when the comparison fails.

   NOTE(review): the extraction dropped the `checkbounds` guard line and
   the closing brace; restored here — confirm against the upstream file.

*******************************************************************************/

#define gen_bound_check \
    if (checkbounds) { \
        M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size)); \
        M_CMPULT(s2, REG_ITMP3, REG_ITMP3); \
        M_BEQZ(REG_ITMP3, 0); \
        codegen_add_arrayindexoutofboundsexception_ref(cd, mcodeptr, s2); \
    }
/* MCODECHECK(icnt) */

/* Ensure at least `icnt` more instruction slots fit in the current code
   buffer; otherwise grow the buffer via codegen_increase (which may move
   it, hence mcodeptr is reassigned). */

#define MCODECHECK(icnt) \
    if ((mcodeptr + (icnt)) > cd->mcodeend) \
        mcodeptr = codegen_increase(cd, (u1 *) mcodeptr)
/* ALIGNCODENOP: pad the code stream with a NOP when mcodeptr is not
   8-byte aligned.  NOTE(review): the extraction dropped the macro body
   (`M_NOP;` and the closing brace); restored here — confirm upstream. */

#define ALIGNCODENOP \
    if ((s4) ((ptrint) mcodeptr & 7)) { \
        M_NOP; \
    }
/* M_INTMOVE:
   generates an integer-move from register a to b.
   if a and b are the same int-register, no code will be generated. */

/* do/while(0) makes the macro a single statement (no dangling-else
   hazard); the comparison arguments are parenthesized. */
#define M_INTMOVE(a,b) \
    do { \
        if ((a) != (b)) \
            M_MOV(a, b); \
    } while (0)
/* M_FLTMOVE:
   generates a floating-point-move from register a to b.
   if a and b are the same float-register, no code will be generated. */

/* do/while(0) makes the macro a single statement (no dangling-else
   hazard); the comparison arguments are parenthesized. */
#define M_FLTMOVE(a,b) \
    do { \
        if ((a) != (b)) \
            M_FMOV(a, b); \
    } while (0)
94 /* var_to_reg_xxx **************************************************************
96 This function generates code to fetch data from a pseudo-register
97 into a real register. If the pseudo-register has actually been
98 assigned to a real register, no code will be emitted, since
99 following operations can use this register directly.
101 v: pseudoregister to be fetched from
102 tempregnum: temporary register to be used if v is actually spilled to ram
104 return: the register number, where the operand can be found after
fetching (this will be either tempregnum or the register
number already given to v)
108 *******************************************************************************/
/* var_to_reg_int: fetch an integer pseudo-register into a real register.
   If v is spilled, load it from the stack into tempnr; otherwise regnr
   is simply v's assigned register and no code is emitted.
   NOTE(review): the extraction dropped several continuation lines
   (do/while wrapper, COUNT_SPILLS, the tempnr assignment and the else
   branch); restored here — confirm against the upstream file. */

#define var_to_reg_int(regnr,v,tempnr) \
    do { \
        if ((v)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_LLD(tempnr, REG_SP, (v)->regoff * 8); \
            regnr = tempnr; \
        } else \
            regnr = (v)->regoff; \
    } while (0)
/* var_to_reg_flt: float/double counterpart of var_to_reg_int — fetch a
   floating-point pseudo-register, loading from the stack when spilled.
   NOTE(review): dropped continuation lines restored; confirm upstream. */

#define var_to_reg_flt(regnr,v,tempnr) \
    do { \
        if ((v)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_DLD(tempnr, REG_SP, (v)->regoff * 8); \
            regnr = tempnr; \
        } else \
            regnr = (v)->regoff; \
    } while (0)
134 /* store_reg_to_var_xxx ********************************************************
136 This function generates the code to store the result of an
137 operation back into a spilled pseudo-variable. If the
138 pseudo-variable has not been spilled in the first place, this
139 function will generate nothing.
141 v ............ Pseudovariable
142 tempregnum ... Number of the temporary registers as returned by
145 *******************************************************************************/
/* store_reg_to_var_int: store an integer result back into a spilled
   pseudo-variable; emits nothing when the variable lives in a register.
   NOTE(review): dropped continuation lines (do/while wrapper,
   COUNT_SPILLS, closers) restored — confirm upstream. */

#define store_reg_to_var_int(sptr, tempregnum) \
    do { \
        if ((sptr)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_LST(tempregnum, REG_SP, (sptr)->regoff * 8); \
        } \
    } while (0)
/* store_reg_to_var_flt: float/double counterpart of store_reg_to_var_int.
   NOTE(review): dropped continuation lines restored — confirm upstream. */

#define store_reg_to_var_flt(sptr, tempregnum) \
    do { \
        if ((sptr)->flags & INMEMORY) { \
            COUNT_SPILLS; \
            M_DST(tempregnum, REG_SP, (sptr)->regoff * 8); \
        } \
    } while (0)
/* M_COPY: copy stack slot `from` to `to`, choosing the int or float path
   from the operand type; no code when both map to the same location.
   NOTE(review): the extraction dropped the M_FLTMOVE/M_INTMOVE lines,
   the `} else {` and the closing braces; restored here (the gap pattern
   matches exactly) — confirm against the upstream file. */

#define M_COPY(from,to) \
    d = reg_of_var(rd, to, REG_IFTMP); \
    if ((from->regoff != to->regoff) || \
        ((from->flags ^ to->flags) & INMEMORY)) { \
        if (IS_FLT_DBL_TYPE(from->type)) { \
            var_to_reg_flt(s1, from, d); \
            M_FLTMOVE(s1, d); \
            store_reg_to_var_flt(to, d); \
        } else { \
            var_to_reg_int(s1, from, d); \
            M_INTMOVE(s1, d); \
            store_reg_to_var_int(to, d); \
        } \
    }
/* ICONST: load 32-bit constant c into register r — directly via LDA when
   it fits in 16 signed bits, otherwise from the data segment through
   REG_PV.  NOTE(review): the extraction dropped the `} else {` and the
   closing brace; restored here — confirm upstream. */

#define ICONST(r,c) \
    if ((c) >= -32768 && (c) <= 32767) { \
        M_LDA_INTERN((r), REG_ZERO, c); \
    } else { \
        disp = dseg_adds4(cd, (c)); \
        M_ILD((r), REG_PV, disp); \
    }
/* LCONST: 64-bit counterpart of ICONST (dseg_adds8 / M_LLD).
   NOTE(review): dropped `} else {` and closing brace restored. */

#define LCONST(r,c) \
    if ((c) >= -32768 && (c) <= 32767) { \
        M_LDA_INTERN((r), REG_ZERO, (c)); \
    } else { \
        disp = dseg_adds8(cd, (c)); \
        M_LLD((r), REG_PV, disp); \
    }
/* macros to create code ******************************************************/

/* 3-address-operations: M_OP3
   op ..... opcode
   fu ..... function-number
   a ..... register number source 1
   b ..... register number or constant integer source 2
   c ..... register number destination
   const .. switch to use b as constant integer
   (REG means: use b as register number)
   (CONST means: use b as constant 8-bit-integer)

   NOTE(review): `const` is a C keyword used as a macro parameter name;
   the preprocessor treats keywords as ordinary identifiers so this is
   legal, but renaming it (e.g. to `imm`) would be clearer. */

#define M_OP3(op,fu,a,b,c,const) \
    *(mcodeptr++) = ((((s4) (op)) << 26) | ((a) << 21) | ((b) << (16 - 3 * (const))) | ((const) << 12) | ((fu) << 5) | ((c)))

/* 3-address-floating-point-operation: M_FOP3
   op .... opcode
   fu .... function-number
   a,b ... source floating-point registers
   c ..... destination register */

#define M_FOP3(op,fu,a,b,c) \
    *(mcodeptr++) = ((((s4) (op)) << 26) | ((a) << 21) | ((b) << 16) | ((fu) << 5) | (c))

/* branch instructions: M_BRA
   op ..... opcode
   a ...... register to be tested
   disp ... relative address to be jumped to (divided by 4) */

#define M_BRA(op,a,disp) \
    *(mcodeptr++) = ((((s4) (op)) << 26) | ((a) << 21) | ((disp) & 0x1fffff))

/* memory operations: M_MEM
   op ..... opcode
   a ...... source/target register for memory access
   b ...... base register
   disp ... displacement (16 bit signed) to be added to b */

#define M_MEM(op,a,b,disp) \
    *(mcodeptr++) = ((((s4) (op)) << 26) | ((a) << 21) | ((b) << 16) | ((disp) & 0xffff))
/* macros for all used commands (see an Alpha-manual for description) *********/

/* _INTERN forms require the displacement to fit in 16 signed bits. */
#define M_LDA_INTERN(a,b,disp) M_MEM(0x08,a,b,disp) /* low const */
/* M_LDA: load-address with arbitrary 32-bit displacement, split into a
   LDAH (high 16 bits) plus LDA (low 16 bits) pair when it overflows.
   NOTE(review): the extraction dropped the do/while wrapper, the
   `if (hi == 0)` / `} else {` lines and the M_LDAH line; the gap pattern
   in the embedded numbering matches this reconstruction exactly. */

#define M_LDA(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_LDA_INTERN(a,b,lo); \
        } else { \
            M_LDAH(a,b,hi); \
            M_LDA_INTERN(a,a,lo); \
        } \
    } while (0)
#define M_LDAH(a,b,disp) M_MEM (0x09,a,b,disp) /* high const */

#define M_BLDU(a,b,disp) M_MEM (0x0a,a,b,disp) /* 8 load */
#define M_SLDU(a,b,disp) M_MEM (0x0c,a,b,disp) /* 16 load */

/* _INTERN loads: displacement must fit in 16 signed bits. */
#define M_ILD_INTERN(a,b,disp) M_MEM(0x28,a,b,disp) /* 32 load */
#define M_LLD_INTERN(a,b,disp) M_MEM(0x29,a,b,disp) /* 64 load */
/* M_ILD: 32-bit load with arbitrary 32-bit displacement; uses the
   destination register a as scratch for the high part on overflow.
   NOTE(review): dropped continuation lines (do/while, if/else, M_LDAH)
   restored — gap pattern matches exactly. */

#define M_ILD(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_ILD_INTERN(a,b,lo); \
        } else { \
            M_LDAH(a,b,hi); \
            M_ILD_INTERN(a,a,lo); \
        } \
    } while (0)
/* M_LLD: 64-bit load with arbitrary 32-bit displacement; destination
   register a serves as scratch for the high part on overflow.
   NOTE(review): dropped continuation lines restored — see M_ILD. */

#define M_LLD(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_LLD_INTERN(a,b,lo); \
        } else { \
            M_LDAH(a,b,hi); \
            M_LLD_INTERN(a,a,lo); \
        } \
    } while (0)
#define M_ALD(a,b,disp) M_LLD(a,b,disp) /* addr load */

#define M_BST(a,b,disp) M_MEM(0x0e,a,b,disp) /* 8 store */
#define M_SST(a,b,disp) M_MEM(0x0d,a,b,disp) /* 16 store */

/* _INTERN stores: displacement must fit in 16 signed bits. */
#define M_IST_INTERN(a,b,disp) M_MEM(0x2c,a,b,disp) /* 32 store */
#define M_LST_INTERN(a,b,disp) M_MEM(0x2d,a,b,disp) /* 64 store */

/* Stores with displacement overflow should only happen with PUTFIELD or on */
/* the stack. The PUTFIELD instruction does not use REG_ITMP3 and a */
/* reg_of_var call should not use REG_ITMP3!!! */
/* M_IST: 32-bit store with arbitrary 32-bit displacement; REG_ITMP3 is
   used as scratch base on overflow (see the note above this macro).
   NOTE(review): dropped do/while and if/else continuation lines
   restored — gap pattern matches exactly. */

#define M_IST(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_IST_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_IST_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
/* M_LST: 64-bit store with arbitrary 32-bit displacement; REG_ITMP3 is
   used as scratch base on overflow.
   NOTE(review): dropped continuation lines restored — see M_IST. */

#define M_LST(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_LST_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_LST_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
#define M_AST(a,b,disp) M_LST(a,b,disp) /* addr store */

/* sub-word sign extension */
#define M_BSEXT(b,c) M_OP3 (0x1c,0x0,REG_ZERO,b,c,0) /* 8 signext */
#define M_SSEXT(b,c) M_OP3 (0x1c,0x1,REG_ZERO,b,c,0) /* 16 signext */

/* conditional branches on integer register a */
#define M_BR(disp) M_BRA (0x30,REG_ZERO,disp) /* branch */
#define M_BSR(ra,disp) M_BRA (0x34,ra,disp) /* branch sbr */
#define M_BEQZ(a,disp) M_BRA (0x39,a,disp) /* br a == 0 */
#define M_BLTZ(a,disp) M_BRA (0x3a,a,disp) /* br a < 0 */
#define M_BLEZ(a,disp) M_BRA (0x3b,a,disp) /* br a <= 0 */
#define M_BNEZ(a,disp) M_BRA (0x3d,a,disp) /* br a != 0 */
#define M_BGEZ(a,disp) M_BRA (0x3e,a,disp) /* br a >= 0 */
#define M_BGTZ(a,disp) M_BRA (0x3f,a,disp) /* br a > 0 */

/* register-indirect jumps; the displacement field encodes the hint bits */
#define M_JMP(a,b) M_MEM (0x1a,a,b,0x0000) /* jump */
#define M_JSR(a,b) M_MEM (0x1a,a,b,0x4000) /* call sbr */
#define M_RET(a,b) M_MEM (0x1a,a,b,0x8000) /* return */
#define M_IADD(a,b,c) M_OP3 (0x10,0x0, a,b,c,0) /* 32 add */
#define M_LADD(a,b,c) M_OP3 (0x10,0x20, a,b,c,0) /* 64 add */
#define M_ISUB(a,b,c) M_OP3 (0x10,0x09, a,b,c,0) /* 32 sub */
#define M_LSUB(a,b,c) M_OP3 (0x10,0x29, a,b,c,0) /* 64 sub */
#define M_IMUL(a,b,c) M_OP3 (0x13,0x00, a,b,c,0) /* 32 mul */
#define M_LMUL(a,b,c) M_OP3 (0x13,0x20, a,b,c,0) /* 64 mul */

/* _IMM forms: b is an 8-bit literal instead of a register */
#define M_IADD_IMM(a,b,c) M_OP3 (0x10,0x0, a,b,c,1) /* 32 add */
#define M_LADD_IMM(a,b,c) M_OP3 (0x10,0x20, a,b,c,1) /* 64 add */
#define M_ISUB_IMM(a,b,c) M_OP3 (0x10,0x09, a,b,c,1) /* 32 sub */
#define M_LSUB_IMM(a,b,c) M_OP3 (0x10,0x29, a,b,c,1) /* 64 sub */
#define M_IMUL_IMM(a,b,c) M_OP3 (0x13,0x00, a,b,c,1) /* 32 mul */
#define M_LMUL_IMM(a,b,c) M_OP3 (0x13,0x20, a,b,c,1) /* 64 mul */

/* address add == 64-bit add */
#define M_AADD_IMM(a,b,c) M_LADD_IMM(a,b,c)
#define M_CMPEQ(a,b,c) M_OP3 (0x10,0x2d, a,b,c,0) /* c = a == b */
#define M_CMPLT(a,b,c) M_OP3 (0x10,0x4d, a,b,c,0) /* c = a < b */
#define M_CMPLE(a,b,c) M_OP3 (0x10,0x6d, a,b,c,0) /* c = a <= b */

/* unsigned compares (comment on CMPULT corrected: it is strictly-less) */
#define M_CMPULE(a,b,c) M_OP3 (0x10,0x3d, a,b,c,0) /* c = a <= b (unsigned) */
#define M_CMPULT(a,b,c) M_OP3 (0x10,0x1d, a,b,c,0) /* c = a < b (unsigned) */

#define M_CMPEQ_IMM(a,b,c) M_OP3 (0x10,0x2d, a,b,c,1) /* c = a == b */
#define M_CMPLT_IMM(a,b,c) M_OP3 (0x10,0x4d, a,b,c,1) /* c = a < b */
#define M_CMPLE_IMM(a,b,c) M_OP3 (0x10,0x6d, a,b,c,1) /* c = a <= b */

#define M_CMPULE_IMM(a,b,c) M_OP3 (0x10,0x3d, a,b,c,1) /* c = a <= b (unsigned) */
#define M_CMPULT_IMM(a,b,c) M_OP3 (0x10,0x1d, a,b,c,1) /* c = a < b (unsigned) */
#define M_AND(a,b,c) M_OP3 (0x11,0x00, a,b,c,0) /* c = a & b */
#define M_OR( a,b,c) M_OP3 (0x11,0x20, a,b,c,0) /* c = a | b */
#define M_XOR(a,b,c) M_OP3 (0x11,0x40, a,b,c,0) /* c = a ^ b */

#define M_AND_IMM(a,b,c) M_OP3 (0x11,0x00, a,b,c,1) /* c = a & b */
#define M_OR_IMM( a,b,c) M_OP3 (0x11,0x20, a,b,c,1) /* c = a | b */
#define M_XOR_IMM(a,b,c) M_OP3 (0x11,0x40, a,b,c,1) /* c = a ^ b */

/* register 31 reads as zero, so OR with it yields move / clear / nop */
#define M_MOV(a,c) M_OR (a,a,c) /* c = a */
#define M_CLR(c) M_OR (31,31,c) /* c = 0 */
#define M_NOP M_OR (31,31,31) /* ; */

#define M_SLL(a,b,c) M_OP3 (0x12,0x39, a,b,c,0) /* c = a << b */
#define M_SRA(a,b,c) M_OP3 (0x12,0x3c, a,b,c,0) /* c = a >> b */
#define M_SRL(a,b,c) M_OP3 (0x12,0x34, a,b,c,0) /* c = a >>>b */

#define M_SLL_IMM(a,b,c) M_OP3 (0x12,0x39, a,b,c,1) /* c = a << b */
#define M_SRA_IMM(a,b,c) M_OP3 (0x12,0x3c, a,b,c,1) /* c = a >> b */
#define M_SRL_IMM(a,b,c) M_OP3 (0x12,0x34, a,b,c,1) /* c = a >>>b */
/* floating-point loads, _INTERN forms: 16-bit signed displacement only */
#define M_FLD_INTERN(a,b,disp) M_MEM(0x22,a,b,disp) /* load flt */
#define M_DLD_INTERN(a,b,disp) M_MEM(0x23,a,b,disp) /* load dbl */
/* M_FLD: float load with arbitrary 32-bit displacement; REG_ITMP3 serves
   as scratch base on overflow (a float register cannot be a base).
   NOTE(review): dropped do/while and if/else continuation lines
   restored — gap pattern matches exactly. */

#define M_FLD(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_FLD_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_FLD_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
/* M_DLD: double load with arbitrary 32-bit displacement; REG_ITMP3 is
   the scratch base on overflow.
   NOTE(review): dropped continuation lines restored — see M_FLD. */

#define M_DLD(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_DLD_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_DLD_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
/* floating-point stores, _INTERN forms: 16-bit signed displacement only */
#define M_FST_INTERN(a,b,disp) M_MEM(0x26,a,b,disp) /* store flt */
#define M_DST_INTERN(a,b,disp) M_MEM(0x27,a,b,disp) /* store dbl */

/* Stores with displacement overflow should only happen with PUTFIELD or on */
/* the stack. The PUTFIELD instruction does not use REG_ITMP3 and a */
/* reg_of_var call should not use REG_ITMP3!!! */
/* M_FST: float store with arbitrary 32-bit displacement; REG_ITMP3 is
   the scratch base on overflow (see the note above).
   NOTE(review): dropped continuation lines restored — see M_IST. */

#define M_FST(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_FST_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_FST_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
/* M_DST: double store with arbitrary 32-bit displacement; REG_ITMP3 is
   the scratch base on overflow.
   NOTE(review): dropped continuation lines restored — see M_IST. */

#define M_DST(a,b,disp) \
    do { \
        s4 lo = (short) (disp); \
        s4 hi = (short) (((disp) - lo) >> 16); \
        if (hi == 0) { \
            M_DST_INTERN(a,b,lo); \
        } else { \
            M_LDAH(REG_ITMP3,b,hi); \
            M_DST_INTERN(a,REG_ITMP3,lo); \
        } \
    } while (0)
#define M_FADD(a,b,c) M_FOP3 (0x16, 0x080, a,b,c) /* flt add */
#define M_DADD(a,b,c) M_FOP3 (0x16, 0x0a0, a,b,c) /* dbl add */
#define M_FSUB(a,b,c) M_FOP3 (0x16, 0x081, a,b,c) /* flt sub */
#define M_DSUB(a,b,c) M_FOP3 (0x16, 0x0a1, a,b,c) /* dbl sub */
#define M_FMUL(a,b,c) M_FOP3 (0x16, 0x082, a,b,c) /* flt mul */
#define M_DMUL(a,b,c) M_FOP3 (0x16, 0x0a2, a,b,c) /* dbl mul */
#define M_FDIV(a,b,c) M_FOP3 (0x16, 0x083, a,b,c) /* flt div */
#define M_DDIV(a,b,c) M_FOP3 (0x16, 0x0a3, a,b,c) /* dbl div */

/* ...S variants: function code +0x500 — NOTE(review): presumably the
   software-completion (/SU) qualified forms; confirm against the Alpha
   Architecture Reference Manual. */
#define M_FADDS(a,b,c) M_FOP3 (0x16, 0x580, a,b,c) /* flt add */
#define M_DADDS(a,b,c) M_FOP3 (0x16, 0x5a0, a,b,c) /* dbl add */
#define M_FSUBS(a,b,c) M_FOP3 (0x16, 0x581, a,b,c) /* flt sub */
#define M_DSUBS(a,b,c) M_FOP3 (0x16, 0x5a1, a,b,c) /* dbl sub */
#define M_FMULS(a,b,c) M_FOP3 (0x16, 0x582, a,b,c) /* flt mul */
#define M_DMULS(a,b,c) M_FOP3 (0x16, 0x5a2, a,b,c) /* dbl mul */
#define M_FDIVS(a,b,c) M_FOP3 (0x16, 0x583, a,b,c) /* flt div */
#define M_DDIVS(a,b,c) M_FOP3 (0x16, 0x5a3, a,b,c) /* dbl div */

/* conversions (register 31 as unused source operand) */
#define M_CVTDF(b,c) M_FOP3 (0x16, 0x0ac, 31,b,c) /* dbl2flt */
#define M_CVTLF(b,c) M_FOP3 (0x16, 0x0bc, 31,b,c) /* long2flt */
#define M_CVTLD(b,c) M_FOP3 (0x16, 0x0be, 31,b,c) /* long2dbl */
#define M_CVTDL(b,c) M_FOP3 (0x16, 0x1af, 31,b,c) /* dbl2long */
#define M_CVTDL_C(b,c) M_FOP3 (0x16, 0x12f, 31,b,c) /* dbl2long, chopped */
#define M_CVTLI(b,c) M_FOP3 (0x17, 0x130, 31,b,c) /* long2int */

#define M_CVTDFS(b,c) M_FOP3 (0x16, 0x5ac, 31,b,c) /* dbl2flt */
#define M_CVTFDS(b,c) M_FOP3 (0x16, 0x6ac, 31,b,c) /* flt2dbl */
#define M_CVTDLS(b,c) M_FOP3 (0x16, 0x5af, 31,b,c) /* dbl2long */
#define M_CVTDL_CS(b,c) M_FOP3 (0x16, 0x52f, 31,b,c) /* dbl2long, chopped */
#define M_CVTLIS(b,c) M_FOP3 (0x17, 0x530, 31,b,c) /* long2int */

#define M_FCMPEQ(a,b,c) M_FOP3 (0x16, 0x0a5, a,b,c) /* c = a==b */
#define M_FCMPLT(a,b,c) M_FOP3 (0x16, 0x0a6, a,b,c) /* c = a<b */

#define M_FCMPEQS(a,b,c) M_FOP3 (0x16, 0x5a5, a,b,c) /* c = a==b */
#define M_FCMPLTS(a,b,c) M_FOP3 (0x16, 0x5a6, a,b,c) /* c = a<b */

/* float move via copy-sign of a register with itself */
#define M_FMOV(fa,fb) M_FOP3 (0x17, 0x020, fa,fa,fb) /* b = a */
#define M_FMOVN(fa,fb) M_FOP3 (0x17, 0x021, fa,fa,fb) /* b = -a */

#define M_FNOP M_FMOV (31,31)

#define M_FBEQZ(fa,disp) M_BRA (0x31,fa,disp) /* br a == 0.0*/
/* macros for special commands (see an Alpha-manual for description) **********/

#define M_TRAPB M_MEM (0x18,0,0,0x0000) /* trap barrier*/

/* scaled add/subtract: c = a*4 (or a*8) +/- b; L = longword, Q = quadword */
#define M_S4ADDL(a,b,c) M_OP3 (0x10,0x02, a,b,c,0) /* c = a*4 + b */
#define M_S4ADDQ(a,b,c) M_OP3 (0x10,0x22, a,b,c,0) /* c = a*4 + b */
#define M_S4SUBL(a,b,c) M_OP3 (0x10,0x0b, a,b,c,0) /* c = a*4 - b */
#define M_S4SUBQ(a,b,c) M_OP3 (0x10,0x2b, a,b,c,0) /* c = a*4 - b */
#define M_S8ADDL(a,b,c) M_OP3 (0x10,0x12, a,b,c,0) /* c = a*8 + b */
#define M_S8ADDQ(a,b,c) M_OP3 (0x10,0x32, a,b,c,0) /* c = a*8 + b */
#define M_S8SUBL(a,b,c) M_OP3 (0x10,0x1b, a,b,c,0) /* c = a*8 - b */
#define M_S8SUBQ(a,b,c) M_OP3 (0x10,0x3b, a,b,c,0) /* c = a*8 - b */
#define M_SAADDQ(a,b,c) M_S8ADDQ(a,b,c) /* c = a*8 + b */

#define M_S4ADDL_IMM(a,b,c) M_OP3 (0x10,0x02, a,b,c,1) /* c = a*4 + b */
#define M_S4ADDQ_IMM(a,b,c) M_OP3 (0x10,0x22, a,b,c,1) /* c = a*4 + b */
#define M_S4SUBL_IMM(a,b,c) M_OP3 (0x10,0x0b, a,b,c,1) /* c = a*4 - b */
#define M_S4SUBQ_IMM(a,b,c) M_OP3 (0x10,0x2b, a,b,c,1) /* c = a*4 - b */
#define M_S8ADDL_IMM(a,b,c) M_OP3 (0x10,0x12, a,b,c,1) /* c = a*8 + b */
#define M_S8ADDQ_IMM(a,b,c) M_OP3 (0x10,0x32, a,b,c,1) /* c = a*8 + b */
#define M_S8SUBL_IMM(a,b,c) M_OP3 (0x10,0x1b, a,b,c,1) /* c = a*8 - b */
#define M_S8SUBQ_IMM(a,b,c) M_OP3 (0x10,0x3b, a,b,c,1) /* c = a*8 - b */

#define M_LLD_U(a,b,disp) M_MEM (0x0b,a,b,disp) /* unalign ld */
#define M_LST_U(a,b,disp) M_MEM (0x0f,a,b,disp) /* unalign st */

/* ZAP/ZAPNOT: clear/keep bytes of a selected by the bit mask in b */
#define M_ZAP(a,b,c) M_OP3 (0x12,0x30, a,b,c,0)
#define M_ZAPNOT(a,b,c) M_OP3 (0x12,0x31, a,b,c,0)

#define M_ZAP_IMM(a,b,c) M_OP3 (0x12,0x30, a,b,c,1)
#define M_ZAPNOT_IMM(a,b,c) M_OP3 (0x12,0x31, a,b,c,1)

/* zero extension by keeping only the low 1/2/4 bytes */
#define M_BZEXT(a,b) M_ZAPNOT_IMM(a, 0x01, b) /* 8 zeroext */
#define M_CZEXT(a,b) M_ZAPNOT_IMM(a, 0x03, b) /* 16 zeroext */
#define M_IZEXT(a,b) M_ZAPNOT_IMM(a, 0x0f, b) /* 32 zeroext */
/* byte-manipulation instructions: extract (EXTxx), insert (INSxx) and
   mask (MSKxx) byte/word/longword/quadword fields; the _IMM forms take
   the position operand b as an 8-bit literal */
#define M_EXTBL(a,b,c) M_OP3 (0x12,0x06, a,b,c,0)
#define M_EXTWL(a,b,c) M_OP3 (0x12,0x16, a,b,c,0)
#define M_EXTLL(a,b,c) M_OP3 (0x12,0x26, a,b,c,0)
#define M_EXTQL(a,b,c) M_OP3 (0x12,0x36, a,b,c,0)
#define M_EXTWH(a,b,c) M_OP3 (0x12,0x5a, a,b,c,0)
#define M_EXTLH(a,b,c) M_OP3 (0x12,0x6a, a,b,c,0)
#define M_EXTQH(a,b,c) M_OP3 (0x12,0x7a, a,b,c,0)
#define M_INSBL(a,b,c) M_OP3 (0x12,0x0b, a,b,c,0)
#define M_INSWL(a,b,c) M_OP3 (0x12,0x1b, a,b,c,0)
#define M_INSLL(a,b,c) M_OP3 (0x12,0x2b, a,b,c,0)
#define M_INSQL(a,b,c) M_OP3 (0x12,0x3b, a,b,c,0)
#define M_INSWH(a,b,c) M_OP3 (0x12,0x57, a,b,c,0)
#define M_INSLH(a,b,c) M_OP3 (0x12,0x67, a,b,c,0)
#define M_INSQH(a,b,c) M_OP3 (0x12,0x77, a,b,c,0)
#define M_MSKBL(a,b,c) M_OP3 (0x12,0x02, a,b,c,0)
#define M_MSKWL(a,b,c) M_OP3 (0x12,0x12, a,b,c,0)
#define M_MSKLL(a,b,c) M_OP3 (0x12,0x22, a,b,c,0)
#define M_MSKQL(a,b,c) M_OP3 (0x12,0x32, a,b,c,0)
#define M_MSKWH(a,b,c) M_OP3 (0x12,0x52, a,b,c,0)
#define M_MSKLH(a,b,c) M_OP3 (0x12,0x62, a,b,c,0)
#define M_MSKQH(a,b,c) M_OP3 (0x12,0x72, a,b,c,0)

#define M_EXTBL_IMM(a,b,c) M_OP3 (0x12,0x06, a,b,c,1)
#define M_EXTWL_IMM(a,b,c) M_OP3 (0x12,0x16, a,b,c,1)
#define M_EXTLL_IMM(a,b,c) M_OP3 (0x12,0x26, a,b,c,1)
#define M_EXTQL_IMM(a,b,c) M_OP3 (0x12,0x36, a,b,c,1)
#define M_EXTWH_IMM(a,b,c) M_OP3 (0x12,0x5a, a,b,c,1)
#define M_EXTLH_IMM(a,b,c) M_OP3 (0x12,0x6a, a,b,c,1)
#define M_EXTQH_IMM(a,b,c) M_OP3 (0x12,0x7a, a,b,c,1)
#define M_INSBL_IMM(a,b,c) M_OP3 (0x12,0x0b, a,b,c,1)
#define M_INSWL_IMM(a,b,c) M_OP3 (0x12,0x1b, a,b,c,1)
#define M_INSLL_IMM(a,b,c) M_OP3 (0x12,0x2b, a,b,c,1)
#define M_INSQL_IMM(a,b,c) M_OP3 (0x12,0x3b, a,b,c,1)
#define M_INSWH_IMM(a,b,c) M_OP3 (0x12,0x57, a,b,c,1)
#define M_INSLH_IMM(a,b,c) M_OP3 (0x12,0x67, a,b,c,1)
#define M_INSQH_IMM(a,b,c) M_OP3 (0x12,0x77, a,b,c,1)
#define M_MSKBL_IMM(a,b,c) M_OP3 (0x12,0x02, a,b,c,1)
#define M_MSKWL_IMM(a,b,c) M_OP3 (0x12,0x12, a,b,c,1)
#define M_MSKLL_IMM(a,b,c) M_OP3 (0x12,0x22, a,b,c,1)
#define M_MSKQL_IMM(a,b,c) M_OP3 (0x12,0x32, a,b,c,1)
#define M_MSKWH_IMM(a,b,c) M_OP3 (0x12,0x52, a,b,c,1)
#define M_MSKLH_IMM(a,b,c) M_OP3 (0x12,0x62, a,b,c,1)
#define M_MSKQH_IMM(a,b,c) M_OP3 (0x12,0x72, a,b,c,1)

/* high 64 bits of an unsigned 64x64 multiply */
#define M_UMULH(a,b,c) M_OP3 (0x13,0x30, a,b,c,0) /* 64 umulh */

#define M_UMULH_IMM(a,b,c) M_OP3 (0x13,0x30, a,b,c,1) /* 64 umulh */
/* conditional moves: c = b when a satisfies the condition against zero */
#define M_CMOVEQ(a,b,c) M_OP3 (0x11,0x24, a,b,c,0) /* a==0 ? c=b */
#define M_CMOVNE(a,b,c) M_OP3 (0x11,0x26, a,b,c,0) /* a!=0 ? c=b */
#define M_CMOVLT(a,b,c) M_OP3 (0x11,0x44, a,b,c,0) /* a< 0 ? c=b */
#define M_CMOVGE(a,b,c) M_OP3 (0x11,0x46, a,b,c,0) /* a>=0 ? c=b */
#define M_CMOVLE(a,b,c) M_OP3 (0x11,0x64, a,b,c,0) /* a<=0 ? c=b */
#define M_CMOVGT(a,b,c) M_OP3 (0x11,0x66, a,b,c,0) /* a> 0 ? c=b */

#define M_CMOVEQ_IMM(a,b,c) M_OP3 (0x11,0x24, a,b,c,1) /* a==0 ? c=b */
#define M_CMOVNE_IMM(a,b,c) M_OP3 (0x11,0x26, a,b,c,1) /* a!=0 ? c=b */
#define M_CMOVLT_IMM(a,b,c) M_OP3 (0x11,0x44, a,b,c,1) /* a< 0 ? c=b */
#define M_CMOVGE_IMM(a,b,c) M_OP3 (0x11,0x46, a,b,c,1) /* a>=0 ? c=b */
#define M_CMOVLE_IMM(a,b,c) M_OP3 (0x11,0x64, a,b,c,1) /* a<=0 ? c=b */
#define M_CMOVGT_IMM(a,b,c) M_OP3 (0x11,0x66, a,b,c,1) /* a> 0 ? c=b */

/* macros for unused commands (see an Alpha-manual for description) ***********/

#define M_ANDNOT(a,b,c,const) M_OP3 (0x11,0x08, a,b,c,const) /* c = a &~ b */
#define M_ORNOT(a,b,c,const) M_OP3 (0x11,0x28, a,b,c,const) /* c = a |~ b */
#define M_XORNOT(a,b,c,const) M_OP3 (0x11,0x48, a,b,c,const) /* c = a ^~ b */

#define M_CMPBGE(a,b,c,const) M_OP3 (0x10,0x0f, a,b,c,const)

#define M_FCMPUN(a,b,c) M_FOP3 (0x16, 0x0a4, a,b,c) /* unordered */
#define M_FCMPLE(a,b,c) M_FOP3 (0x16, 0x0a7, a,b,c) /* c = a<=b */

#define M_FCMPUNS(a,b,c) M_FOP3 (0x16, 0x5a4, a,b,c) /* unordered */
#define M_FCMPLES(a,b,c) M_FOP3 (0x16, 0x5a7, a,b,c) /* c = a<=b */

#define M_FBNEZ(fa,disp) M_BRA (0x35,fa,disp)
#define M_FBLEZ(fa,disp) M_BRA (0x33,fa,disp)

#define M_JMP_CO(a,b) M_MEM (0x1a,a,b,0xc000) /* call cosub */
/* gen_resolvebranch ***********************************************************

   backpatches a branch instruction; Alpha branch instructions are very
   regular, so it is only necessary to overwrite some fixed bits in the
   instruction word.

   parameters: ip ... pointer to instruction after branch (void*)
               so ... offset of instruction after branch (s4)
               to ... offset of branch target (s4)

   The byte displacement (to - so) is converted to an instruction count
   (>> 2) and OR-ed into the low 21 bits of the preceding instruction.

   NOTE(review): the arithmetic right shift of a negative displacement is
   implementation-defined in ISO C; every compiler targeting Alpha
   sign-extends, which the 21-bit mask relies on for backward branches.

   The expansion is now fully parenthesized so it stays correct when
   embedded in a larger expression.

*******************************************************************************/

#define gen_resolvebranch(ip,so,to) \
    (((s4 *) (ip))[-1] |= (((s4) (to) - (so)) >> 2) & 0x1fffff)
636 #endif /* _CODEGEN_H */
640 * These are local overrides for various environment variables in Emacs.
641 * Please do not remove this and leave it at the end of the file, where
642 * Emacs will automagically detect them.
643 * ---------------------------------------------------------------------
646 * indent-tabs-mode: t