/* vm/jit/x86_64/codegen.h - code generation macros and definitions for x86_64

   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
   R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
   M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
   P. Tomsich, J. Wenninger

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.

   Contact: cacao@complang.tuwien.ac.at

   Authors: Andreas Krall

   $Id: codegen.h 1668 2004-12-03 16:39:40Z twisti $

*/


#ifndef _CODEGEN_H
#define _CODEGEN_H

#include <ucontext.h>

#include "vm/jit/x86_64/types.h"
/* macros to create code ******************************************************/

/* immediate data union */

typedef union {
    u1 b[8];
    s4 i;
    s8 l;
} x86_64_imm_buf;


/* opcodes for alu instructions */

typedef enum {
    X86_64_ADD = 0,
    X86_64_OR  = 1,
    X86_64_ADC = 2,
    X86_64_SBB = 3,
    X86_64_AND = 4,
    X86_64_SUB = 5,
    X86_64_XOR = 6,
    X86_64_CMP = 7
} X86_64_ALU_Opcode;


/* opcodes for shift instructions */

typedef enum {
    X86_64_ROL = 0,
    X86_64_ROR = 1,
    X86_64_RCL = 2,
    X86_64_RCR = 3,
    X86_64_SHL = 4,
    X86_64_SHR = 5,
    X86_64_SAL = 6,
    X86_64_SAR = 7
} X86_64_Shift_Opcode;
/* condition codes */

typedef enum {
    X86_64_CC_O = 0,
    X86_64_CC_NO = 1,
    X86_64_CC_B = 2, X86_64_CC_C = 2, X86_64_CC_NAE = 2,
    X86_64_CC_BE = 6, X86_64_CC_NA = 6,
    X86_64_CC_AE = 3, X86_64_CC_NB = 3, X86_64_CC_NC = 3,
    X86_64_CC_E = 4, X86_64_CC_Z = 4,
    X86_64_CC_NE = 5, X86_64_CC_NZ = 5,
    X86_64_CC_A = 7, X86_64_CC_NBE = 7,
    X86_64_CC_S = 8, X86_64_CC_LZ = 8,
    X86_64_CC_NS = 9, X86_64_CC_GEZ = 9,
    X86_64_CC_P = 0x0a, X86_64_CC_PE = 0x0a,
    X86_64_CC_NP = 0x0b, X86_64_CC_PO = 0x0b,
    X86_64_CC_L = 0x0c, X86_64_CC_NGE = 0x0c,
    X86_64_CC_GE = 0x0d, X86_64_CC_NL = 0x0d,
    X86_64_CC_LE = 0x0e, X86_64_CC_NG = 0x0e,
    X86_64_CC_G = 0x0f, X86_64_CC_NLE = 0x0f
} X86_64_CC;
/* modrm and stuff */

#define x86_64_address_byte(mod,reg,rm) \
    *(cd->mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));


#define x86_64_emit_reg(reg,rm) \
    x86_64_address_byte(3,(reg),(rm));
#define x86_64_emit_rex(size,reg,index,rm) \
    if ((size) == 1 || (reg) > 7 || (index) > 7 || (rm) > 7) { \
        *(cd->mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
    }
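
/* Illustrative sketch (not from the original file): how the REX and ModRM
   helpers above compose into one instruction.  Assumes the register
   constants RAX = 0 and RCX = 1 from this port.  Encoding mov %rax,%rcx
   (opcode 0x89, MOV r/m64,r64):

       x86_64_emit_rex(1, RAX, 0, RCX);      REX.W            -> 0x48
       *(cd->mcodeptr++) = 0x89;             opcode           -> 0x89
       x86_64_emit_reg(RAX, RCX);            mod=3 reg=0 rm=1 -> 0xc1

   resulting bytes: 48 89 c1 */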
#define x86_64_emit_mem(r,disp) \
    do { \
        x86_64_address_byte(0,(r),5); \
        x86_64_emit_imm32((disp)); \
    } while (0)
#define x86_64_emit_membase(basereg,disp,dreg) \
    do { \
        if ((basereg) == REG_SP || (basereg) == R12) { \
            if ((disp) == 0) { \
                x86_64_address_byte(0,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
            } else if (x86_64_is_imm8((disp))) { \
                x86_64_address_byte(1,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
                x86_64_emit_imm8((disp)); \
            } else { \
                x86_64_address_byte(2,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
                x86_64_emit_imm32((disp)); \
            } \
            break; \
        } \
        \
        if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
            x86_64_address_byte(0,(dreg),(basereg)); \
            break; \
        } \
        \
        if ((basereg) == RIP) { \
            x86_64_address_byte(0,(dreg),RBP); \
            x86_64_emit_imm32((disp)); \
            break; \
        } \
        \
        if (x86_64_is_imm8((disp))) { \
            x86_64_address_byte(1,(dreg),(basereg)); \
            x86_64_emit_imm8((disp)); \
        } else { \
            x86_64_address_byte(2,(dreg),(basereg)); \
            x86_64_emit_imm32((disp)); \
        } \
    } while (0)
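
/* Illustrative sketch (not from the original file): a load through the stack
   pointer takes the REG_SP/R12 path above, which always needs a SIB byte.
   Assuming RAX = 0 and the x86_64_mov_membase_reg() emitter used elsewhere
   in this port, mov 0x8(%rsp),%rax comes out as:

       x86_64_mov_membase_reg(cd, REG_SP, 8, RAX);

       REX.W  opcode  ModRM  SIB   disp8
       0x48   0x8b    0x44   0x24  0x08

   ModRM mod=1 selects the disp8 form, rm=4 escapes to the SIB byte, and the
   SIB base=4/index=4 means "rsp, no index". */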
#define x86_64_emit_memindex(reg,disp,basereg,indexreg,scale) \
    do { \
        if ((basereg) == -1) { \
            x86_64_address_byte(0,(reg),4); \
            x86_64_address_byte((scale),(indexreg),5); \
            x86_64_emit_imm32((disp)); \
        \
        } else if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
            x86_64_address_byte(0,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
        \
        } else if (x86_64_is_imm8((disp))) { \
            x86_64_address_byte(1,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
            x86_64_emit_imm8 ((disp)); \
        \
        } else { \
            x86_64_address_byte(2,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
            x86_64_emit_imm32((disp)); \
        } \
    } while (0)
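
/* Illustrative sketch (not from the original file): scaled-index addressing,
   assuming RAX = 0, RCX = 1, RDX = 2.  Note that the scale argument is the
   log2 factor that goes straight into the SIB ss field, so 3 means "*8".
   For mov (%rax,%rcx,8),%rdx the address part is:

       x86_64_emit_memindex(RDX, 0, RAX, RCX, 3);

       ModRM 0x14 (mod=0, reg=2, rm=4 -> SIB follows)
       SIB   0xc8 (ss=3, index=1, base=0)

   together with REX.W 0x48 and opcode 0x8b: 48 8b 14 c8 */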
#define x86_64_is_imm8(imm) \
    (((long)(imm) >= -128 && (long)(imm) <= 127))


#define x86_64_is_imm32(imm) \
    ((long)(imm) >= (-2147483647-1) && (long)(imm) <= 2147483647)


#define x86_64_emit_imm8(imm) \
    *(cd->mcodeptr++) = (u1) ((imm) & 0xff);
#define x86_64_emit_imm16(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.i = (s4) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
    } while (0)


#define x86_64_emit_imm32(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.i = (s4) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
        *(cd->mcodeptr++) = imb.b[2]; \
        *(cd->mcodeptr++) = imb.b[3]; \
    } while (0)


#define x86_64_emit_imm64(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.l = (s8) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
        *(cd->mcodeptr++) = imb.b[2]; \
        *(cd->mcodeptr++) = imb.b[3]; \
        *(cd->mcodeptr++) = imb.b[4]; \
        *(cd->mcodeptr++) = imb.b[5]; \
        *(cd->mcodeptr++) = imb.b[6]; \
        *(cd->mcodeptr++) = imb.b[7]; \
    } while (0)
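
/* Illustrative sketch (not from the original file): x86_64_emit_imm64 is what
   makes the 10-byte "movabs" form possible.  The x86_64_imm_buf union writes
   the immediate in host byte order, which is fine here because the JIT always
   runs on the little-endian x86_64 target it generates code for.  Assuming
   RAX = 0:

       x86_64_emit_rex(1, 0, 0, RAX);               0x48
       *(cd->mcodeptr++) = 0xb8 + (RAX & 0x07);     0xb8  (MOV r64, imm64)
       x86_64_emit_imm64(0x1122334455667788L);      88 77 66 55 44 33 22 11

   resulting bytes: 48 b8 88 77 66 55 44 33 22 11 */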
/* additional functions and macros to generate code ***************************/

#define BlockPtrOfPC(pc)  ((basicblock *) iptr->target)


#define COUNT_SPILLS count_spills++
#define CALCOFFSETBYTES(var, reg, val) \
    if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
    else if ((s4) (val) != 0) (var) += 1; \
    else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) (var) += 1;


#define CALCIMMEDIATEBYTES(var, val) \
    if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
    else (var) += 1;
/* gen_nullptr_check(objreg) */

#define gen_nullptr_check(objreg) \
    if (checknull) { \
        x86_64_test_reg_reg(cd, (objreg), (objreg)); \
        x86_64_jcc(cd, X86_64_CC_E, 0); \
        codegen_addxnullrefs(cd, cd->mcodeptr); \
    }
#define gen_bound_check \
    if (checkbounds) { \
        x86_64_alul_membase_reg(cd, X86_64_CMP, s1, OFFSET(java_arrayheader, size), s2); \
        x86_64_jcc(cd, X86_64_CC_AE, 0); \
        codegen_addxboundrefs(cd, cd->mcodeptr, s2); \
    }
#define gen_div_check(v) \
    { \
        if ((v)->flags & INMEMORY) { \
            x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8); \
        } else { \
            x86_64_test_reg_reg(cd, src->regoff, src->regoff); \
        } \
        x86_64_jcc(cd, X86_64_CC_E, 0); \
        codegen_addxdivrefs(cd, cd->mcodeptr); \
    }
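
/* Illustrative sketch (assumption, not from this file): how the check macros
   are typically used by an array-access handler in codegen.c.  s1 holds the
   array reference and s2 the index, which is exactly what gen_bound_check
   expects; the jcc is emitted with a displacement of 0 and backpatched later
   through the recorded exception-reference entries.

       var_to_reg_int(s1, src->prev, REG_ITMP1);
       var_to_reg_int(s2, src, REG_ITMP2);
       gen_nullptr_check(s1);
       gen_bound_check;
*/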
/* MCODECHECK(icnt) */

#define MCODECHECK(icnt) \
    if ((cd->mcodeptr + (icnt)) > (u1 *) cd->mcodeend) \
        cd->mcodeptr = (u1 *) codegen_increase(cd, cd->mcodeptr)
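
/* Illustrative sketch (assumption, not from this file): the emit macros never
   check the buffer end themselves, so a handler reserves a worst-case number
   of bytes up front and lets codegen_increase() grow the buffer when needed:

       MCODECHECK(64);
       ... emit the code for one intermediate instruction ...
*/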
/* M_INTMOVE:
     generates an integer move from register a to register b.
     If a and b are the same integer register, no code will be generated.
*/

#define M_INTMOVE(reg,dreg) \
    if ((reg) != (dreg)) { \
        x86_64_mov_reg_reg(cd, (reg),(dreg)); \
    }
/* M_FLTMOVE:
     generates a floating-point move from register a to register b.
     If a and b are the same float register, no code will be generated.
*/

#define M_FLTMOVE(reg,dreg) \
    if ((reg) != (dreg)) { \
        x86_64_movq_reg_reg(cd, (reg),(dreg)); \
    }
/* var_to_reg_xxx:
   This macro generates code to fetch data from a pseudo-register into a
   real register.
   If the pseudo-register has actually been assigned to a real register,
   no code will be emitted, since following operations can use this
   register directly.

   v:          pseudo-register to be fetched from
   tempregnum: temporary register to be used if v is actually spilled to ram

   return: the register number where the operand can be found after
           fetching (this will be either tempregnum or the register
           number already given to v)
*/
#define var_to_reg_int(regnr,v,tempnr) \
    if ((v)->flags & INMEMORY) { \
        COUNT_SPILLS; \
        if ((v)->type == TYPE_INT) { \
            x86_64_movl_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } else { \
            x86_64_mov_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } \
        regnr = tempnr; \
    } else { \
        regnr = (v)->regoff; \
    }
#define var_to_reg_flt(regnr,v,tempnr) \
    if ((v)->flags & INMEMORY) { \
        COUNT_SPILLS; \
        if ((v)->type == TYPE_FLT) { \
            x86_64_movlps_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } else { \
            x86_64_movlpd_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } \
/*      x86_64_movq_membase_reg(REG_SP, (v)->regoff * 8, tempnr); */ \
        regnr = tempnr; \
    } else { \
        regnr = (v)->regoff; \
    }
/* store_reg_to_var_xxx:
   This macro generates the code to store the result of an operation
   back into a spilled pseudo-variable.
   If the pseudo-variable has not been spilled in the first place, this
   macro will generate nothing.

   v ............ pseudo-variable
   tempregnum ... number of the temporary register as returned by
                  reg_of_var
*/
#define store_reg_to_var_int(sptr, tempregnum) \
    if ((sptr)->flags & INMEMORY) { \
        COUNT_SPILLS; \
        x86_64_mov_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
    }


#define store_reg_to_var_flt(sptr, tempregnum) \
    if ((sptr)->flags & INMEMORY) { \
        COUNT_SPILLS; \
        x86_64_movq_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
    }
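
/* Illustrative sketch (assumption, not from this file): the typical pattern
   in an ICMD handler in codegen.c.  s1 and d are local s4 variables of the
   handler; src and iptr->dst are stack slots of the intermediate code.

       d = reg_of_var(rd, iptr->dst, REG_ITMP3);
       var_to_reg_int(s1, src, REG_ITMP1);     emits nothing if src is in a register
       M_INTMOVE(s1, d);
       ... emit the actual operation on d ...
       store_reg_to_var_int(iptr->dst, d);     spills only if dst lives in memory
*/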
#define M_COPY(from,to) \
    d = reg_of_var(rd, to, REG_ITMP1); \
    if ((from->regoff != to->regoff) || \
        ((from->flags ^ to->flags) & INMEMORY)) { \
        if (IS_FLT_DBL_TYPE(from->type)) { \
            var_to_reg_flt(s1, from, d); \
            M_FLTMOVE(s1, d); \
            store_reg_to_var_flt(to, d); \
        } else { \
            var_to_reg_int(s1, from, d); \
            M_INTMOVE(s1, d); \
            store_reg_to_var_int(to, d); \
        } \
    }
/* #define ALIGNCODENOP {if((int)((long)mcodeptr&7)){M_NOP;}} */
/* function gen_resolvebranch **************************************************

    backpatches a branch instruction

    parameters: ip ... pointer to instruction after branch (void*)
                so ... offset of instruction after branch (s8)
                to ... offset of branch target (s8)

*******************************************************************************/

#define gen_resolvebranch(ip,so,to) \
    *((s4*) ((ip) - 4)) = (s4) ((to) - (so));
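
/* Worked example (not from the original file): a branch is first emitted with
   a rel32 displacement of 0, e.g. x86_64_jcc(cd, cc, 0).  If the instruction
   after the branch starts at method offset so = 30 and the target block
   begins at offset to = 100, then gen_resolvebranch(ip, 30, 100) overwrites
   the four bytes just before ip with 70, which is exactly the rel32 the CPU
   adds to the address of the following instruction. */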
/* function prototypes */

void thread_restartcriticalsection(ucontext_t *uc);

#endif /* _CODEGEN_H */
/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 */