/* jit/x86_64/codegen.h - code generation macros and definitions for x86_64

   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
   R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
   M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
   P. Tomsich, J. Wenninger

   This file is part of CACAO.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.

   Contact: cacao@complang.tuwien.ac.at

   Authors: Andreas Krall

   $Id: codegen.h 1525 2004-11-17 15:49:58Z twisti $

*/
#ifndef _CODEGEN_H
#define _CODEGEN_H

#include <sys/ucontext.h>
/* macros to create code ******************************************************/

/* immediate data union */
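/* Sketch of the immediate buffer used by the emitters below; the field set is
   inferred from how the macros actually use it (imb.i, imb.l, imb.b[]) and
   the full definition may carry additional views (e.g. float/double). */

typedef union {
    s4 i;      /* 32-bit immediate, emitted via imb.b[0..3]          */
    s8 l;      /* 64-bit immediate, emitted via imb.b[0..7]          */
    u1 b[8];   /* raw bytes, written to the code buffer in order,    */
               /* i.e. little-endian as required by x86-64           */
} x86_64_imm_buf;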
/* opcodes for alu instructions */
} X86_64_Shift_Opcode;
X86_64_CC_B = 2, X86_64_CC_C = 2, X86_64_CC_NAE = 2,
X86_64_CC_BE = 6, X86_64_CC_NA = 6,
X86_64_CC_AE = 3, X86_64_CC_NB = 3, X86_64_CC_NC = 3,
X86_64_CC_E = 4, X86_64_CC_Z = 4,
X86_64_CC_NE = 5, X86_64_CC_NZ = 5,
X86_64_CC_A = 7, X86_64_CC_NBE = 7,
X86_64_CC_S = 8, X86_64_CC_LZ = 8,
X86_64_CC_NS = 9, X86_64_CC_GEZ = 9,
X86_64_CC_P = 0x0a, X86_64_CC_PE = 0x0a,
X86_64_CC_NP = 0x0b, X86_64_CC_PO = 0x0b,
X86_64_CC_L = 0x0c, X86_64_CC_NGE = 0x0c,
X86_64_CC_GE = 0x0d, X86_64_CC_NL = 0x0d,
X86_64_CC_LE = 0x0e, X86_64_CC_NG = 0x0e,
X86_64_CC_G = 0x0f, X86_64_CC_NLE = 0x0f,
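/* These values are the standard x86 condition-code nibbles: a short
   conditional jump is (0x70 + cc), the near form is 0x0f (0x80 + cc), and
   SETcc is 0x0f (0x90 + cc), which is how x86_64_jcc and friends are
   expected to consume them. */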
/* modrm and stuff */

#define x86_64_address_byte(mod,reg,rm) \
    *(cd->mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));


#define x86_64_emit_reg(reg,rm) \
    x86_64_address_byte(3,(reg),(rm));
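/* Worked example (illustrative): x86_64_emit_reg(RAX, RBX) with RAX = 0 and
   RBX = 3 emits the register-direct ModRM byte
       11 000 011b  =  0xc3      (mod = 3, reg = rax, rm = rbx),
   i.e. the byte that follows the opcode in an instruction such as
   "add rbx, rax" (0x48 0x01 0xc3 once the REX.W prefix below is included). */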
#define x86_64_emit_rex(size,reg,index,rm) \
    if ((size) == 1 || (reg) > 7 || (index) > 7 || (rm) > 7) { \
        *(cd->mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
    }
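/* Worked example (illustrative): a 64-bit operation with R8 in the ModRM reg
   field, e.g. x86_64_emit_rex(1, R8, 0, RAX) with R8 = 8 and RAX = 0, emits
       0x40 | REX.W (0x08) | REX.R (0x04)  =  0x4c,
   while a 32-bit operation on the low eight registers fails the condition
   and emits no prefix at all. */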
#define x86_64_emit_mem(r,disp) \
    do { \
        x86_64_address_byte(0,(r),5); \
        x86_64_emit_imm32((disp)); \
    } while (0)
#define x86_64_emit_membase(basereg,disp,dreg) \
    do { \
        if ((basereg) == REG_SP || (basereg) == R12) { \
            if ((disp) == 0) { \
                x86_64_address_byte(0,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
            } else if (x86_64_is_imm8((disp))) { \
                x86_64_address_byte(1,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
                x86_64_emit_imm8((disp)); \
            } else { \
                x86_64_address_byte(2,(dreg),REG_SP); \
                x86_64_address_byte(0,REG_SP,REG_SP); \
                x86_64_emit_imm32((disp)); \
            } \
            break; \
        } \
        \
        if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
            x86_64_address_byte(0,(dreg),(basereg)); \
            break; \
        } \
        \
        if ((basereg) == RIP) { \
            x86_64_address_byte(0,(dreg),RBP); \
            x86_64_emit_imm32((disp)); \
            break; \
        } \
        \
        if (x86_64_is_imm8((disp))) { \
            x86_64_address_byte(1,(dreg),(basereg)); \
            x86_64_emit_imm8((disp)); \
        } else { \
            x86_64_address_byte(2,(dreg),(basereg)); \
            x86_64_emit_imm32((disp)); \
        } \
    } while (0)
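/* Worked example (illustrative): x86_64_emit_membase(RBP, 0, RAX) cannot use
   mod = 0 (mod = 0 with rm = 101b means rip-relative/disp32 on x86-64), so it
   falls through to the imm8 case and emits
       0x45 0x00        (mod = 1, reg = rax, rm = rbp, disp8 = 0),
   whereas x86_64_emit_membase(RBX, 0, RAX) takes the displacement-free path
   and emits the single byte 0x03. */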
#define x86_64_emit_memindex(reg,disp,basereg,indexreg,scale) \
    do { \
        if ((basereg) == -1) { \
            x86_64_address_byte(0,(reg),4); \
            x86_64_address_byte((scale),(indexreg),5); \
            x86_64_emit_imm32((disp)); \
        \
        } else if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
            x86_64_address_byte(0,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
        \
        } else if (x86_64_is_imm8((disp))) { \
            x86_64_address_byte(1,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
            x86_64_emit_imm8 ((disp)); \
        \
        } else { \
            x86_64_address_byte(2,(reg),4); \
            x86_64_address_byte((scale),(indexreg),(basereg)); \
            x86_64_emit_imm32((disp)); \
        } \
    } while (0)
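/* Worked example (illustrative): a typical array-element access
   x86_64_emit_memindex(RAX, 0, RCX, RDX, 3), i.e. [rcx + rdx*8] with rax as
   the reg operand, takes the displacement-free branch and emits
       0x04             (mod = 0, reg = rax, rm = 100b -> SIB follows)
       0xd1             (scale = 3, index = rdx, base = rcx). */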
#define x86_64_is_imm8(imm) \
    (((long)(imm) >= -128 && (long)(imm) <= 127))


#define x86_64_is_imm32(imm) \
    ((long)(imm) >= (-2147483647-1) && (long)(imm) <= 2147483647)
#define x86_64_emit_imm8(imm) \
    *(cd->mcodeptr++) = (u1) ((imm) & 0xff);
#define x86_64_emit_imm16(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.i = (s4) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
    } while (0)
#define x86_64_emit_imm32(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.i = (s4) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
        *(cd->mcodeptr++) = imb.b[2]; \
        *(cd->mcodeptr++) = imb.b[3]; \
    } while (0)
#define x86_64_emit_imm64(imm) \
    do { \
        x86_64_imm_buf imb; \
        imb.l = (s8) (imm); \
        *(cd->mcodeptr++) = imb.b[0]; \
        *(cd->mcodeptr++) = imb.b[1]; \
        *(cd->mcodeptr++) = imb.b[2]; \
        *(cd->mcodeptr++) = imb.b[3]; \
        *(cd->mcodeptr++) = imb.b[4]; \
        *(cd->mcodeptr++) = imb.b[5]; \
        *(cd->mcodeptr++) = imb.b[6]; \
        *(cd->mcodeptr++) = imb.b[7]; \
    } while (0)
/* additional functions and macros to generate code ***************************/

#define BlockPtrOfPC(pc) ((basicblock *) iptr->target)


#define COUNT_SPILLS count_spills++
#define CALCOFFSETBYTES(var, reg, val) \
    if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
    else if ((s4) (val) != 0) (var) += 1; \
    else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) (var) += 1;
#define CALCIMMEDIATEBYTES(var, val) \
    if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
    else (var) += 1;
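/* Example (illustrative): these size estimators mirror x86_64_emit_membase
   above.  For a base+displacement operand, CALCOFFSETBYTES adds 4 for a
   disp32 (e.g. disp = 256), 1 for a disp8 (e.g. disp = 8), and still 1 for
   disp = 0 when the base is RBP/R13 (forced disp8) or RSP/R12 (forced SIB
   byte); CALCIMMEDIATEBYTES likewise chooses between an imm8 and an imm32. */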
/* gen_nullptr_check(objreg) */

#define gen_nullptr_check(objreg) \
    { \
        x86_64_test_reg_reg(cd, (objreg), (objreg)); \
        x86_64_jcc(cd, X86_64_CC_E, 0); \
        codegen_addxnullrefs(cd, cd->mcodeptr); \
    }
#define gen_bound_check \
    { \
        x86_64_alul_membase_reg(cd, X86_64_CMP, s1, OFFSET(java_arrayheader, size), s2); \
        x86_64_jcc(cd, X86_64_CC_AE, 0); \
        codegen_addxboundrefs(cd, cd->mcodeptr, s2); \
    }
#define gen_div_check(v) \
    { \
        if ((v)->flags & INMEMORY) { \
            x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, (v)->regoff * 8); \
        } else { \
            x86_64_test_reg_reg(cd, (v)->regoff, (v)->regoff); \
        } \
        x86_64_jcc(cd, X86_64_CC_E, 0); \
        codegen_addxdivrefs(cd, cd->mcodeptr); \
    }
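/* All three check macros follow the same pattern: emit a conditional jump
   with a zero displacement, then record its position via codegen_addx*refs
   so that the displacement can later be patched (see gen_resolvebranch
   below) to reach the corresponding exception-throwing stub emitted at the
   end of the method. */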
/* MCODECHECK(icnt) */

#define MCODECHECK(icnt) \
    if ((cd->mcodeptr + (icnt)) > (u1 *) cd->mcodeend) \
        cd->mcodeptr = (u1 *) codegen_increase(cd, cd->mcodeptr)
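/* Typical call site (illustrative): the instruction translator checks for
   buffer headroom with a conservative byte estimate before emitting an ICMD,
   e.g. MCODECHECK(100); when the limit would be exceeded, codegen_increase
   grows the code buffer and the macro resumes writing at the returned
   position. */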
/* M_INTMOVE:
   generates an integer move from register a to register b.
   If a and b are the same int register, no code will be generated.
*/

#define M_INTMOVE(reg,dreg) \
    if ((reg) != (dreg)) { \
        x86_64_mov_reg_reg(cd, (reg),(dreg)); \
    }
/* M_FLTMOVE:
   generates a floating-point move from register a to register b.
   If a and b are the same float register, no code will be generated.
*/

#define M_FLTMOVE(reg,dreg) \
    if ((reg) != (dreg)) { \
        x86_64_movq_reg_reg(cd, (reg),(dreg)); \
    }
/* var_to_reg_xxx:
   This function generates code to fetch data from a pseudo-register
   into a real register.
   If the pseudo-register has actually been assigned to a real
   register, no code will be emitted, since following operations
   can use this register directly.

   v:          pseudo-register to be fetched from
   tempregnum: temporary register to be used if v is actually spilled to ram

   return: the register number where the operand can be found after
           fetching (this will be either tempregnum or the register
           number already assigned to v)
*/
#define var_to_reg_int(regnr,v,tempnr) \
    if ((v)->flags & INMEMORY) { \
        if ((v)->type == TYPE_INT) { \
            x86_64_movl_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } else { \
            x86_64_mov_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } \
        regnr = tempnr; \
    } else { \
        regnr = (v)->regoff; \
    }
#define var_to_reg_flt(regnr,v,tempnr) \
    if ((v)->flags & INMEMORY) { \
        if ((v)->type == TYPE_FLT) { \
            x86_64_movlps_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } else { \
            x86_64_movlpd_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
        } \
/*        x86_64_movq_membase_reg(REG_SP, (v)->regoff * 8, tempnr);*/ \
        regnr = tempnr; \
    } else { \
        regnr = (v)->regoff; \
    }
/* store_reg_to_var_xxx:
   This function generates the code to store the result of an operation
   back into a spilled pseudo-variable.
   If the pseudo-variable has not been spilled in the first place, this
   function will generate nothing.

   v ............ pseudo-variable to be stored
   tempregnum ... number of the temporary register as returned by
                  reg_of_var
*/
#define store_reg_to_var_int(sptr, tempregnum) \
    if ((sptr)->flags & INMEMORY) { \
        x86_64_mov_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
    }
#define store_reg_to_var_flt(sptr, tempregnum) \
    if ((sptr)->flags & INMEMORY) { \
        x86_64_movq_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
    }
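/* Typical use in the instruction translator (illustrative sketch, not a
   verbatim excerpt from codegen.c; x86_64_alul_imm_reg and X86_64_ADD are
   assumed names following the patterns of the emitters used above):

       var_to_reg_int(s1, src, REG_ITMP1);
       d = reg_of_var(rd, iptr->dst, REG_ITMP1);
       M_INTMOVE(s1, d);
       x86_64_alul_imm_reg(cd, X86_64_ADD, iptr->val.i, d);
       store_reg_to_var_int(iptr->dst, d);
*/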
#define M_COPY(from,to) \
    d = reg_of_var(rd, to, REG_ITMP1); \
    if ((from->regoff != to->regoff) || \
        ((from->flags ^ to->flags) & INMEMORY)) { \
        if (IS_FLT_DBL_TYPE(from->type)) { \
            var_to_reg_flt(s1, from, d); \
            M_FLTMOVE(s1, d); \
            store_reg_to_var_flt(to, d); \
        } else { \
            var_to_reg_int(s1, from, d); \
            M_INTMOVE(s1, d); \
            store_reg_to_var_int(to, d); \
        } \
    }
/* #define ALIGNCODENOP {if((int)((long)mcodeptr&7)){M_NOP;}} */
#define ALIGNCODENOP do {} while (0)
/* function gen_resolvebranch **************************************************

   backpatches a branch instruction

   parameters: ip ... pointer to instruction after branch (void*)
               so ... offset of instruction after branch (s8)
               to ... offset of branch target (s8)

*******************************************************************************/
#define gen_resolvebranch(ip,so,to) \
    *((s4*) ((ip) - 4)) = (s4) ((to) - (so));
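/* Worked example (illustrative): a jcc emitted at code offset 0x40 occupies
   six bytes (0x0f 0x8x plus rel32), so the instruction after the branch
   starts at so = 0x46 and ip points there.  To reach a target at offset 0x80,
   the patch stores the rel32 value 0x80 - 0x46 = 0x3a into the four bytes
   just before ip, because x86 relative branches are taken from the end of
   the branch instruction. */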
/* function prototypes */

void thread_restartcriticalsection(ucontext_t *uc);

#endif /* _CODEGEN_H */
/*
 * These are local overrides for various environment variables in Emacs.
 * Please do not remove this and leave it at the end of the file, where
 * Emacs will automagically detect them.
 * ---------------------------------------------------------------------
 * Local variables:
 * mode: c
 * indent-tabs-mode: t
 * End:
 */