1 /* src/vm/jit/x86_64/codegen.h - code generation macros for x86_64
3 Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
4 R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
5 C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
6 Institut f. Computersprachen - TU Wien
8 This file is part of CACAO.
10 This program is free software; you can redistribute it and/or
11 modify it under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2, or (at
13 your option) any later version.
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
25 Contact: cacao@complang.tuwien.ac.at
27 Authors: Andreas Krall
30 $Id: codegen.h 2223 2005-04-05 18:01:56Z christian $
40 #include "vm/jit/x86_64/types.h"
42 /* Macro for stack.c to set Argument Stackslots */
/*
 * SET_ARG_STACKSLOTS: classify the stackslot `copy` as a method argument.
 * Bumps the float (farg) or integer (iarg) argument counter, computes how
 * many argument stackslots are needed once the argument register sets are
 * exhausted, and raises the rd->ifmemuse high-water mark accordingly.
 * Non-SAVEDVAR slots are then marked ARGVAR and bound either to an argument
 * register (rd->argfltregs / rd->argintregs) or, flagged INMEMORY, to a
 * stack offset taken from --stacksize.
 *
 * NOTE(review): expands inside stack.c and relies on the locals `copy`,
 * `farg`, `iarg` and `rd` being in scope at the expansion site -- confirm
 * against the caller.
 */
44 #define SET_ARG_STACKSLOTS { \
47 s4 stacksize; /* Stackoffset for spilled arg */ \
50 (IS_FLT_DBL_TYPE(copy->type)) ? farg++ : iarg++; \
53 stacksize = (farg < rd->fltreg_argnum)? 0 : (farg - rd->fltreg_argnum); \
54 stacksize += (iarg < rd->intreg_argnum)? 0 : (iarg - rd->intreg_argnum); \
55 if (rd->ifmemuse < stacksize) \
56 rd->ifmemuse = stacksize; \
60 if (IS_FLT_DBL_TYPE(copy->type)) { \
62 if (!(copy->flags & SAVEDVAR)) { \
64 copy->varkind = ARGVAR; \
65 if (farg < rd->fltreg_argnum) { \
67 copy->regoff = rd->argfltregs[farg]; \
69 copy->flags = INMEMORY; \
70 copy->regoff = --stacksize; \
73 } else { /* int_arg */ \
75 if (!(copy->flags & SAVEDVAR)) { \
77 copy->varkind = ARGVAR; \
78 if (iarg < rd->intreg_argnum) { \
80 copy->regoff = rd->argintregs[iarg]; \
82 copy->flags = INMEMORY; \
83 copy->regoff = --stacksize; \
92 /* macros to create code ******************************************************/
94 /* immediate data union */
106 /* opcodes for alu instructions */
130 } X86_64_Shift_Opcode;
/*
 * x86_64 condition-code values as encoded in the low nibble of the
 * Jcc / SETcc / CMOVcc opcodes; synonymous mnemonics share one value
 * (e.g. B == C == NAE == 2).
 */
136 X86_64_CC_B = 2, X86_64_CC_C = 2, X86_64_CC_NAE = 2,
137 X86_64_CC_BE = 6, X86_64_CC_NA = 6,
138 X86_64_CC_AE = 3, X86_64_CC_NB = 3, X86_64_CC_NC = 3,
139 X86_64_CC_E = 4, X86_64_CC_Z = 4,
140 X86_64_CC_NE = 5, X86_64_CC_NZ = 5,
141 X86_64_CC_A = 7, X86_64_CC_NBE = 7,
142 X86_64_CC_S = 8, X86_64_CC_LZ = 8,
143 X86_64_CC_NS = 9, X86_64_CC_GEZ = 9,
144 X86_64_CC_P = 0x0a, X86_64_CC_PE = 0x0a,
145 X86_64_CC_NP = 0x0b, X86_64_CC_PO = 0x0b,
146 X86_64_CC_L = 0x0c, X86_64_CC_NGE = 0x0c,
147 X86_64_CC_GE = 0x0d, X86_64_CC_NL = 0x0d,
148 X86_64_CC_LE = 0x0e, X86_64_CC_NG = 0x0e,
149 X86_64_CC_G = 0x0f, X86_64_CC_NLE = 0x0f,
/* IS_IMM8: true iff the value fits into a sign-extended 8-bit immediate. */
#define IS_IMM8(imm) \
    (((long) (imm) <= 127) && ((long) (imm) >= -128))
/* IS_IMM32: true iff the value fits into a sign-extended 32-bit immediate. */
#define IS_IMM32(imm) \
    (((long) (imm) <= 2147483647L) && ((long) (imm) >= (-2147483647L - 1)))
162 /* modrm and stuff */
/*
 * x86_64_address_byte: emit one ModR/M- or SIB-shaped byte through
 * cd->mcodeptr: mod -> bits 7..6, reg -> bits 5..3, rm -> bits 2..0.
 */
164 #define x86_64_address_byte(mod,reg,rm) \
165 *(cd->mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));
/* x86_64_emit_reg: ModR/M byte for the register-direct form (mod = 3). */
168 #define x86_64_emit_reg(reg,rm) \
169 x86_64_address_byte(3,(reg),(rm));
/*
 * x86_64_emit_rex: conditionally emit a REX prefix -- size == 1 sets REX.W
 * (64-bit operand size), and any of reg/index/rm numbered above 7 needs
 * its high bit carried in REX.R / REX.X / REX.B respectively.
 */
172 #define x86_64_emit_rex(size,reg,index,rm) \
173 if ((size) == 1 || (reg) > 7 || (index) > 7 || (rm) > 7) { \
174 *(cd->mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
/*
 * x86_64_emit_byte_rex: unconditionally emit a REX prefix (0x40 base, no
 * REX.W) -- presumably so byte operations can address SPL/BPL/SIL/DIL and
 * R8B..R15B; verify against the call sites.
 */
178 #define x86_64_emit_byte_rex(reg,index,rm) \
179 *(cd->mcodeptr++) = (0x40 | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01));
/*
 * x86_64_emit_mem: displacement-only operand -- mod = 0 with rm = 5
 * followed by a 32-bit displacement (RIP-relative in 64-bit mode).
 */
182 #define x86_64_emit_mem(r,disp) \
184 x86_64_address_byte(0,(r),5); \
185 x86_64_emit_imm32((disp)); \
/*
 * x86_64_emit_membase: ModR/M (+ SIB, + displacement) bytes for a
 * basereg + disp operand.  Special cases visible below: REG_SP/R12 always
 * take a SIB byte; RBP/R13 cannot use the zero-displacement encoding;
 * RIP-based addressing uses the mod = 0, rm = RBP(5) disp32 form.
 * Displacements use the short disp8 form whenever IS_IMM8 holds.
 */
189 #define x86_64_emit_membase(basereg,disp,dreg) \
191 if ((basereg) == REG_SP || (basereg) == R12) { \
193 x86_64_address_byte(0,(dreg),REG_SP); \
194 x86_64_address_byte(0,REG_SP,REG_SP); \
195 } else if (IS_IMM8((disp))) { \
196 x86_64_address_byte(1,(dreg),REG_SP); \
197 x86_64_address_byte(0,REG_SP,REG_SP); \
198 x86_64_emit_imm8((disp)); \
200 x86_64_address_byte(2,(dreg),REG_SP); \
201 x86_64_address_byte(0,REG_SP,REG_SP); \
202 x86_64_emit_imm32((disp)); \
206 if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
207 x86_64_address_byte(0,(dreg),(basereg)); \
211 if ((basereg) == RIP) { \
212 x86_64_address_byte(0,(dreg),RBP); \
213 x86_64_emit_imm32((disp)); \
217 if (IS_IMM8((disp))) { \
218 x86_64_address_byte(1,(dreg),(basereg)); \
219 x86_64_emit_imm8((disp)); \
221 x86_64_address_byte(2,(dreg),(basereg)); \
222 x86_64_emit_imm32((disp)); \
/*
 * x86_64_emit_memindex: ModR/M + SIB (+ displacement) bytes for a
 * basereg + indexreg * 2^scale + disp operand.  basereg == -1 selects the
 * index-only form (SIB base = 5) with a mandatory disp32; otherwise a
 * zero displacement is omitted where the encoding allows it (not for
 * RBP/R13), and disp8 is used whenever IS_IMM8 holds.
 */
227 #define x86_64_emit_memindex(reg,disp,basereg,indexreg,scale) \
229 if ((basereg) == -1) { \
230 x86_64_address_byte(0,(reg),4); \
231 x86_64_address_byte((scale),(indexreg),5); \
232 x86_64_emit_imm32((disp)); \
234 } else if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
235 x86_64_address_byte(0,(reg),4); \
236 x86_64_address_byte((scale),(indexreg),(basereg)); \
238 } else if (IS_IMM8((disp))) { \
239 x86_64_address_byte(1,(reg),4); \
240 x86_64_address_byte((scale),(indexreg),(basereg)); \
241 x86_64_emit_imm8 ((disp)); \
244 x86_64_address_byte(2,(reg),4); \
245 x86_64_address_byte((scale),(indexreg),(basereg)); \
246 x86_64_emit_imm32((disp)); \
/* x86_64_emit_imm8: emit one immediate byte through cd->mcodeptr. */
251 #define x86_64_emit_imm8(imm) \
252 *(cd->mcodeptr++) = (u1) ((imm) & 0xff);
/* x86_64_emit_imm16: emit a 16-bit immediate byte-by-byte (little-endian)
   via the x86_64_imm_buf union. */
255 #define x86_64_emit_imm16(imm) \
257 x86_64_imm_buf imb; \
258 imb.i = (s4) (imm); \
259 *(cd->mcodeptr++) = imb.b[0]; \
260 *(cd->mcodeptr++) = imb.b[1]; \
/* x86_64_emit_imm32: emit a 32-bit immediate byte-by-byte (little-endian)
   via the x86_64_imm_buf union. */
264 #define x86_64_emit_imm32(imm) \
266 x86_64_imm_buf imb; \
267 imb.i = (s4) (imm); \
268 *(cd->mcodeptr++) = imb.b[0]; \
269 *(cd->mcodeptr++) = imb.b[1]; \
270 *(cd->mcodeptr++) = imb.b[2]; \
271 *(cd->mcodeptr++) = imb.b[3]; \
/* x86_64_emit_imm64: emit a 64-bit immediate byte-by-byte (little-endian)
   via the x86_64_imm_buf union's s8 member. */
275 #define x86_64_emit_imm64(imm) \
277 x86_64_imm_buf imb; \
278 imb.l = (s8) (imm); \
279 *(cd->mcodeptr++) = imb.b[0]; \
280 *(cd->mcodeptr++) = imb.b[1]; \
281 *(cd->mcodeptr++) = imb.b[2]; \
282 *(cd->mcodeptr++) = imb.b[3]; \
283 *(cd->mcodeptr++) = imb.b[4]; \
284 *(cd->mcodeptr++) = imb.b[5]; \
285 *(cd->mcodeptr++) = imb.b[6]; \
286 *(cd->mcodeptr++) = imb.b[7]; \
290 /* additional functions and macros to generate code ***************************/
292 #define BlockPtrOfPC(pc) ((basicblock *) iptr->target)
296 #define COUNT_SPILLS count_spills++
/*
 * CALCOFFSETBYTES: add to (var) the number of displacement bytes a
 * basereg + val operand will occupy: 4 for a disp32 (val outside the
 * signed-byte range), 1 for a nonzero disp8, and 1 even for a zero
 * displacement when the base register is RBP/RSP/R12/R13 -- those
 * encodings cannot omit the displacement (see x86_64_emit_membase).
 *
 * Wrapped in do { } while (0) so the multi-branch macro expands to a
 * single statement and stays safe inside unbraced if/else bodies
 * (CERT PRE10-C); call sites keep their trailing semicolon.
 */
#define CALCOFFSETBYTES(var, reg, val) \
    do { \
        if ((s4) (val) < -128 || (s4) (val) > 127) \
            (var) += 4; \
        else if ((s4) (val) != 0) \
            (var) += 1; \
        else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) \
            (var) += 1; \
    } while (0)
/* CALCIMMEDIATEBYTES: add to (var) the size of the immediate needed for
   val -- 4 bytes when it does not fit a signed byte.  NOTE(review): the
   one-byte else branch is not visible in this chunk; confirm it adds 1. */
308 #define CALCIMMEDIATEBYTES(var, val) \
309 if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
313 /* gen_nullptr_check(objreg) */
/*
 * gen_nullptr_check: test objreg against itself and emit a je with a
 * zero displacement; the branch is registered with codegen_addxnullrefs
 * for later patching to the null-pointer exception stub.
 */
315 #define gen_nullptr_check(objreg) \
317 x86_64_test_reg_reg(cd, (objreg), (objreg)); \
318 x86_64_jcc(cd, X86_64_CC_E, 0); \
319 codegen_addxnullrefs(cd, cd->mcodeptr); \
/*
 * gen_bound_check: compare index register s2 against the array length
 * stored at s1 + OFFSET(java_arrayheader, size) and emit a jae (unsigned
 * compare covers negative indices too) registered with
 * codegen_addxboundrefs for patching to the bounds-exception stub.
 * Relies on s1 and s2 being in scope at the expansion site.
 */
323 #define gen_bound_check \
325 x86_64_alul_membase_reg(cd, X86_64_CMP, s1, OFFSET(java_arrayheader, size), s2); \
326 x86_64_jcc(cd, X86_64_CC_AE, 0); \
327 codegen_addxboundrefs(cd, cd->mcodeptr, s2); \
/*
 * gen_div_check: compare the divisor against zero -- in its stack slot if
 * spilled, otherwise in its register -- and emit a je registered with
 * codegen_addxdivrefs for patching to the arithmetic-exception stub.
 * NOTE(review): the flag test uses the parameter (v) but the operand
 * accesses use `src` -- presumably v == src at every call site; confirm.
 */
331 #define gen_div_check(v) \
333 if ((v)->flags & INMEMORY) { \
334 x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8); \
336 x86_64_test_reg_reg(cd, src->regoff, src->regoff); \
338 x86_64_jcc(cd, X86_64_CC_E, 0); \
339 codegen_addxdivrefs(cd, cd->mcodeptr); \
/*
 * MCODECHECK(icnt): ensure at least icnt bytes of code memory remain;
 * when the write pointer would run past cd->mcodeend, grow the code area
 * via codegen_increase() and continue with the relocated pointer.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside unbraced if/else bodies (CERT PRE10-C); call sites
 * keep their trailing semicolon.
 */
#define MCODECHECK(icnt) \
    do { \
        if ((cd->mcodeptr + (icnt)) > (u1 *) cd->mcodeend) \
            cd->mcodeptr = (u1 *) codegen_increase(cd, cd->mcodeptr); \
    } while (0)
350 generates an integer-move from register a to b.
351 if a and b are the same int-register, no code will be generated.
/* M_INTMOVE: mov reg -> dreg, elided when source and destination match. */
354 #define M_INTMOVE(reg,dreg) \
355 if ((reg) != (dreg)) { \
356 x86_64_mov_reg_reg(cd, (reg),(dreg)); \
361 generates a floating-point-move from register a to b.
362 if a and b are the same float-register, no code will be generated
/* M_FLTMOVE: movq reg -> dreg, elided when source and destination match. */
365 #define M_FLTMOVE(reg,dreg) \
366 if ((reg) != (dreg)) { \
367 x86_64_movq_reg_reg(cd, (reg),(dreg)); \
372 this function generates code to fetch data from a pseudo-register
373 into a real register.
374 If the pseudo-register has actually been assigned to a real
375 register, no code will be emitted, since following operations
376 can use this register directly.
378 v: pseudoregister to be fetched from
379 tempregnum: temporary register to be used if v is actually spilled to ram
381 return: the register number, where the operand can be found after
382 fetching (this will be either tempregnum or the register
383 number already given to v)
/* var_to_reg_int: spilled TYPE_INT values load with the 32-bit movl,
   everything else with the 64-bit mov; values already in a register just
   report their register number in regnr. */
386 #define var_to_reg_int(regnr,v,tempnr) \
387 if ((v)->flags & INMEMORY) { \
389 if ((v)->type == TYPE_INT) { \
390 x86_64_movl_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
392 x86_64_mov_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
396 regnr = (v)->regoff; \
/* var_to_reg_flt: float analogue of var_to_reg_int -- movlps for TYPE_FLT,
   movlpd otherwise; values already in a register just report their
   register number in regnr. */
401 #define var_to_reg_flt(regnr,v,tempnr) \
402 if ((v)->flags & INMEMORY) { \
404 if ((v)->type == TYPE_FLT) { \
405 x86_64_movlps_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
407 x86_64_movlpd_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
409 /* x86_64_movq_membase_reg(REG_SP, (v)->regoff * 8, tempnr);*/ \
412 regnr = (v)->regoff; \
416 /* store_reg_to_var_xxx:
417 This function generates the code to store the result of an operation
418 back into a spilled pseudo-variable.
419 If the pseudo-variable has not been spilled in the first place, this
420 function will generate nothing.
422 v ............ Pseudovariable
423 tempregnum ... Number of the temporary registers as returned by
/* store_reg_to_var_int: 64-bit mov of tempregnum back into sptr's stack
   slot (REG_SP + regoff * 8) when sptr lives in memory; no-op otherwise. */
427 #define store_reg_to_var_int(sptr, tempregnum) \
428 if ((sptr)->flags & INMEMORY) { \
430 x86_64_mov_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
/* store_reg_to_var_flt: movq of the float register back into sptr's stack
   slot (REG_SP + regoff * 8) when sptr lives in memory; no-op otherwise. */
434 #define store_reg_to_var_flt(sptr, tempregnum) \
435 if ((sptr)->flags & INMEMORY) { \
437 x86_64_movq_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
/*
 * M_COPY: copy stackslot `from` to stackslot `to` unless they already share
 * the same location (same regoff and same INMEMORY-ness), using the float
 * or integer fetch/store macros depending on from->type.  Relies on the
 * locals d, s1 and rd at the expansion site.
 */
441 #define M_COPY(from,to) \
442 d = reg_of_var(rd, to, REG_ITMP1); \
443 if ((from->regoff != to->regoff) || \
444 ((from->flags ^ to->flags) & INMEMORY)) { \
445 if (IS_FLT_DBL_TYPE(from->type)) { \
446 var_to_reg_flt(s1, from, d); \
448 store_reg_to_var_flt(to, d); \
450 var_to_reg_int(s1, from, d); \
452 store_reg_to_var_int(to, d); \
457 /* function gen_resolvebranch **************************************************
459 backpatches a branch instruction
461 parameters: ip ... pointer to instruction after branch (void*)
462 so ... offset of instruction after branch (s8)
463 to ... offset of branch target (s8)
465 *******************************************************************************/
/* Stores the signed 32-bit relative displacement (to - so) into the four
   bytes immediately preceding ip, i.e. the branch's disp32 field. */
467 #define gen_resolvebranch(ip,so,to) \
468 *((s4*) ((ip) - 4)) = (s4) ((to) - (so));
471 /* function prototypes */
473 void thread_restartcriticalsection(ucontext_t *uc);
475 #endif /* _CODEGEN_H */
479 * These are local overrides for various environment variables in Emacs.
480 * Please do not remove this and leave it at the end of the file, where
481 * Emacs will automagically detect them.
482 * ---------------------------------------------------------------------
485 * indent-tabs-mode: t