-/* jit/x86_64/codegen.h - code generation macros and definitions for x86_64
+/* src/vm/jit/x86_64/codegen.h - code generation macros for x86_64
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
- R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
- M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
- P. Tomsich, J. Wenninger
+ Copyright (C) 1996-2005, 2006 R. Grafl, A. Krall, C. Kruegel,
+ C. Oates, R. Obermaisser, M. Platter, M. Probst, S. Ring,
+ E. Steiner, C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich,
+ J. Wenninger, Institut f. Computersprachen - TU Wien
This file is part of CACAO.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA.
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
- Contact: cacao@complang.tuwien.ac.at
+ Contact: cacao@cacaojvm.org
Authors: Andreas Krall
Christian Thalinger
- $Id: codegen.h 1579 2004-11-24 13:56:06Z twisti $
+ Changes:
+
+ $Id: codegen.h 5507 2006-09-15 09:19:11Z christian $
*/
#ifndef _CODEGEN_H
#define _CODEGEN_H
+#include "config.h"
+
#include <ucontext.h>
-#include "jit/jit.h"
+#include "vm/types.h"
+#include "vm/jit/jit.h"
+
+
+/* some defines ***************************************************************/
+
+#define PATCHER_CALL_SIZE 5 /* size in bytes of a patcher call */
+
+
+/* additional functions and macros to generate code ***************************/
+
+#define CALCOFFSETBYTES(var, reg, val) \
+ if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
+ else if ((s4) (val) != 0) (var) += 1; \
+ else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) (var) += 1;
+
+
+#define CALCIMMEDIATEBYTES(var, val) \
+ if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
+ else (var) += 1;
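+
+/* Rough example of how these size macros are used: starting from an assumed
+ base length (here opcode + REX + ModRM, purely illustrative), the
+ displacement and immediate bytes are added up before the instruction is
+ actually emitted:
+
+ s4 len = 3;
+ CALCOFFSETBYTES(len, RAX, 8); adds 1 (disp8)
+ CALCOFFSETBYTES(len, RAX, 1024); adds 4 (disp32)
+ CALCOFFSETBYTES(len, RSP, 0); adds 1 (RBP/RSP/R12/R13 need an extra byte)
+ CALCIMMEDIATEBYTES(len, 100); adds 1 (imm8)
+*/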
-/* macros to create code ******************************************************/
-/* immediate data union */
-
-typedef union {
- s4 i;
- s8 l;
- float f;
- double d;
- void *a;
- u1 b[8];
-} x86_64_imm_buf;
-
-
-/* opcodes for alu instructions */
-
-typedef enum {
- X86_64_ADD = 0,
- X86_64_OR = 1,
- X86_64_ADC = 2,
- X86_64_SBB = 3,
- X86_64_AND = 4,
- X86_64_SUB = 5,
- X86_64_XOR = 6,
- X86_64_CMP = 7,
- X86_64_NALU
-} X86_64_ALU_Opcode;
-
-
-typedef enum {
- X86_64_ROL = 0,
- X86_64_ROR = 1,
- X86_64_RCL = 2,
- X86_64_RCR = 3,
- X86_64_SHL = 4,
- X86_64_SHR = 5,
- X86_64_SAR = 7,
- X86_64_NSHIFT = 8
-} X86_64_Shift_Opcode;
-
-
-typedef enum {
- X86_64_CC_O = 0,
- X86_64_CC_NO = 1,
- X86_64_CC_B = 2, X86_64_CC_C = 2, X86_64_CC_NAE = 2,
- X86_64_CC_BE = 6, X86_64_CC_NA = 6,
- X86_64_CC_AE = 3, X86_64_CC_NB = 3, X86_64_CC_NC = 3,
- X86_64_CC_E = 4, X86_64_CC_Z = 4,
- X86_64_CC_NE = 5, X86_64_CC_NZ = 5,
- X86_64_CC_A = 7, X86_64_CC_NBE = 7,
- X86_64_CC_S = 8, X86_64_CC_LZ = 8,
- X86_64_CC_NS = 9, X86_64_CC_GEZ = 9,
- X86_64_CC_P = 0x0a, X86_64_CC_PE = 0x0a,
- X86_64_CC_NP = 0x0b, X86_64_CC_PO = 0x0b,
- X86_64_CC_L = 0x0c, X86_64_CC_NGE = 0x0c,
- X86_64_CC_GE = 0x0d, X86_64_CC_NL = 0x0d,
- X86_64_CC_LE = 0x0e, X86_64_CC_NG = 0x0e,
- X86_64_CC_G = 0x0f, X86_64_CC_NLE = 0x0f,
- X86_64_NCC
-} X86_64_CC;
-
-
-/* modrm and stuff */
-
-#define x86_64_address_byte(mod,reg,rm) \
- *(cd->mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));
-
-
-#define x86_64_emit_reg(reg,rm) \
- x86_64_address_byte(3,(reg),(rm));
-
-
-#define x86_64_emit_rex(size,reg,index,rm) \
- if ((size) == 1 || (reg) > 7 || (index) > 7 || (rm) > 7) { \
- *(cd->mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
+/* gen_nullptr_check(objreg) */
+
+#define gen_nullptr_check(objreg) \
+ if (checknull) { \
+ M_TEST(objreg); \
+ M_BEQ(0); \
+ codegen_add_nullpointerexception_ref(cd); \
+ }
+
+
+#define gen_bound_check \
+ if (checkbounds) { \
+ M_ILD(REG_ITMP3, s1, OFFSET(java_arrayheader, size)); \
+ M_ICMP(REG_ITMP3, s2); \
+ M_BAE(0); \
+ codegen_add_arrayindexoutofboundsexception_ref(cd, s2); \
}
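+
+/* Usage sketch (as in the array access cases of codegen.c): s1 is assumed
+ to hold the array reference and s2 the index register; both checks emit a
+ forward branch with displacement 0 plus an exception reference, and the
+ branch target is patched in later:
+
+ gen_nullptr_check(s1);
+ gen_bound_check;
+*/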
-#define x86_64_emit_mem(r,disp) \
+/* MCODECHECK(icnt) */
+
+#define MCODECHECK(icnt) \
do { \
- x86_64_address_byte(0,(r),5); \
- x86_64_emit_imm32((disp)); \
+ if ((cd->mcodeptr + (icnt)) > cd->mcodeend) \
+ codegen_increase(cd); \
} while (0)
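+
+/* MCODECHECK is invoked with a conservative upper bound on the number of
+ bytes about to be emitted, e.g. MCODECHECK(512) before translating a basic
+ block (the value is only illustrative); if that many bytes no longer fit,
+ codegen_increase() grows the code buffer. */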
-#define x86_64_emit_membase(basereg,disp,dreg) \
+#define ALIGNCODENOP \
+ if ((s4) (((ptrint) cd->mcodeptr) & 7)) { \
+ M_NOP; \
+ }
+
+
+/* M_INTMOVE:
+ Generates an integer move from register a to register b.
+ If a and b are the same integer register, no code is generated.
+*/
+
+#define M_INTMOVE(reg,dreg) \
do { \
- if ((basereg) == REG_SP || (basereg) == R12) { \
- if ((disp) == 0) { \
- x86_64_address_byte(0,(dreg),REG_SP); \
- x86_64_address_byte(0,REG_SP,REG_SP); \
- } else if (x86_64_is_imm8((disp))) { \
- x86_64_address_byte(1,(dreg),REG_SP); \
- x86_64_address_byte(0,REG_SP,REG_SP); \
- x86_64_emit_imm8((disp)); \
- } else { \
- x86_64_address_byte(2,(dreg),REG_SP); \
- x86_64_address_byte(0,REG_SP,REG_SP); \
- x86_64_emit_imm32((disp)); \
- } \
- break; \
- } \
- if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
- x86_64_address_byte(0,(dreg),(basereg)); \
- break; \
- } \
- \
- if ((basereg) == RIP) { \
- x86_64_address_byte(0,(dreg),RBP); \
- x86_64_emit_imm32((disp)); \
- break; \
+ if ((reg) != (dreg)) { \
+ M_MOV(reg, dreg); \
} \
- \
- if (x86_64_is_imm8((disp))) { \
- x86_64_address_byte(1,(dreg),(basereg)); \
- x86_64_emit_imm8((disp)); \
- } else { \
- x86_64_address_byte(2,(dreg),(basereg)); \
- x86_64_emit_imm32((disp)); \
+ } while (0)
+
+
+/* M_FLTMOVE:
+ Generates a floating-point move from register a to register b.
+ If a and b are the same float register, no code is generated.
+*/
+
+#define M_FLTMOVE(reg,dreg) \
+ do { \
+ if ((reg) != (dreg)) { \
+ M_FMOV(reg, dreg); \
} \
} while (0)
-#define x86_64_emit_memindex(reg,disp,basereg,indexreg,scale) \
+#define M_COPY(s,d) emit_copy(jd, iptr, &(jd->var[(s)]), &(jd->var[(d)]))
+
+#define ICONST(d,c) \
do { \
- if ((basereg) == -1) { \
- x86_64_address_byte(0,(reg),4); \
- x86_64_address_byte((scale),(indexreg),5); \
- x86_64_emit_imm32((disp)); \
- \
- } else if ((disp) == 0 && (basereg) != RBP && (basereg) != R13) { \
- x86_64_address_byte(0,(reg),4); \
- x86_64_address_byte((scale),(indexreg),(basereg)); \
- \
- } else if (x86_64_is_imm8((disp))) { \
- x86_64_address_byte(1,(reg),4); \
- x86_64_address_byte((scale),(indexreg),(basereg)); \
- x86_64_emit_imm8 ((disp)); \
- \
- } else { \
- x86_64_address_byte(2,(reg),4); \
- x86_64_address_byte((scale),(indexreg),(basereg)); \
- x86_64_emit_imm32((disp)); \
- } \
- } while (0)
+ if ((c) == 0) \
+ M_CLR((d)); \
+ else \
+ M_IMOV_IMM((c), (d)); \
+ } while (0)
+/* do { \ */
+/* M_IMOV_IMM((c), (d)); \ */
+/* } while (0) */
-#define x86_64_is_imm8(imm) \
- (((long)(imm) >= -128 && (long)(imm) <= 127))
+#define LCONST(d,c) \
+ do { \
+ if ((c) == 0) \
+ M_CLR((d)); \
+ else \
+ M_MOV_IMM((c), (d)); \
+ } while (0)
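+
+/* Constant-load sketch: zero is materialized with an xor of the register
+ with itself (M_CLR), anything else with an immediate move (32 bit for
+ ICONST, up to 64 bit for LCONST), e.g.:
+
+ ICONST(REG_ITMP1, 0); xor itmp1, itmp1
+ LCONST(REG_ITMP2, 0x100000000L); mov $imm64, itmp2
+*/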
-#define x86_64_is_imm32(imm) \
- ((long)(imm) >= (-2147483647-1) && (long)(imm) <= 2147483647)
+/* macros to create code ******************************************************/
+#define M_MOV(a,b) emit_mov_reg_reg(cd, (a), (b))
+#define M_MOV_IMM(a,b) emit_mov_imm_reg(cd, (u8) (a), (b))
-#define x86_64_emit_imm8(imm) \
- *(cd->mcodeptr++) = (u1) ((imm) & 0xff);
+#define M_IMOV(a,b) emit_movl_reg_reg(cd, (a), (b))
+#define M_IMOV_IMM(a,b) emit_movl_imm_reg(cd, (u4) (a), (b))
+#define M_FMOV(a,b) emit_movq_reg_reg(cd, (a), (b))
-#define x86_64_emit_imm16(imm) \
- do { \
- x86_64_imm_buf imb; \
- imb.i = (s4) (imm); \
- *(cd->mcodeptr++) = imb.b[0]; \
- *(cd->mcodeptr++) = imb.b[1]; \
- } while (0)
+#define M_ILD(a,b,disp) emit_movl_membase_reg(cd, (b), (disp), (a))
+#define M_LLD(a,b,disp) emit_mov_membase_reg(cd, (b), (disp), (a))
+#define M_ILD32(a,b,disp) emit_movl_membase32_reg(cd, (b), (disp), (a))
+#define M_LLD32(a,b,disp) emit_mov_membase32_reg(cd, (b), (disp), (a))
-#define x86_64_emit_imm32(imm) \
- do { \
- x86_64_imm_buf imb; \
- imb.i = (s4) (imm); \
- *(cd->mcodeptr++) = imb.b[0]; \
- *(cd->mcodeptr++) = imb.b[1]; \
- *(cd->mcodeptr++) = imb.b[2]; \
- *(cd->mcodeptr++) = imb.b[3]; \
- } while (0)
+#define M_IST(a,b,disp) emit_movl_reg_membase(cd, (a), (b), (disp))
+#define M_LST(a,b,disp) emit_mov_reg_membase(cd, (a), (b), (disp))
+#define M_IST_IMM(a,b,disp) emit_movl_imm_membase(cd, (a), (b), (disp))
+#define M_LST_IMM32(a,b,disp) emit_mov_imm_membase(cd, (a), (b), (disp))
-#define x86_64_emit_imm64(imm) \
- do { \
- x86_64_imm_buf imb; \
- imb.l = (s8) (imm); \
- *(cd->mcodeptr++) = imb.b[0]; \
- *(cd->mcodeptr++) = imb.b[1]; \
- *(cd->mcodeptr++) = imb.b[2]; \
- *(cd->mcodeptr++) = imb.b[3]; \
- *(cd->mcodeptr++) = imb.b[4]; \
- *(cd->mcodeptr++) = imb.b[5]; \
- *(cd->mcodeptr++) = imb.b[6]; \
- *(cd->mcodeptr++) = imb.b[7]; \
- } while (0)
+#define M_IST32(a,b,disp) emit_movl_reg_membase32(cd, (a), (b), (disp))
+#define M_LST32(a,b,disp) emit_mov_reg_membase32(cd, (a), (b), (disp))
+#define M_IST32_IMM(a,b,disp) emit_movl_imm_membase32(cd, (a), (b), (disp))
+#define M_LST32_IMM32(a,b,disp) emit_mov_imm_membase32(cd, (a), (b), (disp))
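+
+/* Memory access sketch: spilling a temporary to a stack slot and reloading
+ it (disp is a byte offset from the base register):
+
+ M_LST(REG_ITMP1, REG_SP, 3 * 8); mov itmp1, 24(sp)
+ M_LLD(REG_ITMP1, REG_SP, 3 * 8); mov 24(sp), itmp1
+
+ The *32 variants force a 32-bit displacement so the instruction has a
+ fixed length even when the displacement is patched in afterwards. */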
-/* additional functions and macros to generate code ***************************/
+#define M_IADD(a,b) emit_alul_reg_reg(cd, ALU_ADD, (a), (b))
+#define M_ISUB(a,b) emit_alul_reg_reg(cd, ALU_SUB, (a), (b))
+#define M_IMUL(a,b) emit_imull_reg_reg(cd, (a), (b))
-#define BlockPtrOfPC(pc) ((basicblock *) iptr->target)
+#define M_IADD_IMM(a,b) emit_alul_imm_reg(cd, ALU_ADD, (a), (b))
+#define M_ISUB_IMM(a,b) emit_alul_imm_reg(cd, ALU_SUB, (a), (b))
+#define M_IMUL_IMM(a,b,c) emit_imull_imm_reg_reg(cd, (b), (a), (c))
+#define M_LADD(a,b) emit_alu_reg_reg(cd, ALU_ADD, (a), (b))
+#define M_LSUB(a,b) emit_alu_reg_reg(cd, ALU_SUB, (a), (b))
+#define M_LMUL(a,b) emit_imul_reg_reg(cd, (a), (b))
-#ifdef STATISTICS
-#define COUNT_SPILLS count_spills++
-#else
-#define COUNT_SPILLS
-#endif
+#define M_LADD_IMM(a,b) emit_alu_imm_reg(cd, ALU_ADD, (a), (b))
+#define M_LSUB_IMM(a,b) emit_alu_imm_reg(cd, ALU_SUB, (a), (b))
+#define M_LMUL_IMM(a,b,c) emit_imul_imm_reg_reg(cd, (b), (a), (c))
+#define M_IINC(a) emit_incl_reg(cd, (a))
+#define M_IDEC(a) emit_decl_reg(cd, (a))
-#define CALCOFFSETBYTES(var, reg, val) \
- if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
- else if ((s4) (val) != 0) (var) += 1; \
- else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) (var) += 1;
+#define M_ALD(a,b,disp) M_LLD(a,b,disp)
+#define M_ALD32(a,b,disp) M_LLD32(a,b,disp)
+#define M_AST(a,b,c) M_LST(a,b,c)
+#define M_AST_IMM32(a,b,c) M_LST_IMM32(a,b,c)
-#define CALCIMMEDIATEBYTES(var, val) \
- if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
- else (var) += 1;
+#define M_AADD(a,b) M_LADD(a,b)
+#define M_AADD_IMM(a,b) M_LADD_IMM(a,b)
+#define M_ASUB_IMM(a,b) M_LSUB_IMM(a,b)
+#define M_LADD_IMM32(a,b) emit_alu_imm32_reg(cd, ALU_ADD, (a), (b))
+#define M_AADD_IMM32(a,b) M_LADD_IMM32(a,b)
+#define M_LSUB_IMM32(a,b) emit_alu_imm32_reg(cd, ALU_SUB, (a), (b))
-/* gen_nullptr_check(objreg) */
+#define M_ILEA(a,b,c) emit_leal_membase_reg(cd, (a), (b), (c))
+#define M_LLEA(a,b,c) emit_lea_membase_reg(cd, (a), (b), (c))
+#define M_ALEA(a,b,c) M_LLEA(a,b,c)
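+
+/* Addresses are 64 bits wide on x86_64, so the address (A) macros are plain
+ aliases of the corresponding long (L) macros. */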
-#define gen_nullptr_check(objreg) \
- if (checknull) { \
- x86_64_test_reg_reg(cd, (objreg), (objreg)); \
- x86_64_jcc(cd, X86_64_CC_E, 0); \
- codegen_addxnullrefs(cd, cd->mcodeptr); \
- }
+#define M_INEG(a) emit_negl_reg(cd, (a))
+#define M_LNEG(a) emit_neg_reg(cd, (a))
+#define M_IAND(a,b) emit_alul_reg_reg(cd, ALU_AND, (a), (b))
+#define M_IOR(a,b) emit_alul_reg_reg(cd, ALU_OR, (a), (b))
+#define M_IXOR(a,b) emit_alul_reg_reg(cd, ALU_XOR, (a), (b))
-#define gen_bound_check \
- if (checkbounds) { \
- x86_64_alul_membase_reg(cd, X86_64_CMP, s1, OFFSET(java_arrayheader, size), s2); \
- x86_64_jcc(cd, X86_64_CC_AE, 0); \
- codegen_addxboundrefs(cd, cd->mcodeptr, s2); \
- }
+#define M_IAND_IMM(a,b) emit_alul_imm_reg(cd, ALU_AND, (a), (b))
+#define M_IOR_IMM(a,b) emit_alul_imm_reg(cd, ALU_OR, (a), (b))
+#define M_IXOR_IMM(a,b) emit_alul_imm_reg(cd, ALU_XOR, (a), (b))
+#define M_LAND(a,b) emit_alu_reg_reg(cd, ALU_AND, (a), (b))
+#define M_LOR(a,b) emit_alu_reg_reg(cd, ALU_OR, (a), (b))
+#define M_LXOR(a,b) emit_alu_reg_reg(cd, ALU_XOR, (a), (b))
-#define gen_div_check(v) \
- if (checknull) { \
- if ((v)->flags & INMEMORY) { \
- x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8); \
- } else { \
- x86_64_test_reg_reg(cd, src->regoff, src->regoff); \
- } \
- x86_64_jcc(cd, X86_64_CC_E, 0); \
- codegen_addxdivrefs(cd, cd->mcodeptr); \
- }
+#define M_LAND_IMM(a,b) emit_alu_imm_reg(cd, ALU_AND, (a), (b))
+#define M_LOR_IMM(a,b) emit_alu_imm_reg(cd, ALU_OR, (a), (b))
+#define M_LXOR_IMM(a,b) emit_alu_imm_reg(cd, ALU_XOR, (a), (b))
+#define M_BSEXT(a,b) emit_movsbq_reg_reg(cd, (a), (b))
+#define M_SSEXT(a,b) emit_movswq_reg_reg(cd, (a), (b))
+#define M_ISEXT(a,b) emit_movslq_reg_reg(cd, (a), (b))
-/* MCODECHECK(icnt) */
+#define M_CZEXT(a,b) emit_movzwq_reg_reg(cd, (a), (b))
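+
+/* The extension macros widen a value already held in a 64-bit register:
+ M_BSEXT/M_SSEXT/M_ISEXT sign-extend byte/short/int values (Java byte,
+ short, int), M_CZEXT zero-extends a 16-bit value (Java char). */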
-#define MCODECHECK(icnt) \
- if ((cd->mcodeptr + (icnt)) > (u1 *) cd->mcodeend) \
- cd->mcodeptr = (u1 *) codegen_increase(cd, cd->mcodeptr)
+#define M_ISLL_IMM(a,b) emit_shiftl_imm_reg(cd, SHIFT_SHL, (a), (b))
+#define M_ISRA_IMM(a,b) emit_shiftl_imm_reg(cd, SHIFT_SAR, (a), (b))
+#define M_ISRL_IMM(a,b) emit_shiftl_imm_reg(cd, SHIFT_SHR, (a), (b))
-/* M_INTMOVE:
- generates an integer-move from register a to b.
- if a and b are the same int-register, no code will be generated.
-*/
+#define M_LSLL_IMM(a,b) emit_shift_imm_reg(cd, SHIFT_SHL, (a), (b))
+#define M_LSRA_IMM(a,b) emit_shift_imm_reg(cd, SHIFT_SAR, (a), (b))
+#define M_LSRL_IMM(a,b) emit_shift_imm_reg(cd, SHIFT_SHR, (a), (b))
-#define M_INTMOVE(reg,dreg) \
- if ((reg) != (dreg)) { \
- x86_64_mov_reg_reg(cd, (reg),(dreg)); \
- }
+#define M_TEST(a) emit_test_reg_reg(cd, (a), (a))
+#define M_ITEST(a) emit_testl_reg_reg(cd, (a), (a))
+#define M_LCMP(a,b) emit_alu_reg_reg(cd, ALU_CMP, (a), (b))
+#define M_LCMP_IMM(a,b) emit_alu_imm_reg(cd, ALU_CMP, (a), (b))
+#define M_LCMP_IMM_MEMBASE(a,b,c) emit_alu_imm_membase(cd, ALU_CMP, (a), (b), (c))
+#define M_LCMP_MEMBASE(a,b,c) emit_alu_membase_reg(cd, ALU_CMP, (a), (b), (c))
-/* M_FLTMOVE:
- generates a floating-point-move from register a to b.
- if a and b are the same float-register, no code will be generated
-*/
+#define M_ICMP(a,b) emit_alul_reg_reg(cd, ALU_CMP, (a), (b))
+#define M_ICMP_IMM(a,b) emit_alul_imm_reg(cd, ALU_CMP, (a), (b))
+#define M_ICMP_IMM_MEMBASE(a,b,c) emit_alul_imm_membase(cd, ALU_CMP, (a), (b), (c))
+#define M_ICMP_MEMBASE(a,b,c) emit_alul_membase_reg(cd, ALU_CMP, (a), (b), (c))
-#define M_FLTMOVE(reg,dreg) \
- if ((reg) != (dreg)) { \
- x86_64_movq_reg_reg(cd, (reg),(dreg)); \
- }
+#define M_BEQ(disp) emit_jcc(cd, CC_E, (disp))
+#define M_BNE(disp) emit_jcc(cd, CC_NE, (disp))
+#define M_BLT(disp) emit_jcc(cd, CC_L, (disp))
+#define M_BLE(disp) emit_jcc(cd, CC_LE, (disp))
+#define M_BGE(disp) emit_jcc(cd, CC_GE, (disp))
+#define M_BGT(disp) emit_jcc(cd, CC_G, (disp))
+#define M_BAE(disp) emit_jcc(cd, CC_AE, (disp))
+#define M_BA(disp) emit_jcc(cd, CC_A, (disp))
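+
+/* The conditional branches are usually emitted with displacement 0 (see
+ gen_nullptr_check / gen_bound_check above); the real 32-bit displacement
+ is backpatched once the target address is known (gen_resolvebranch). */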
+#define M_CMOVEQ(a,b) emit_cmovcc_reg_reg(cd, CC_E, (a), (b))
+#define M_CMOVNE(a,b) emit_cmovcc_reg_reg(cd, CC_NE, (a), (b))
+#define M_CMOVLT(a,b) emit_cmovcc_reg_reg(cd, CC_L, (a), (b))
+#define M_CMOVLE(a,b) emit_cmovcc_reg_reg(cd, CC_LE, (a), (b))
+#define M_CMOVGE(a,b) emit_cmovcc_reg_reg(cd, CC_GE, (a), (b))
+#define M_CMOVGT(a,b) emit_cmovcc_reg_reg(cd, CC_G, (a), (b))
-/* var_to_reg_xxx:
- this function generates code to fetch data from a pseudo-register
- into a real register.
- If the pseudo-register has actually been assigned to a real
- register, no code will be emitted, since following operations
- can use this register directly.
-
- v: pseudoregister to be fetched from
- tempregnum: temporary register to be used if v is actually spilled to ram
+#define M_CMOVEQ_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_E, (a), (b))
+#define M_CMOVNE_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_NE, (a), (b))
+#define M_CMOVLT_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_L, (a), (b))
+#define M_CMOVLE_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_LE, (a), (b))
+#define M_CMOVGE_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_GE, (a), (b))
+#define M_CMOVGT_MEMBASE(a,b,c) emit_cmovcc_reg_membase(cd, CC_G, (a), (b))
- return: the register number, where the operand can be found after
- fetching (this wil be either tempregnum or the register
- number allready given to v)
-*/
+#define M_CMOVB(a,b) emit_cmovcc_reg_reg(cd, CC_B, (a), (b))
+#define M_CMOVA(a,b) emit_cmovcc_reg_reg(cd, CC_A, (a), (b))
+#define M_CMOVP(a,b) emit_cmovcc_reg_reg(cd, CC_P, (a), (b))
-#define var_to_reg_int(regnr,v,tempnr) \
- if ((v)->flags & INMEMORY) { \
- COUNT_SPILLS; \
- if ((v)->type == TYPE_INT) { \
- x86_64_movl_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
- } else { \
- x86_64_mov_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
- } \
- regnr = tempnr; \
- } else { \
- regnr = (v)->regoff; \
- }
+#define M_PUSH(a) emit_push_reg(cd, (a))
+#define M_PUSH_IMM(a) emit_push_imm(cd, (a))
+#define M_POP(a) emit_pop_reg(cd, (a))
+#define M_JMP(a) emit_jmp_reg(cd, (a))
+#define M_JMP_IMM(a) emit_jmp_imm(cd, (a))
+#define M_CALL(a) emit_call_reg(cd, (a))
+#define M_CALL_IMM(a) emit_call_imm(cd, (a))
+#define M_RET emit_ret(cd)
+#define M_NOP emit_nop(cd)
-#define var_to_reg_flt(regnr,v,tempnr) \
- if ((v)->flags & INMEMORY) { \
- COUNT_SPILLS; \
- if ((v)->type == TYPE_FLT) { \
- x86_64_movlps_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
- } else { \
- x86_64_movlpd_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
- } \
-/* x86_64_movq_membase_reg(REG_SP, (v)->regoff * 8, tempnr);*/ \
- regnr = tempnr; \
- } else { \
- regnr = (v)->regoff; \
- }
+#define M_CLR(a) M_LXOR(a,a)
-/* store_reg_to_var_xxx:
- This function generates the code to store the result of an operation
- back into a spilled pseudo-variable.
- If the pseudo-variable has not been spilled in the first place, this
- function will generate nothing.
-
- v ............ Pseudovariable
- tempregnum ... Number of the temporary registers as returned by
- reg_of_var.
-*/
-
-#define store_reg_to_var_int(sptr, tempregnum) \
- if ((sptr)->flags & INMEMORY) { \
- COUNT_SPILLS; \
- x86_64_mov_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
- }
+#define M_FLD(a,b,disp) emit_movss_membase_reg(cd, (b), (disp), (a))
+#define M_DLD(a,b,disp) emit_movsd_membase_reg(cd, (b), (disp), (a))
+#define M_FLD32(a,b,disp) emit_movss_membase32_reg(cd, (b), (disp), (a))
+#define M_DLD32(a,b,disp) emit_movsd_membase32_reg(cd, (b), (disp), (a))
-#define store_reg_to_var_flt(sptr, tempregnum) \
- if ((sptr)->flags & INMEMORY) { \
- COUNT_SPILLS; \
- x86_64_movq_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
- }
+#define M_FST(a,b,disp) emit_movss_reg_membase(cd, (a), (b), (disp))
+#define M_DST(a,b,disp) emit_movsd_reg_membase(cd, (a), (b), (disp))
+#define M_FST32(a,b,disp) emit_movss_reg_membase32(cd, (a), (b), (disp))
+#define M_DST32(a,b,disp) emit_movsd_reg_membase32(cd, (a), (b), (disp))
+
+#define M_FADD(a,b) emit_addss_reg_reg(cd, (a), (b))
+#define M_DADD(a,b) emit_addsd_reg_reg(cd, (a), (b))
+#define M_FSUB(a,b) emit_subss_reg_reg(cd, (a), (b))
+#define M_DSUB(a,b) emit_subsd_reg_reg(cd, (a), (b))
+#define M_FMUL(a,b) emit_mulss_reg_reg(cd, (a), (b))
+#define M_DMUL(a,b) emit_mulsd_reg_reg(cd, (a), (b))
+#define M_FDIV(a,b) emit_divss_reg_reg(cd, (a), (b))
+#define M_DDIV(a,b) emit_divsd_reg_reg(cd, (a), (b))
+
+#define M_CVTIF(a,b) emit_cvtsi2ss_reg_reg(cd, (a), (b))
+#define M_CVTID(a,b) emit_cvtsi2sd_reg_reg(cd, (a), (b))
+#define M_CVTLF(a,b) emit_cvtsi2ssq_reg_reg(cd, (a), (b))
+#define M_CVTLD(a,b) emit_cvtsi2sdq_reg_reg(cd, (a), (b))
+#define M_CVTFI(a,b) emit_cvttss2si_reg_reg(cd, (a), (b))
+#define M_CVTDI(a,b) emit_cvttsd2si_reg_reg(cd, (a), (b))
+#define M_CVTFL(a,b) emit_cvttss2siq_reg_reg(cd, (a), (b))
+#define M_CVTDL(a,b) emit_cvttsd2siq_reg_reg(cd, (a), (b))
+
+#define M_CVTFD(a,b) emit_cvtss2sd_reg_reg(cd, (a), (b))
+#define M_CVTDF(a,b) emit_cvtsd2ss_reg_reg(cd, (a), (b))
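+
+/* M_CVTFI/M_CVTDI/M_CVTFL/M_CVTDL map to the truncating cvtt* instructions,
+ which match Java's round-toward-zero semantics for f2i/f2l/d2i/d2l; the
+ overflow case (the 0x80000000.. indefinite result) presumably still has to
+ be fixed up separately by the code generator. */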
-#define M_COPY(from,to) \
- d = reg_of_var(rd, to, REG_ITMP1); \
- if ((from->regoff != to->regoff) || \
- ((from->flags ^ to->flags) & INMEMORY)) { \
- if (IS_FLT_DBL_TYPE(from->type)) { \
- var_to_reg_flt(s1, from, d); \
- M_FLTMOVE(s1, d); \
- store_reg_to_var_flt(to, d); \
- } else { \
- var_to_reg_int(s1, from, d); \
- M_INTMOVE(s1, d); \
- store_reg_to_var_int(to, d); \
- } \
- }
+/* system instructions ********************************************************/
-/* #define ALIGNCODENOP {if((int)((long)mcodeptr&7)){M_NOP;}} */
-#define ALIGNCODENOP
+#define M_RDTSC emit_rdtsc(cd)
+
+#define M_IINC_MEMBASE(a,b) emit_incl_membase(cd, (a), (b))
+
+#define M_IADD_MEMBASE(a,b,c) emit_alul_reg_membase(cd, ALU_ADD, (a), (b), (c))
+#define M_IADC_MEMBASE(a,b,c) emit_alul_reg_membase(cd, ALU_ADC, (a), (b), (c))
+#define M_ISUB_MEMBASE(a,b,c) emit_alul_reg_membase(cd, ALU_SUB, (a), (b), (c))
+#define M_ISBB_MEMBASE(a,b,c) emit_alul_reg_membase(cd, ALU_SBB, (a), (b), (c))
+
+
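+/* Cycle-count profiling (sketch of the mechanism): RDTSC returns the time
+ stamp in EDX:EAX, so RAX/RDX are saved and restored around it; 'code' is
+ expected to hold the codeinfo pointer. The 64-bit cycle counter is kept as
+ two 32-bit halves and updated with sub/sbb on method entry and add/adc on
+ exit, so that the elapsed cycles are accumulated. */
+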
+#define PROFILE_CYCLE_START \
+ do { \
+ if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) { \
+ M_PUSH(RAX); \
+ M_PUSH(RDX); \
+ \
+ M_MOV_IMM(code, REG_ITMP3); \
+ M_RDTSC; \
+ M_ISUB_MEMBASE(RAX, REG_ITMP3, OFFSET(codeinfo, cycles)); \
+ M_ISBB_MEMBASE(RDX, REG_ITMP3, OFFSET(codeinfo, cycles) + 4); \
+ \
+ M_POP(RDX); \
+ M_POP(RAX); \
+ } \
+ } while (0)
+
+#define PROFILE_CYCLE_STOP \
+ do { \
+ if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) { \
+ M_PUSH(RAX); \
+ M_PUSH(RDX); \
+ \
+ M_MOV_IMM(code, REG_ITMP3); \
+ M_RDTSC; \
+ M_IADD_MEMBASE(RAX, REG_ITMP3, OFFSET(codeinfo, cycles)); \
+ M_IADC_MEMBASE(RDX, REG_ITMP3, OFFSET(codeinfo, cycles) + 4); \
+ \
+ M_POP(RDX); \
+ M_POP(RAX); \
+ } \
+ } while (0)
/* function gen_resolvebranch **************************************************

    backpatches a branch instruction

    parameters: ip ... pointer to instruction after branch (void*)
                so ... offset of instruction after branch  (s8)
                to ... offset of branch target             (s8)

*******************************************************************************/

#define gen_resolvebranch(ip,so,to) \
    *((s4*) ((ip) - 4)) = (s4) ((to) - (so));
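+
+/* Example: if a branch was emitted at offset so with a zero displacement and
+ ip points to cd->mcodebase + so (the instruction following the branch),
+ then gen_resolvebranch(cd->mcodebase + so, so, to) overwrites the four
+ displacement bytes with (to - so), i.e. the target offset relative to the
+ instruction after the branch. */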
-
-/* function prototypes */
-
-void thread_restartcriticalsection(ucontext_t *uc);
-
#endif /* _CODEGEN_H */