-/* jit/i386/codegen.h - code generation macros and definitions for x86_64
+/* vm/jit/x86_64/codegen.h - code generation macros and definitions for x86_64
- Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
- R. Grafl, A. Krall, C. Kruegel, C. Oates, R. Obermaisser,
- M. Probst, S. Ring, E. Steiner, C. Thalinger, D. Thuernbeck,
- P. Tomsich, J. Wenninger
+ Copyright (C) 1996-2005 R. Grafl, A. Krall, C. Kruegel, C. Oates,
+ R. Obermaisser, M. Platter, M. Probst, S. Ring, E. Steiner,
+ C. Thalinger, D. Thuernbeck, P. Tomsich, C. Ullrich, J. Wenninger,
+ Institut f. Computersprachen - TU Wien
This file is part of CACAO.
Authors: Andreas Krall
Christian Thalinger
- $Id: codegen.h 712 2003-12-07 20:39:09Z twisti $
+ $Id: codegen.h 1735 2004-12-07 14:33:27Z twisti $
*/
#ifndef _CODEGEN_H
#define _CODEGEN_H
-#include "jit.h"
+#include <ucontext.h>
-
-/* x86_64 register numbers */
-#define RIP -1
-#define RAX 0
-#define RCX 1
-#define RDX 2
-#define RBX 3
-#define RSP 4
-#define RBP 5
-#define RSI 6
-#define RDI 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-
-#define XMM0 0
-#define XMM1 1
-#define XMM2 2
-#define XMM3 3
-#define XMM4 4
-#define XMM5 5
-#define XMM6 6
-#define XMM7 7
-#define XMM8 8
-#define XMM9 9
-#define XMM10 10
-#define XMM11 11
-#define XMM12 12
-#define XMM13 13
-#define XMM14 14
-#define XMM15 15
-
-
-/* preallocated registers *****************************************************/
-
-/* integer registers */
-
-#define REG_RESULT RAX /* to deliver method results */
-
-#define REG_ITMP1 RAX /* temporary register */
-#define REG_ITMP2 R10 /* temporary register and method pointer */
-#define REG_ITMP3 R11 /* temporary register */
-
-#define REG_NULL -1 /* used for reg_of_var where d is not needed */
-
-#define REG_ITMP1_XPTR RAX /* exception pointer = temporary register 1 */
-#define REG_ITMP2_XPC R10 /* exception pc = temporary register 2 */
-
-#define REG_SP RSP /* stack pointer */
-
-/* floating point registers */
-
-#define REG_FRESULT XMM0 /* to deliver floating point method results */
-
-#define REG_FTMP1 XMM8 /* temporary floating point register */
-#define REG_FTMP2 XMM9 /* temporary floating point register */
-#define REG_FTMP3 XMM10 /* temporary floating point register */
-
-
-#define INT_ARG_CNT 6 /* number of int argument registers */
-#define INT_SAV_CNT 5 /* number of int callee saved registers */
-
-#define FLT_ARG_CNT 4 /* number of flt argument registers */
-#define FLT_SAV_CNT 0 /* number of flt callee saved registers */
+#include "vm/jit/x86_64/types.h"
/* macros to create code ******************************************************/
/* modrm and stuff */
#define x86_64_address_byte(mod,reg,rm) \
- *(mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));
+ *(cd->mcodeptr++) = ((((mod) & 0x03) << 6) | (((reg) & 0x07) << 3) | ((rm) & 0x07));
#define x86_64_emit_reg(reg,rm) \
#define x86_64_emit_rex(size,reg,index,rm) \
if ((size) == 1 || (reg) > 7 || (index) > 7 || (rm) > 7) { \
- *(mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
+ *(cd->mcodeptr++) = (0x40 | (((size) & 0x01) << 3) | ((((reg) >> 3) & 0x01) << 2) | ((((index) >> 3) & 0x01) << 1) | (((rm) >> 3) & 0x01)); \
}
#define x86_64_emit_imm8(imm) \
- *(mcodeptr++) = (u1) ((imm) & 0xff);
+ *(cd->mcodeptr++) = (u1) ((imm) & 0xff);
#define x86_64_emit_imm16(imm) \
do { \
x86_64_imm_buf imb; \
imb.i = (s4) (imm); \
- *(mcodeptr++) = imb.b[0]; \
- *(mcodeptr++) = imb.b[1]; \
+ *(cd->mcodeptr++) = imb.b[0]; \
+ *(cd->mcodeptr++) = imb.b[1]; \
} while (0)
do { \
x86_64_imm_buf imb; \
imb.i = (s4) (imm); \
- *(mcodeptr++) = imb.b[0]; \
- *(mcodeptr++) = imb.b[1]; \
- *(mcodeptr++) = imb.b[2]; \
- *(mcodeptr++) = imb.b[3]; \
+ *(cd->mcodeptr++) = imb.b[0]; \
+ *(cd->mcodeptr++) = imb.b[1]; \
+ *(cd->mcodeptr++) = imb.b[2]; \
+ *(cd->mcodeptr++) = imb.b[3]; \
} while (0)
do { \
x86_64_imm_buf imb; \
imb.l = (s8) (imm); \
- *(mcodeptr++) = imb.b[0]; \
- *(mcodeptr++) = imb.b[1]; \
- *(mcodeptr++) = imb.b[2]; \
- *(mcodeptr++) = imb.b[3]; \
- *(mcodeptr++) = imb.b[4]; \
- *(mcodeptr++) = imb.b[5]; \
- *(mcodeptr++) = imb.b[6]; \
- *(mcodeptr++) = imb.b[7]; \
+ *(cd->mcodeptr++) = imb.b[0]; \
+ *(cd->mcodeptr++) = imb.b[1]; \
+ *(cd->mcodeptr++) = imb.b[2]; \
+ *(cd->mcodeptr++) = imb.b[3]; \
+ *(cd->mcodeptr++) = imb.b[4]; \
+ *(cd->mcodeptr++) = imb.b[5]; \
+ *(cd->mcodeptr++) = imb.b[6]; \
+ *(cd->mcodeptr++) = imb.b[7]; \
} while (0)
-/* code generation prototypes */
-
-void x86_64_emit_ialu(s4 alu_op, stackptr src, instruction *iptr);
-void x86_64_emit_lalu(s4 alu_op, stackptr src, instruction *iptr);
-void x86_64_emit_ialuconst(s4 alu_op, stackptr src, instruction *iptr);
-void x86_64_emit_laluconst(s4 alu_op, stackptr src, instruction *iptr);
-void x86_64_emit_ishift(s4 shift_op, stackptr src, instruction *iptr);
-void x86_64_emit_lshift(s4 shift_op, stackptr src, instruction *iptr);
-void x86_64_emit_ishiftconst(s4 shift_op, stackptr src, instruction *iptr);
-void x86_64_emit_lshiftconst(s4 shift_op, stackptr src, instruction *iptr);
-void x86_64_emit_ifcc(s4 if_op, stackptr src, instruction *iptr);
-void x86_64_emit_if_lcc(s4 if_op, stackptr src, instruction *iptr);
-void x86_64_emit_if_icmpcc(s4 if_op, stackptr src, instruction *iptr);
-void x86_64_emit_if_lcmpcc(s4 if_op, stackptr src, instruction *iptr);
-
-
-/* integer instructions */
-
-void x86_64_mov_reg_reg(s8 reg, s8 dreg);
-void x86_64_mov_imm_reg(s8 imm, s8 reg);
-void x86_64_movl_imm_reg(s8 imm, s8 reg);
-void x86_64_mov_membase_reg(s8 basereg, s8 disp, s8 reg);
-void x86_64_movl_membase_reg(s8 basereg, s8 disp, s8 reg);
-void x86_64_mov_membase32_reg(s8 basereg, s8 disp, s8 reg);
-void x86_64_mov_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_movl_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_mov_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
-void x86_64_movl_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
-void x86_64_mov_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movl_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movw_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movb_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_mov_imm_membase(s8 imm, s8 basereg, s8 disp);
-void x86_64_movl_imm_membase(s8 imm, s8 basereg, s8 disp);
-void x86_64_movsbq_reg_reg(s8 reg, s8 dreg);
-void x86_64_movsbq_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movswq_reg_reg(s8 reg, s8 dreg);
-void x86_64_movswq_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movslq_reg_reg(s8 reg, s8 dreg);
-void x86_64_movslq_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movzwq_reg_reg(s8 reg, s8 dreg);
-void x86_64_movzwq_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movswq_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
-void x86_64_movsbq_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
-void x86_64_movzwq_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 reg);
-void x86_64_alu_reg_reg(s8 opc, s8 reg, s8 dreg);
-void x86_64_alul_reg_reg(s8 opc, s8 reg, s8 dreg);
-void x86_64_alu_reg_membase(s8 opc, s8 reg, s8 basereg, s8 disp);
-void x86_64_alul_reg_membase(s8 opc, s8 reg, s8 basereg, s8 disp);
-void x86_64_alu_membase_reg(s8 opc, s8 basereg, s8 disp, s8 reg);
-void x86_64_alul_membase_reg(s8 opc, s8 basereg, s8 disp, s8 reg);
-void x86_64_alu_imm_reg(s8 opc, s8 imm, s8 dreg);
-void x86_64_alul_imm_reg(s8 opc, s8 imm, s8 dreg);
-void x86_64_alu_imm_membase(s8 opc, s8 imm, s8 basereg, s8 disp);
-void x86_64_alul_imm_membase(s8 opc, s8 imm, s8 basereg, s8 disp);
-void x86_64_test_reg_reg(s8 reg, s8 dreg);
-void x86_64_testl_reg_reg(s8 reg, s8 dreg);
-void x86_64_test_imm_reg(s8 imm, s8 reg);
-void x86_64_testw_imm_reg(s8 imm, s8 reg);
-void x86_64_testb_imm_reg(s8 imm, s8 reg);
-void x86_64_lea_membase_reg(s8 basereg, s8 disp, s8 reg);
-void x86_64_leal_membase_reg(s8 basereg, s8 disp, s8 reg);
-void x86_64_inc_reg(s8 reg);
-void x86_64_incl_reg(s8 reg);
-void x86_64_inc_membase(s8 basereg, s8 disp);
-void x86_64_incl_membase(s8 basereg, s8 disp);
-void x86_64_dec_reg(s8 reg);
-void x86_64_decl_reg(s8 reg);
-void x86_64_dec_membase(s8 basereg, s8 disp);
-void x86_64_decl_membase(s8 basereg, s8 disp);
-void x86_64_cltd();
-void x86_64_cqto();
-void x86_64_imul_reg_reg(s8 reg, s8 dreg);
-void x86_64_imull_reg_reg(s8 reg, s8 dreg);
-void x86_64_imul_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_imull_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_imul_imm_reg(s8 imm, s8 dreg);
-void x86_64_imul_imm_reg_reg(s8 imm,s8 reg, s8 dreg);
-void x86_64_imull_imm_reg_reg(s8 imm, s8 reg, s8 dreg);
-void x86_64_imul_imm_membase_reg(s8 imm, s8 basereg, s8 disp, s8 dreg);
-void x86_64_imull_imm_membase_reg(s8 imm, s8 basereg, s8 disp, s8 dreg);
-void x86_64_idiv_reg(s8 reg);
-void x86_64_idivl_reg(s8 reg);
-void x86_64_ret();
-void x86_64_shift_reg(s8 opc, s8 reg);
-void x86_64_shiftl_reg(s8 opc, s8 reg);
-void x86_64_shift_membase(s8 opc, s8 basereg, s8 disp);
-void x86_64_shiftl_membase(s8 opc, s8 basereg, s8 disp);
-void x86_64_shift_imm_reg(s8 opc, s8 imm, s8 dreg);
-void x86_64_shiftl_imm_reg(s8 opc, s8 imm, s8 dreg);
-void x86_64_shift_imm_membase(s8 opc, s8 imm, s8 basereg, s8 disp);
-void x86_64_shiftl_imm_membase(s8 opc, s8 imm, s8 basereg, s8 disp);
-void x86_64_jmp_imm(s8 imm);
-void x86_64_jmp_reg(s8 reg);
-void x86_64_jcc(s8 opc, s8 imm);
-void x86_64_setcc_reg(s8 opc, s8 reg);
-void x86_64_setcc_membase(s8 opc, s8 basereg, s8 disp);
-void x86_64_cmovcc_reg_reg(s8 opc, s8 reg, s8 dreg);
-void x86_64_cmovccl_reg_reg(s8 opc, s8 reg, s8 dreg);
-void x86_64_neg_reg(s8 reg);
-void x86_64_negl_reg(s8 reg);
-void x86_64_neg_membase(s8 basereg, s8 disp);
-void x86_64_negl_membase(s8 basereg, s8 disp);
-void x86_64_push_imm(s8 imm);
-void x86_64_pop_reg(s8 reg);
-void x86_64_xchg_reg_reg(s8 reg, s8 dreg);
-void x86_64_nop();
-void x86_64_call_reg(s8 reg);
-void x86_64_call_imm(s8 imm);
-
-
-/* floating point instructions (SSE2) */
-
-void x86_64_addsd_reg_reg(s8 reg, s8 dreg);
-void x86_64_addss_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtsi2ssq_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtsi2ss_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtsi2sdq_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtsi2sd_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtss2sd_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvtsd2ss_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvttss2siq_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvttss2si_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvttsd2siq_reg_reg(s8 reg, s8 dreg);
-void x86_64_cvttsd2si_reg_reg(s8 reg, s8 dreg);
-void x86_64_divss_reg_reg(s8 reg, s8 dreg);
-void x86_64_divsd_reg_reg(s8 reg, s8 dreg);
-void x86_64_movd_reg_freg(s8 reg, s8 freg);
-void x86_64_movd_freg_reg(s8 freg, s8 reg);
-void x86_64_movd_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_movd_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movd_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movdl_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movd_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 dreg);
-void x86_64_movq_reg_reg(s8 reg, s8 dreg);
-void x86_64_movq_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_movq_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movss_reg_reg(s8 reg, s8 dreg);
-void x86_64_movsd_reg_reg(s8 reg, s8 dreg);
-void x86_64_movss_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_movsd_reg_membase(s8 reg, s8 basereg, s8 disp);
-void x86_64_movss_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movlps_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movsd_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movlpd_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_movss_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movsd_reg_memindex(s8 reg, s8 disp, s8 basereg, s8 indexreg, s8 scale);
-void x86_64_movss_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 dreg);
-void x86_64_movsd_memindex_reg(s8 disp, s8 basereg, s8 indexreg, s8 scale, s8 dreg);
-void x86_64_mulss_reg_reg(s8 reg, s8 dreg);
-void x86_64_mulsd_reg_reg(s8 reg, s8 dreg);
-void x86_64_subss_reg_reg(s8 reg, s8 dreg);
-void x86_64_subsd_reg_reg(s8 reg, s8 dreg);
-void x86_64_ucomiss_reg_reg(s8 reg, s8 dreg);
-void x86_64_ucomisd_reg_reg(s8 reg, s8 dreg);
-void x86_64_xorps_reg_reg(s8 reg, s8 dreg);
-void x86_64_xorps_membase_reg(s8 basereg, s8 disp, s8 dreg);
-void x86_64_xorpd_reg_reg(s8 reg, s8 dreg);
-void x86_64_xorpd_membase_reg(s8 basereg, s8 disp, s8 dreg);
+/* additional functions and macros to generate code ***************************/
+
+#define BlockPtrOfPC(pc) ((basicblock *) iptr->target)
+
+
+#ifdef STATISTICS
+#define COUNT_SPILLS count_spills++
+#else
+#define COUNT_SPILLS
+#endif
+
+
+#define CALCOFFSETBYTES(var, reg, val) \
+ if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
+ else if ((s4) (val) != 0) (var) += 1; \
+ else if ((reg) == RBP || (reg) == RSP || (reg) == R12 || (reg) == R13) (var) += 1;
+
+
+#define CALCIMMEDIATEBYTES(var, val) \
+ if ((s4) (val) < -128 || (s4) (val) > 127) (var) += 4; \
+ else (var) += 1;
+
+
+/* gen_nullptr_check(objreg) */
+
+#define gen_nullptr_check(objreg) \
+ if (checknull) { \
+ x86_64_test_reg_reg(cd, (objreg), (objreg)); \
+ x86_64_jcc(cd, X86_64_CC_E, 0); \
+ codegen_addxnullrefs(cd, cd->mcodeptr); \
+ }
+
+
+#define gen_bound_check \
+ if (checkbounds) { \
+ x86_64_alul_membase_reg(cd, X86_64_CMP, s1, OFFSET(java_arrayheader, size), s2); \
+ x86_64_jcc(cd, X86_64_CC_AE, 0); \
+ codegen_addxboundrefs(cd, cd->mcodeptr, s2); \
+ }
+
+
+#define gen_div_check(v) \
+ if (checknull) { \
+ if ((v)->flags & INMEMORY) { \
+ x86_64_alu_imm_membase(cd, X86_64_CMP, 0, REG_SP, src->regoff * 8); \
+ } else { \
+ x86_64_test_reg_reg(cd, src->regoff, src->regoff); \
+ } \
+ x86_64_jcc(cd, X86_64_CC_E, 0); \
+ codegen_addxdivrefs(cd, cd->mcodeptr); \
+ }
+
+
+/* MCODECHECK(icnt) */
+
+#define MCODECHECK(icnt) \
+ if ((cd->mcodeptr + (icnt)) > (u1 *) cd->mcodeend) \
+ cd->mcodeptr = (u1 *) codegen_increase(cd, cd->mcodeptr)
+
+/* M_INTMOVE:
+ generates an integer-move from register a to b.
+ if a and b are the same int-register, no code will be generated.
+*/
+
+#define M_INTMOVE(reg,dreg) \
+ if ((reg) != (dreg)) { \
+ x86_64_mov_reg_reg(cd, (reg),(dreg)); \
+ }
+
+
+/* M_FLTMOVE:
+ generates a floating-point-move from register a to b.
+ if a and b are the same float-register, no code will be generated
+*/
+
+#define M_FLTMOVE(reg,dreg) \
+ if ((reg) != (dreg)) { \
+ x86_64_movq_reg_reg(cd, (reg),(dreg)); \
+ }
+
+
+/* var_to_reg_xxx:
+ this function generates code to fetch data from a pseudo-register
+ into a real register.
+ If the pseudo-register has actually been assigned to a real
+ register, no code will be emitted, since following operations
+ can use this register directly.
+
+ v: pseudoregister to be fetched from
+ tempregnum: temporary register to be used if v is actually spilled to ram
+
+ return: the register number, where the operand can be found after
+ fetching (this will be either tempregnum or the register
+ number already given to v)
+*/
+
+#define var_to_reg_int(regnr,v,tempnr) \
+ if ((v)->flags & INMEMORY) { \
+ COUNT_SPILLS; \
+ if ((v)->type == TYPE_INT) { \
+ x86_64_movl_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
+ } else { \
+ x86_64_mov_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
+ } \
+ regnr = tempnr; \
+ } else { \
+ regnr = (v)->regoff; \
+ }
+
+
+
+#define var_to_reg_flt(regnr,v,tempnr) \
+ if ((v)->flags & INMEMORY) { \
+ COUNT_SPILLS; \
+ if ((v)->type == TYPE_FLT) { \
+ x86_64_movlps_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
+ } else { \
+ x86_64_movlpd_membase_reg(cd, REG_SP, (v)->regoff * 8, tempnr); \
+ } \
+/* x86_64_movq_membase_reg(REG_SP, (v)->regoff * 8, tempnr);*/ \
+ regnr = tempnr; \
+ } else { \
+ regnr = (v)->regoff; \
+ }
+
+
+/* store_reg_to_var_xxx:
+ This function generates the code to store the result of an operation
+ back into a spilled pseudo-variable.
+ If the pseudo-variable has not been spilled in the first place, this
+ function will generate nothing.
+
+ v ............ Pseudovariable
+ tempregnum ... Number of the temporary registers as returned by
+ reg_of_var.
+*/
+
+#define store_reg_to_var_int(sptr, tempregnum) \
+ if ((sptr)->flags & INMEMORY) { \
+ COUNT_SPILLS; \
+ x86_64_mov_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
+ }
+
+
+#define store_reg_to_var_flt(sptr, tempregnum) \
+ if ((sptr)->flags & INMEMORY) { \
+ COUNT_SPILLS; \
+ x86_64_movq_reg_membase(cd, tempregnum, REG_SP, (sptr)->regoff * 8); \
+ }
+
+
+#define M_COPY(from,to) \
+ d = reg_of_var(rd, to, REG_ITMP1); \
+ if ((from->regoff != to->regoff) || \
+ ((from->flags ^ to->flags) & INMEMORY)) { \
+ if (IS_FLT_DBL_TYPE(from->type)) { \
+ var_to_reg_flt(s1, from, d); \
+ M_FLTMOVE(s1, d); \
+ store_reg_to_var_flt(to, d); \
+ } else { \
+ var_to_reg_int(s1, from, d); \
+ M_INTMOVE(s1, d); \
+ store_reg_to_var_int(to, d); \
+ } \
+ }
+
+
+/* #define ALIGNCODENOP {if((int)((long)mcodeptr&7)){M_NOP;}} */
+#define ALIGNCODENOP
/* function gen_resolvebranch **************************************************
#define gen_resolvebranch(ip,so,to) \
*((s4*) ((ip) - 4)) = (s4) ((to) - (so));
-#define SOFTNULLPTRCHECK /* soft null pointer check supportet as option */
+
+/* function prototypes */
+
+void thread_restartcriticalsection(ucontext_t *uc);
#endif /* _CODEGEN_H */
* tab-width: 4
* End:
*/
-