+/*
+ * x86.brg: X86 code generator
+ *
+ * Author:
+ * Dietmar Maurer (dietmar@ximian.com)
+ *
+ * (C) 2001 Ximian, Inc.
+ */
+
#include <glib.h>
#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/syscall.h>
#include <mono/metadata/blob.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/loader.h>
+#include <mono/metadata/object.h>
+#include <mono/metadata/tabledefs.h>
+#include <mono/metadata/appdomain.h>
#include <mono/arch/x86/x86-codegen.h>
+#include "regset.h"
+#include "jit.h"
+
#define MBTREE_TYPE MBTree
+#define MBCGEN_TYPE MonoFlowGraph
+#define MBCOST_DATA MonoFlowGraph
+#define MBALLOC_STATE mono_mempool_alloc (data->mp, sizeof (MBState))
+
+/* x86 addressing modes expressible in one ModRM/SIB operand.
+ * The values are chosen so that AMBase | AMIndex == AMBaseIndex:
+ * the ADD (index, base) rule below ORs the two child modes together
+ * to build the combined mode.
+ */
+typedef enum {
+	AMImmediate = 0,	// ptr
+	AMBase = 1,	// V[REG]  
+	AMIndex = 2,	// V[REG*X] 
+	AMBaseIndex = 3,	// V[REG*X][REG] 
+} X86AddMode;
+
+/* Decoded address operand: offset(basereg, indexreg << shift).
+ * `amode` selects which of the components are valid; shift is 0-3
+ * (scale 1/2/4/8), matching the 2-bit SIB scale field.
+ */
+typedef struct {
+	int offset;
+	X86AddMode amode:2;
+	unsigned int shift:2;
+	gint8 basereg;
+	gint8 indexreg;
+} X86AddressInfo;
-typedef struct _MBTree MBTree;
struct _MBTree {
- guint16 op;
- MBTree *left, *right;
- gpointer state;
- gpointer emit;
+ guint16 op;
+ unsigned last_instr:1;
+
+ MBTree *left, *right;
+ gpointer state;
+ gpointer emit;
- guint is_jump:1;
- guint last_instr:1;
- guint jump_target:1;
+ gint32 addr;
+ gint32 cli_addr;
- gint32 cli_addr; /* virtual cli address */
- gint32 addr; /* address of emitted instruction */
- gint32 first_addr; /* first code address of a tree */
+ guint8 exclude_mask;
- int reg;
- MonoTypeEnum type;
+ gint8 reg1;
+ gint8 reg2;
+ gint8 reg3;
+
+ MonoValueType svt;
union {
- int i;
+ gint32 i;
+ gint64 l;
gpointer p;
+ MonoBBlock *bb;
+ MonoMethod *m;
+ MethodCallInfo *ci;
+ MonoClass *klass;
+ X86AddressInfo ainfo;
} data;
};
-gboolean same_tree (MBTree *t1, MBTree *t2);
+gint64 mono_llmult (gint64 a, gint64 b);
+guint64 mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh);
+guint64 mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh);
+gint64 mono_lldiv (gint64 a, gint64 b);
+gint64 mono_llrem (gint64 a, gint64 b);
+guint64 mono_lldiv_un (guint64 a, guint64 b);
+guint64 mono_llrem_un (guint64 a, guint64 b);
+gpointer mono_ldsflda (MonoClass *klass, int offset);
+
+gpointer arch_get_lmf_addr (void);
+
+MonoArray*
+mono_array_new_wrapper (MonoClass *eclass, guint32 n);
+MonoObject *
+mono_object_new_wrapper (MonoClass *klass);
+MonoString*
+mono_string_new_wrapper (const char *text);
+MonoString*
+mono_ldstr_wrapper (MonoImage *image, guint32 index);
+
+gpointer
+get_mono_object_isinst (void);
+
+#define MB_OPT_LEVEL 1
+
+#if MB_OPT_LEVEL == 0
+#define MB_USE_OPT1(c) 65535
+#define MB_USE_OPT2(c) 65535
+#endif
+#if MB_OPT_LEVEL == 1
+#define MB_USE_OPT1(c) c
+#define MB_USE_OPT2(c) 65535
+#endif
+#if MB_OPT_LEVEL >= 2
+#define MB_USE_OPT1(c) c
+#define MB_USE_OPT2(c) c
+#endif
+
+//#define DEBUG
+
+/* Debug helper: emit code that printf()s `text`, the register number and
+ * the register's runtime value, preserving the caller-saved registers
+ * (EAX/EDX/ECX) around the call. Three 4-byte arguments are pushed for
+ * printf (format, reg, value), hence the 3*4 stack adjustment.
+ * Wrapped in do { } while (0) so the expansion is a single statement and
+ * is safe inside unbraced if/else bodies; the call site supplies the ';'.
+ */
+#define REAL_PRINT_REG(text,reg) \
+do { \
+mono_assert (reg >= 0); \
+x86_push_reg (s->code, X86_EAX); \
+x86_push_reg (s->code, X86_EDX); \
+x86_push_reg (s->code, X86_ECX); \
+x86_push_reg (s->code, reg); \
+x86_push_imm (s->code, reg); \
+x86_push_imm (s->code, text " %d %p\n"); \
+x86_mov_reg_imm (s->code, X86_EAX, printf); \
+x86_call_reg (s->code, X86_EAX); \
+x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 3*4); \
+x86_pop_reg (s->code, X86_ECX); \
+x86_pop_reg (s->code, X86_EDX); \
+x86_pop_reg (s->code, X86_EAX); \
+} while (0)
+
+#ifdef DEBUG
+#define MEMCOPY debug_memcpy
+void *MEMCOPY (void *dest, const void *src, size_t n);
+
+#define PRINT_REG(text,reg) REAL_PRINT_REG(text,reg)
+#else
+
+#define MEMCOPY memcpy
+
+#define PRINT_REG(x,y)
+
+#endif
+
+/* The call instruction for virtual functions must have a known
+ * size (used by x86_magic_trampoline)
+ */
+#define x86_call_virtual(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_address_byte ((inst), 2, 2, (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } while (0)
+
+/* Emit code that throws the named exception unless `cond` holds.
+ * The forward branch skips the 10 bytes of throw code when `cond` is
+ * true (push imm32 = 5 bytes + call rel32 = 5 bytes).
+ * Parameter renamed from `signed` (a C keyword — legal only because the
+ * preprocessor treats keywords as identifiers, but fragile) to is_signed.
+ * No semicolon after while (0): the call site's ';' completes the
+ * statement, so the macro nests correctly inside if/else.
+ */
+#define EMIT_COND_SYSTEM_EXCEPTION(cond,is_signed,exc_name) \
+ do { \
+ gpointer t; \
+ x86_branch8 (s->code, cond, 10, (is_signed)); \
+ x86_push_imm (s->code, exc_name); \
+ t = arch_get_throw_exception_by_name (); \
+ mono_add_jump_info (s, s->code + 1, t, NULL); \
+ x86_call_code (s->code, 0); \
+ } while (0)
%%
#
# constants
-%term CONST_I4 CONST_R8
-%term LDLOC LDARG STLOC BR RET RETV ARG CALL
-%term ADD SUB MUL
-%term BLT BEQ BGE BRTRUE
-%term CONV_I4 CONV_I1 CONV_I2
+%term CONST_I4 CONST_I8 CONST_R4 CONST_R8
+%term LDIND_I1 LDIND_U1 LDIND_I2 LDIND_U2 LDIND_I4 LDIND_REF LDIND_I8 LDIND_R4 LDIND_R8
+%term LDIND_U4 LDIND_OBJ
+%term STIND_I1 STIND_I2 STIND_I4 STIND_REF STIND_I8 STIND_R4 STIND_R8 STIND_OBJ
+%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ ARG_STRING CALL_I4 CALL_I8 CALL_R8 CALL_VOID
+%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY
+%term ADD ADD_OVF ADD_OVF_UN SUB SUB_OVF SUB_OVF_UN MUL MUL_OVF MUL_OVF_UN
+%term DIV DIV_UN REM REM_UN AND OR XOR SHL SHR SHR_UN NEG NOT
+%term BLT BLT_UN BEQ BNE_UN BRTRUE BRFALSE BGE BGE_UN BLE BLE_UN BGT BGT_UN
+%term CEQ CLT CLT_UN CGT CGT_UN
+%term CONV_I4 CONV_I1 CONV_I2 CONV_I8 CONV_U8 CONV_R4 CONV_R8 CONV_R_UN
+%term INTF_ADDR VFUNC_ADDR NOP NEWARR NEWOBJ NEWSTRUCT CPOBJ POP INITOBJ
+%term ISINST CASTCLASS UNBOX
+%term CONV_OVF_I1 CONV_OVF_U1 CONV_OVF_I2 CONV_OVF_U2 CONV_OVF_U4 CONV_OVF_U8 CONV_OVF_I4
+%term CONV_OVF_I4_UN CONV_OVF_U1_UN CONV_OVF_U2_UN
+%term CONV_OVF_I2_UN CONV_OVF_I8_UN CONV_OVF_I1_UN
+%term EXCEPTION THROW RETHROW HANDLER SAVE_LMF RESTORE_LMF
+%term LDLEN LDELEMA LDFTN TOSTRING LDSTR LDSFLDA
+
#
# we start at stmt
#
# tree definitions
#
-locaddr: LDLOC {
- /* nothing do do */;
+#
+# x86 addressing mode
+#
+
+acon: CONST_I4 {
+ tree->data.ainfo.offset = tree->data.i;
+ tree->data.ainfo.amode = AMImmediate;
}
-locaddr: LDARG {
- tree->data.i = tree->data.i + 8;
+acon: ADDR_G {
+ tree->data.ainfo.offset = tree->data.i;
+ tree->data.ainfo.amode = AMImmediate;
}
-# do nothing
-reg: CONV_I4 (reg)
+acon: ADD (ADDR_G, CONST_I4) {
+ tree->data.ainfo.offset = (unsigned)tree->left->data.p + tree->right->data.i;
+ tree->data.ainfo.amode = AMImmediate;
+}
-reg: CONV_I1 (reg) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_imm (*code, X86_AND, tree->reg, 0xff);
+base: acon
+
+base: reg {
+ tree->data.ainfo.offset = 0;
+ tree->data.ainfo.basereg = tree->reg1;
+ tree->data.ainfo.amode = AMBase;
}
-reg: CONV_I2 (reg) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_imm (*code, X86_AND, tree->reg, 0xffff);
+base: ADD (reg, acon) {
+ tree->data.ainfo.offset = tree->right->data.i;
+ tree->data.ainfo.basereg = tree->left->reg1;
+ tree->data.ainfo.amode = AMBase;
}
-reg: LDLOC {
- switch (tree->type) {
- case MONO_TYPE_I1:
- x86_widen_membase (*code, tree->reg, X86_EBP,
- tree->data.i, TRUE, FALSE);
- break;
- case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
- x86_widen_membase (*code, tree->reg, X86_EBP,
- tree->data.i, FALSE, FALSE);
- break;
- case MONO_TYPE_I2:
- x86_widen_membase (*code, tree->reg, X86_EBP,
- tree->data.i, TRUE, TRUE);
- break;
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- x86_widen_membase (*code, tree->reg, X86_EBP,
- tree->data.i, FALSE, TRUE);
- break;
+base: ADDR_L {
+ tree->data.ainfo.offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+ tree->data.ainfo.basereg = X86_EBP;
+ tree->data.ainfo.amode = AMBase;
+}
- case MONO_TYPE_I:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- x86_mov_reg_membase (*code, tree->reg, X86_EBP,
- tree->data.i, 4);
- break;
- default:
- g_warning ("unknown type %02x", tree->type);
- g_assert_not_reached ();
- }
+index: reg {
+ tree->data.ainfo.offset = 0;
+ tree->data.ainfo.indexreg = tree->reg1;
+ tree->data.ainfo.shift = 0;
+ tree->data.ainfo.amode = AMIndex;
+}
+
+index: SHL (reg, CONST_I4) {
+ tree->data.ainfo.offset = 0;
+ tree->data.ainfo.amode = AMIndex;
+ tree->data.ainfo.indexreg = tree->left->reg1;
+ tree->data.ainfo.shift = tree->right->data.i;
} cost {
- MBCOND (tree->type != MONO_TYPE_R4 &&
- tree->type != MONO_TYPE_R8 &&
- tree->type != MONO_TYPE_I8);
-
- return 1;
-}
-
-reg: LDARG {
- switch (tree->type) {
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- x86_mov_reg_membase (*code, tree->reg, X86_EBP,
- tree->data.i + 8, 4);
- break;
- default:
- g_warning ("unknown type %02x", tree->type);
- g_assert_not_reached ();
- }
+ MBCOND (tree->right->data.i == 0 ||
+ tree->right->data.i == 1 ||
+ tree->right->data.i == 2 ||
+ tree->right->data.i == 3);
+
+ return 0;
+}
+
+index: MUL (reg, CONST_I4) {
+ static int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
+
+ tree->data.ainfo.offset = 0;
+ tree->data.ainfo.amode = AMIndex;
+ tree->data.ainfo.indexreg = tree->left->reg1;
+ tree->data.ainfo.shift = fast_log2 [tree->right->data.i];
} cost {
- MBCOND (tree->type != MONO_TYPE_R4 &&
- tree->type != MONO_TYPE_R8 &&
- tree->type != MONO_TYPE_I8);
+ MBCOND (tree->right->data.i == 1 ||
+ tree->right->data.i == 2 ||
+ tree->right->data.i == 4 ||
+ tree->right->data.i == 8);
- return 1;
+ return 0;
}
+addr: base
-reg: MUL (reg, reg) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_imul_reg_reg (*code, tree->reg, tree->right->reg);
+addr: index
+
+addr: ADD (index, base) {
+ tree->data.ainfo.offset = tree->right->data.ainfo.offset;
+ tree->data.ainfo.basereg = tree->right->data.ainfo.basereg;
+ tree->data.ainfo.amode = tree->left->data.ainfo.amode |
+ tree->right->data.ainfo.amode;
+ tree->data.ainfo.shift = tree->left->data.ainfo.shift;
+ tree->data.ainfo.indexreg = tree->left->data.ainfo.indexreg;
}
-reg: ADD (reg, CONST_I4) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_imm (*code, X86_ADD, tree->reg, tree->right->data.i);
+# we pass exception in ECX to catch handler
+reg: EXCEPTION {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+
+ if (tree->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, tree->reg1, X86_ECX, 4);
+
+ /* store it so that we can RETHROW it later */
+ x86_mov_membase_reg (s->code, X86_EBP, offset, tree->reg1, 4);
}
-reg: ADD (reg, reg) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_reg (*code, X86_ADD, tree->reg, tree->right->reg);
+stmt: THROW (reg) {
+ gpointer target;
+
+ x86_push_reg (s->code, tree->left->reg1);
+ target = arch_get_throw_exception ();
+ mono_add_jump_info (s, s->code + 1, target, NULL);
+ x86_call_code (s->code, target);
+}
+
+stmt: RETHROW {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+ gpointer target;
+
+ x86_push_membase (s->code, X86_EBP, offset);
+ target = arch_get_throw_exception ();
+ mono_add_jump_info (s, s->code + 1, target, NULL);
+ x86_call_code (s->code, target);
}
-reg: SUB (reg, CONST_I4) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_imm (*code, X86_SUB, tree->reg, tree->right->data.i);
+stmt: HANDLER {
+ mono_add_jump_info (s, s->code + 1, NULL, tree->data.bb);
+ x86_call_imm (s->code, 0);
}
-reg: SUB (reg, reg) {
- if (tree->reg != tree->left->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->left->reg, 4);
- x86_alu_reg_reg (*code, X86_SUB, tree->reg, tree->right->reg);
+stmt: ENDFINALLY {
+ x86_ret (s->code);
+}
+
+stmt: SAVE_LMF {
+ /* save all caller saved regs */
+ x86_push_reg (s->code, X86_EBX);
+ x86_push_reg (s->code, X86_EDI);
+ x86_push_reg (s->code, X86_ESI);
+ x86_push_reg (s->code, X86_EBP);
+
+ /* save the IP */
+ x86_push_imm (s->code, s->code);
+
+ /* save method info */
+ x86_push_imm (s->code, tree->data.m);
+ /* get the address of lmf for the current thread */
+ mono_add_jump_info (s, s->code + 1, arch_get_lmf_addr, NULL);
+ x86_call_code (s->code, arch_get_lmf_addr);
+ /* push lmf */
+ x86_push_reg (s->code, X86_EAX);
+ /* push *lfm (previous_lmf) */
+ x86_push_membase (s->code, X86_EAX, 0);
+ /* *(lmf) = ESP */
+ x86_mov_membase_reg (s->code, X86_EAX, 0, X86_ESP, 4);
}
-stmt: STLOC (CONST_I4) {
- x86_mov_membase_imm (*code, X86_EBP, tree->data.i,
- tree->left->data.i, 4);
+stmt: RESTORE_LMF {
+ /* ebx = previous_lmf */
+ x86_pop_reg (s->code, X86_EBX);
+ /* edi = lmf */
+ x86_pop_reg (s->code, X86_EDI);
+ /* *(lmf) = previous_lmf */
+ x86_mov_membase_reg (s->code, X86_EDI, 0, X86_EBX, 4);
+
+ /* discard method info */
+ x86_pop_reg (s->code, X86_ESI);
+
+ /* discard save IP */
+ x86_pop_reg (s->code, X86_ESI);
+
+ /* restore caller saved regs */
+ x86_pop_reg (s->code, X86_EBP);
+ x86_pop_reg (s->code, X86_ESI);
+ x86_pop_reg (s->code, X86_EDI);
+ x86_pop_reg (s->code, X86_EBX);
}
-stmt: STLOC (reg) {
- switch (tree->type) {
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
- x86_mov_membase_reg (*code, X86_EBP, tree->data.i,
- tree->left->reg, 1);
+stmt: STIND_I4 (addr, reg) {
+ PRINT_REG ("STIND_I4", tree->right->reg1);
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 4);
break;
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- x86_mov_membase_reg (*code, X86_EBP, tree->data.i,
- tree->left->reg, 2);
+
+ case AMBase:
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ break;
+ case AMIndex:
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ break;
+ }
+}
+
+stmt: STIND_REF (addr, reg) {
+ PRINT_REG ("STIND_REF", tree->right->reg1);
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 4);
break;
- case MONO_TYPE_I:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- x86_mov_membase_reg (*code, X86_EBP, tree->data.i,
- tree->left->reg, 4);
+
+ case AMBase:
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ break;
+ case AMIndex:
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ break;
+ }
+}
+
+stmt: STIND_I1 (addr, reg) {
+ PRINT_REG ("STIND_I1", tree->right->reg1);
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 1);
break;
- default:
- g_warning ("unknown type %02x", tree->type);
- g_assert_not_reached ();
+
+ case AMBase:
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->reg1, 1);
+ break;
+ case AMIndex:
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 1);
+ break;
+ case AMBaseIndex:
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 1);
+ break;
}
}
-stmt: BR {
- tree->is_jump = 1;
- x86_jump32 (*code, tree->data.i - 5);
+stmt: STIND_I2 (addr, reg) {
+ PRINT_REG ("STIND_I2", tree->right->reg1);
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 2);
+ break;
+
+ case AMBase:
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->reg1, 2);
+ break;
+ case AMIndex:
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 2);
+ break;
+ case AMBaseIndex:
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 2);
+ break;
+ }
}
-stmt: BLT (reg, CONST_I4) {
- guint8 *start = *code;
- gint32 offset;
+reg: LDIND_I4 (addr) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
+ break;
+
+ case AMBase:
+ x86_mov_reg_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, 4);
+ break;
+ case AMIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, 4);
+ break;
+ }
+
- tree->is_jump = 1;
- x86_alu_reg_imm (*code, X86_CMP, tree->left->reg, tree->right->data.i);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_LT, tree->data.i - offset, TRUE);
+ PRINT_REG ("LDIND_I4", tree->reg1);
}
-stmt: BEQ (reg, CONST_I4) {
- guint8 *start = *code;
- gint32 offset;
+reg: LDIND_REF (addr) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
+ break;
+
+ case AMBase:
+ x86_mov_reg_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, 4);
+ break;
+ case AMIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, 4);
+ break;
+ }
+
- tree->is_jump = 1;
- x86_alu_reg_imm (*code, X86_CMP, tree->left->reg, tree->right->data.i);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_EQ, tree->data.i - offset, TRUE);
+ PRINT_REG ("LDIND_REF", tree->reg1);
}
-stmt: BGE (reg, CONST_I4) {
- guint8 *start = *code;
- gint32 offset;
+reg: LDIND_I1 (addr) {
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_widen_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, TRUE, FALSE);
+ break;
+
+ case AMBase:
+ x86_widen_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, TRUE, FALSE);
+ break;
+ case AMIndex:
+ x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, TRUE, FALSE);
+ break;
+ case AMBaseIndex:
+ x86_widen_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, TRUE, FALSE);
+ break;
+ }
- tree->is_jump = 1;
- x86_alu_reg_imm (*code, X86_CMP, tree->left->reg, tree->right->data.i);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_GE, tree->data.i - offset, TRUE);
+ PRINT_REG ("LDIND_I1", tree->reg1);
}
-stmt: BRTRUE (reg) {
- guint8 *start = *code;
- gint32 offset;
+reg: LDIND_U1 (addr) {
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_widen_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, FALSE, FALSE);
+ break;
+
+ case AMBase:
+ x86_widen_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, FALSE, FALSE);
+ break;
+ case AMIndex:
+ x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, FALSE, FALSE);
+ break;
+ case AMBaseIndex:
+ x86_widen_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, FALSE, FALSE);
+ break;
+ }
- tree->is_jump = 1;
- x86_alu_reg_imm (*code, X86_CMP, tree->left->reg, 0);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_NE, tree->data.i - offset, TRUE);
+ PRINT_REG ("LDIND_U1", tree->reg1);
}
-stmt: RETV (reg) {
+reg: LDIND_I2 (addr) {
+ switch (tree->left->data.ainfo.amode) {
- if (!tree->last_instr) {
- tree->is_jump = 1;
- x86_jump32 (*code, tree->data.i - 5);
+ case AMImmediate:
+ x86_widen_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, TRUE, TRUE);
+ break;
+
+ case AMBase:
+ x86_widen_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, TRUE, TRUE);
+ break;
+ case AMIndex:
+ x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, TRUE, TRUE);
+ break;
+ case AMBaseIndex:
+ x86_widen_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, TRUE, TRUE);
+ break;
}
- g_assert (tree->left->reg == X86_EAX); // return must be in EAX
+ PRINT_REG ("LDIND_U2", tree->reg1);
}
-stmt: RET {
- if (!tree->last_instr) {
- tree->is_jump = 1;
- x86_jump32 (*code, tree->data.i - 5);
- }
+reg: LDIND_U2 (addr) {
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_widen_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, FALSE, TRUE);
+ break;
+
+ case AMBase:
+ x86_widen_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, FALSE, TRUE);
+ break;
+ case AMIndex:
+ x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, FALSE, TRUE);
+ break;
+ case AMBaseIndex:
+ x86_widen_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, FALSE, TRUE);
+ break;
+ }
+
+ PRINT_REG ("LDIND_U2", tree->reg1);
}
-stmt: ARG (CONST_I4) {
- x86_push_imm (*code, tree->left->data.i);
-}
-
-stmt: ARG (reg) {
- x86_push_reg (*code, tree->left->reg);
-}
-
-reg: CALL {
- switch (tree->type) {
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- x86_mov_reg_imm (*code, X86_EAX, tree->data.p);
- x86_call_membase (*code, X86_EAX,
- G_STRUCT_OFFSET (MonoMethod, addr));
- g_assert (tree->reg == X86_EAX);
+reg: LDIND_U4 (addr) {
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
break;
- default:
- g_warning ("unknown type %02x", tree->type);
- g_assert_not_reached ();
+
+ case AMBase:
+ x86_mov_reg_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, 4);
+ break;
+ case AMIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_reg_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift, 4);
+ break;
}
-} cost {
- MBCOND (tree->type != MONO_TYPE_R4 &&
- tree->type != MONO_TYPE_R8 &&
- tree->type != MONO_TYPE_I8);
- return 1;
+ PRINT_REG ("LDIND_U4", tree->reg1);
+}
+
+reg: ADDR_L 5 {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+ x86_lea_membase (s->code, tree->reg1, X86_EBP, offset);
+
+ PRINT_REG ("ADDR_L", tree->reg1);
}
-stmt: CALL {
- x86_mov_reg_imm (*code, X86_EAX, tree->data.p);
- x86_call_membase (*code, X86_EAX, G_STRUCT_OFFSET (MonoMethod, addr));
+reg: ADDR_G 5 {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->data.p);
}
-#
-# floating point
-# fixme: dont know how to assign registers?
+reg: CONV_I1 (reg) {
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xff);
-freg: CONST_R8 {
- x86_fld (*code, tree->data.p, TRUE);
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-freg: LDLOC {
- x86_fld_membase (*code, X86_EBP, tree->data.i,
- tree->type == MONO_TYPE_R8);
-} cost {
- MBCOND (tree->type == MONO_TYPE_R4 ||
- tree->type == MONO_TYPE_R8);
- return 0;
+reg: CONV_I2 (reg) {
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xffff);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-freg: LDARG {
- x86_fld_membase (*code, X86_EBP, tree->data.i + 8,
- tree->type == MONO_TYPE_R8);
-} cost {
- MBCOND (tree->type == MONO_TYPE_R4 ||
- tree->type == MONO_TYPE_R8);
- return 0;
+reg: CONST_I4 1 {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->data.i);
}
-freg: ADD (freg, freg) {
- x86_fp_op_reg (*code, X86_FADD, 1, TRUE);
+reg: CONV_I4 (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ PRINT_REG ("CONV_I4", tree->left->reg1);
+}
+
+reg: CONV_OVF_U4 (reg) {
+	/* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */
+	/* overflow iff the sign bit (bit 31) is set: mask must be 0x80000000,
+	 * not 0x8000000 (bit 27), or values >= 2^31 slip through unchecked */
+	x86_test_reg_imm (s->code, tree->left->reg1, 0x80000000);
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-stmt: STLOC (freg) {
- g_assert (tree->type == MONO_TYPE_R4 ||
- tree->type == MONO_TYPE_R8);
+reg: CONV_OVF_I4_UN (reg) {
+	/* Keep in sync with CONV_OVF_U4 above, they are the same on 32-bit machines */
+	/* overflow iff the sign bit (bit 31) is set: mask must be 0x80000000,
+	 * not 0x8000000 (bit 27), or values >= 2^31 slip through unchecked */
+	x86_test_reg_imm (s->code, tree->left->reg1, 0x80000000);
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
- x86_fst_membase (*code, X86_EBP, tree->data.i,
- (tree->type == MONO_TYPE_R8), TRUE);
+reg: CONV_OVF_I1 (reg) {
+ /* probe value to be within -128 to 127 */
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 127);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException");
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -128);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, TRUE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-stmt: ARG (freg) {
- x86_alu_reg_imm (*code, X86_SUB, X86_ESP, 8);
- x86_fst_membase (*code, X86_ESP, 0, TRUE, TRUE);
+reg: CONV_OVF_I1_UN (reg) {
+ /* probe values between 0 to 128 */
+ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff80);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-stmt: BEQ (freg, freg) {
- guint8 *start = *code;
- gint32 offset;
-
- tree->is_jump = 1;
- x86_fcompp (*code);
- x86_fnstsw (*code);
- x86_alu_reg_imm (*code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (*code, X86_CMP, X86_EAX, 0x4000);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_EQ, tree->data.i - offset, TRUE);
-}
-
-freg: CALL {
- x86_mov_reg_imm (*code, X86_EAX, tree->data.p);
- x86_call_membase (*code, X86_EAX,
- G_STRUCT_OFFSET (MonoMethod, addr));
-} cost {
- MBCOND (tree->type == MONO_TYPE_R4 ||
- tree->type == MONO_TYPE_R8);
- return 0;
+reg: CONV_OVF_U1 (reg) {
+ /* Keep in sync with CONV_OVF_U1_UN routine below, they are the same on 32-bit machines */
+ /* probe value to be within 0 to 255 */
+ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-stmt: RETV (freg) {
+reg: CONV_OVF_U1_UN (reg) {
+ /* Keep in sync with CONV_OVF_U1 routine above, they are the same on 32-bit machines */
+ /* probe value to be within 0 to 255 */
+ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
- if (!tree->last_instr) {
- tree->is_jump = 1;
- x86_jump32 (*code, tree->data.i - 5);
+reg: CONV_OVF_I2 (reg) {
+ /* Probe value to be within -32768 and 32767 */
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 32767);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException");
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -32768);
+ // fixme: check branch
+ mono_assert_not_reached ();
+ x86_branch8 (s->code, X86_CC_LT, -17, TRUE);
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: CONV_OVF_U2 (reg) {
+	/* Keep in sync with CONV_OVF_U2_UN below, they are the same on 32-bit machines */
+	/* Probe value to be within 0 and 65535 */
+	x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
+	/* the zero-flag test is signedness-independent, so pass FALSE to be
+	 * literally identical to CONV_OVF_U2_UN as the comment above promises */
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: CONV_OVF_U2_UN (reg) {
+ /* Keep in sync with CONV_OVF_U2 above, they are the same on 32-bit machines */
+ /* Probe value to be within 0 and 65535 */
+ x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: CONV_OVF_I2_UN (reg) {
+ /* Convert uint value into short, value within 0 and 32767 */
+ x86_test_reg_imm (s->code, tree->left->reg1, 0xffff8000);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: MUL (reg, reg) {
+ x86_imul_reg_reg (s->code, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: MUL_OVF (reg, reg) {
+ x86_imul_reg_reg (s->code, tree->left->reg1, tree->right->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: MUL_OVF_UN (reg, reg) {
+ //fixme: implement me
+ mono_assert_not_reached ();
+}
+
+reg: DIV (reg, reg) {
+ mono_assert (tree->right->reg1 != X86_EAX);
+
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+ x86_cdq (s->code);
+ x86_div_reg (s->code, tree->right->reg1, TRUE);
+
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
+reg: DIV_UN (reg, reg) {
+	/* EDX is clobbered below, so the divisor may live in neither EAX nor
+	 * EDX (same constraints REM_UN enforces) */
+	mono_assert (tree->right->reg1 != X86_EAX);
+	mono_assert (tree->right->reg1 != X86_EDX);
+
+	if (tree->left->reg1 != X86_EAX)
+		x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+	/* unsigned divide: zero extend to 64bit in EAX/EDX — cdq would
+	 * sign-extend and give wrong quotients for dividends >= 0x80000000 */
+	x86_mov_reg_imm (s->code, X86_EDX, 0);
+	x86_div_reg (s->code, tree->right->reg1, FALSE);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+# 32 bit remainder: divide leaves the remainder in EDX, which is then
+# copied into EAX because the result register (reg1) is EAX
+reg: REM (reg, reg) {
+ mono_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EDX);
+
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+ /* sign extend to 64bit in EAX/EDX */
+ x86_cdq (s->code);
+ x86_div_reg (s->code, tree->right->reg1, TRUE);
+ x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4);
+
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
+reg: REM_UN (reg, reg) {
+ mono_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EDX);
+
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+ /* zero extend to 64bit in EAX/EDX */
+ x86_mov_reg_imm (s->code, X86_EDX, 0);
+ x86_div_reg (s->code, tree->right->reg1, FALSE);
+ x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4);
+
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
+# 32 bit addition / subtraction; constant variants use inc/dec for +-1,
+# _OVF variants trap on signed overflow (OF), _OVF_UN on carry (CF)
+reg: ADD (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ if (tree->right->data.i == 1)
+ x86_inc_reg (s->code, tree->left->reg1);
+ else
+ x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+}
+
+reg: ADD (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: ADD_OVF (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: ADD_OVF_UN (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SUB (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ if (tree->right->data.i == 1)
+ x86_dec_reg (s->code, tree->left->reg1);
+ else
+ x86_alu_reg_imm (s->code, X86_SUB, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SUB (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SUB_OVF (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SUB_OVF_UN (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+# comparison opcodes producing 0/1: cmp, setcc into the 8 bit result
+# register, then zero-extend to 32 bit (the setcc signed flag selects
+# signed vs unsigned condition codes)
+reg: CEQ (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+}
+
+reg: CGT (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_set_reg (s->code, X86_CC_GT, tree->reg1, TRUE);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+}
+
+reg: CGT_UN (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_set_reg (s->code, X86_CC_GT, tree->reg1, FALSE);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+}
+
+reg: CLT (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_set_reg (s->code, X86_CC_LT, tree->reg1, TRUE);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+}
+
+reg: CLT_UN (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_set_reg (s->code, X86_CC_LT, tree->reg1, FALSE);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+}
+
+# bitwise and unary 32 bit operations; the operation is performed in
+# place on the left operand register, then moved to reg1 if they differ
+reg: AND (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_AND, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: OR (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_OR, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: XOR (reg, reg) {
+ x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->right->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: NEG (reg) {
+ x86_neg_reg (s->code, tree->left->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: NOT (reg) {
+ x86_not_reg (s->code, tree->left->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+# shift operations; variable shift counts must go through CL, so the
+# register-count variants copy the count into ECX first and assert that
+# neither the value nor the result was allocated to ECX
+reg: SHL (reg, CONST_I4) {
+ x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SHL (reg, reg) {
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ x86_shift_reg (s->code, X86_SHL, tree->left->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
+}
+
+reg: SHR (reg, CONST_I4) {
+ /* SHR is the arithmetic (sign preserving) right shift: SAR */
+ x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SHR (reg, reg) {
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ x86_shift_reg (s->code, X86_SAR, tree->left->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
+}
+
+reg: SHR_UN (reg, CONST_I4) {
+ /* SHR_UN is the logical (zero filling) right shift: SHR */
+ x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: SHR_UN (reg, reg) {
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ x86_shift_reg (s->code, X86_SHR, tree->left->reg1);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
+}
+
+# load static field address: calls the mono_ldsflda runtime helper,
+# preserving the caller-saved registers around the call (EAX is only
+# saved when it is not itself the result register)
+reg: LDSFLDA (CONST_I4) {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->left->data.i);
+ x86_push_imm (s->code, tree->data.klass);
+ mono_add_jump_info (s, s->code + 1, mono_ldsflda, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
 }
+}
- //g_assert (tree->left->reg == X86_EAX); // return must be in EAX
+# array support
+reg: LDLEN (reg) {
+ /* null check the array object, then load obj->bounds->length */
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, TRUE, "NullReferenceException");
+
+ x86_mov_reg_membase (s->code, tree->reg1, tree->left->reg1,
+ G_STRUCT_OFFSET (MonoArray, bounds), 4);
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1,
+ G_STRUCT_OFFSET (MonoArrayBounds, length), 4);
 }
+# load element address: bounds check against max_length, then compute
+# &vector[index * element_size]; power-of-two element sizes use a
+# single lea with a scaled index
+reg: LDELEMA (reg, reg) {
+ x86_alu_reg_membase (s->code, X86_CMP, tree->right->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length));
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, FALSE, "IndexOutOfRangeException");
+
+ if (tree->data.i == 1 || tree->data.i == 2 ||
+ tree->data.i == 4 || tree->data.i == 8) {
+ static int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
+ x86_lea_memindex (s->code, tree->reg1, tree->left->reg1,
+ G_STRUCT_OFFSET (MonoArray, vector), tree->right->reg1,
+ fast_log2 [tree->data.i]);
+ } else {
+ /* NOTE(review): this branch adds to tree->reg1 without loading
+ * the array pointer first - presumably the register allocator
+ * guarantees reg1 == left->reg1 here; verify */
+ x86_imul_reg_reg_imm (s->code, tree->right->reg1, tree->right->reg1, tree->data.i);
+ x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->right->reg1);
+ x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, G_STRUCT_OFFSET (MonoArray, vector));
+ }
+}
-#
-# just some optimizazions
-#
+# load string literal: calls the mono_ldstr_wrapper runtime helper with
+# (image, token), preserving caller-saved registers around the call
+reg: LDSTR {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->data.p);
+ x86_push_imm (s->code, s->method->klass->image);
+ mono_add_jump_info (s, s->code + 1, mono_ldstr_wrapper, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
-reg: ADD (locaddr, reg) {
- if (tree->reg != tree->right->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->right->reg, 4);
- x86_alu_reg_membase (*code, X86_ADD, tree->reg, X86_EBP,
- tree->left->data.i);
-} cost {
- MBCOND (tree->left->type == MONO_TYPE_I4 ||
- tree->left->type == MONO_TYPE_U4);
- return 0;
+ PRINT_REG ("LDSTR", tree->reg1);
 }
-reg: MUL (locaddr, reg) {
- if (tree->reg != tree->right->reg)
- x86_mov_reg_reg (*code, tree->reg, tree->right->reg, 4);
- x86_imul_reg_membase (*code, tree->reg, X86_EBP, tree->left->data.i);
-} cost {
- MBCOND (tree->left->type == MONO_TYPE_I4 ||
- tree->left->type == MONO_TYPE_U4);
- return 0;
+# wrap a char pointer into a MonoString via mono_string_new_wrapper;
+# emitted in two passes so the forward branch displacement over the
+# call sequence (taken when the pointer is NULL) is known on pass two
+reg: TOSTRING (reg) {
+ guint8 *start = s->code, *l1, *le;
+ int i;
+
+ l1 = le = NULL;
+
+ for (i = 0; i < 2; i++) {
+ s->code = start;
+
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ x86_branch8 (s->code, X86_CC_EQ, le - l1, FALSE);
+ l1 = s->code;
+
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 1, mono_string_new_wrapper, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+ le = s->code;
+ }
 }
-stmt: BEQ (locaddr, CONST_I4) {
- guint8 *start = *code;
- gint32 offset;
+# allocate a new array: calls mono_array_new_wrapper (klass, length),
+# preserving caller-saved registers around the call
+reg: NEWARR (reg) {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_push_imm (s->code, tree->data.p);
+ mono_add_jump_info (s, s->code + 1, mono_array_new_wrapper, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer) + 4);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
- tree->is_jump = 1;
- x86_alu_membase_imm (*code, X86_CMP, X86_EBP, tree->left->data.i, tree->right->data.i);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_EQ, tree->data.i - offset, TRUE);
-} cost {
- MBCOND (tree->left->type == MONO_TYPE_I4 ||
- tree->left->type == MONO_TYPE_U4);
- return 0;
+ PRINT_REG ("NEWARR", tree->reg1);
+}
+
+# allocate a new object: calls mono_object_new_wrapper (klass),
+# preserving caller-saved registers around the call
+reg: NEWOBJ {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->data.klass);
+ mono_add_jump_info (s, s->code + 1, mono_object_new_wrapper, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+ PRINT_REG ("NEWOBJ", tree->reg1);
 }
-stmt: BGE (locaddr, CONST_I4) {
- guint8 *start = *code;
- gint32 offset;
+# allocate stack space for a value type: size rounded up to a 4 byte
+# multiple, result register points at the new ESP
+reg: NEWSTRUCT {
+ int size = tree->data.i;
+ int sa;
+
+ mono_assert (size > 0);
- tree->is_jump = 1;
- x86_alu_membase_imm (*code, X86_CMP, X86_EBP, tree->left->data.i,
- tree->right->data.i);
- offset = 6 + *code - start;
- x86_branch32 (*code, X86_CC_GE, tree->data.i - offset, TRUE);
-} cost {
- MBCOND (tree->left->type == MONO_TYPE_I4 ||
- tree->left->type == MONO_TYPE_U4);
- return 0;
+ /* round size up to the next multiple of 4 */
+ sa = size + 3;
+ sa &= ~3;
+
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa);
+ x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4);
 }
-stmt: STLOC (ADD (LDLOC, CONST_I4)) {
- x86_alu_membase_imm (*code, X86_ADD, X86_EBP, tree->data.i,
- tree->left->right->data.i);
-} cost {
- MBCOND (tree->type == MONO_TYPE_I4 &&
- tree->left->left->type == MONO_TYPE_I4 &&
- tree->data.i == tree->left->left->data.i);
- return 0;
+# unbox: null check, then verify the boxed object's element class via
+# obj->vtable->klass before returning a pointer past the MonoObject
+# header; the object pointer is saved/restored around the class check
+reg: UNBOX (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+ x86_test_reg_reg (s->code, tree->reg1, tree->reg1);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, TRUE, "NullReferenceException");
+
+ x86_push_reg (s->code, tree->reg1);
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4);
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, tree->reg1,
+ G_STRUCT_OFFSET (MonoClass, element_class), ((int)(tree->data.klass->element_class)));
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "InvalidCastException");
+ x86_pop_reg (s->code, tree->reg1);
+ x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, sizeof (MonoObject));
 }
+# castclass: NULL casts to any class; otherwise call mono_object_isinst
+# and throw InvalidCastException when it returns NULL. Two emission
+# passes resolve the forward branch over the call sequence.
+reg: CASTCLASS (reg) {
+ guint8 *start = s->code, *l1, *l2, *le;
+ int i;
-%%
+ l1 = l2 = le = NULL;
-gboolean
-same_tree (MBTree *t1, MBTree *t2)
-{
- if (t1 == t2)
- return TRUE;
- if (!t1 || !t2)
- return FALSE;
+ for (i = 0; i < 2; i++) {
+ s->code = start;
+
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+
+ /* NOTE(review): when the object is NULL this branch jumps past
+ * the pop of EAX that matches the push above - looks like an ESP
+ * imbalance on the NULL path when reg1 != EAX; verify */
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ x86_branch8 (s->code, X86_CC_EQ, le - l2, FALSE);
+ l2 = s->code;
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->data.klass);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 1, mono_object_isinst, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, TRUE, "InvalidCastException");
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+ le = s->code;
- if (t1->op == t2->op &&
- t1->type == t2->type &&
- t1->data.i == t2->data.i) {
- if (!same_tree (t1->left, t2->left))
- return FALSE;
- return same_tree (t1->right, t2->right);
 }
- return FALSE;
 }
+
+# isinst: call mono_object_isinst and return its result (the object or
+# NULL); caller-saved registers are preserved around the call
+reg: ISINST (reg) {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->data.klass);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 1, mono_object_isinst, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+}
+
+stmt: INITOBJ (reg) {
+ int i, j;
+
+ i = tree->data.i;
+
+ if (i == 1 || i == 2 || i == 4) {
+ int t = X86_ECX;
+
+ if (tree->left->reg1 != X86_EAX)
+ t = X86_EAX;
+
+ x86_push_reg (s->code, t);
+ x86_alu_reg_reg (s->code, X86_XOR, t, t);
+
+ switch (tree->data.i) {
+ case 4:
+ x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
+ break;
+ case 2:
+ x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
+ break;
+ case 1:
+ x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
+ break;
+ }
+ x86_pop_reg (s->code, t);
+
+ return;
+ }
+
+ i = tree->data.i / 4;
+ j = tree->data.i % 4;
+
+ x86_push_reg (s->code, X86_EAX);
+
+ if (tree->left->reg1 != X86_EDI) {
+ x86_push_reg (s->code, X86_EDI);
+ x86_mov_reg_reg (s->code, X86_EDI, tree->left->reg1, 4);
+ }
+
+ if (i) {
+ x86_push_reg (s->code, X86_ECX);
+ x86_alu_reg_reg (s->code, X86_XOR, X86_EAX, X86_EAX);
+ x86_mov_reg_imm (s->code, X86_ECX, i);
+ x86_cld (s->code);
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_stosl (s->code);
+ x86_pop_reg (s->code, X86_ECX);
+ }
+
+
+ for (i = 0; i < j; i++)
+ x86_stosb (s->code);
+
+ if (tree->left->reg1 != X86_EDI)
+ x86_pop_reg (s->code, X86_EDI);
+
+ x86_pop_reg (s->code, X86_EAX);
+}
+
+stmt: NOP
+
+stmt: POP (reg)
+
+# unconditional branch: target basic block is patched in later via the
+# jump-info list (displacement emitted as 0 here)
+stmt: BR {
+ mono_add_jump_info (s, s->code + 1, NULL, tree->data.bb)
+ x86_jump32 (s->code, 0);
+}
+
+# conditional branches: cmp (or test) then a 32 bit jcc whose target is
+# patched later via mono_add_jump_info (s->code + 2 is the address of
+# the displacement field); the last x86_branch32 argument selects the
+# signed vs unsigned condition code
+stmt: BLT (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+}
+
+stmt: BLT (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+}
+
+stmt: BLT_UN (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+}
+
+stmt: BLT_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+}
+
+stmt: BGT (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+}
+
+stmt: BGT (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+}
+
+stmt: BGT_UN (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+}
+
+stmt: BGT_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+}
+
+stmt: BEQ (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+}
+
+stmt: BEQ (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+}
+
+stmt: BNE_UN (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+}
+
+stmt: BNE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+}
+
+stmt: BGE (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GE, 0, TRUE);
+}
+
+stmt: BGE (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GE, 0, TRUE);
+}
+
+stmt: BGE_UN (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+}
+
+stmt: BGE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+}
+
+stmt: BLE (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LE, 0, TRUE);
+}
+
+stmt: BLE (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LE, 0, TRUE);
+}
+
+stmt: BLE_UN (reg, reg) 1 {
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+}
+
+stmt: BLE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+}
+
+stmt: BRTRUE (reg) {
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, TRUE);
+}
+
+stmt: BRFALSE (reg) {
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+}
+
+stmt: BREAK {
+ /* int3 - debugger breakpoint */
+ x86_breakpoint (s->code);
+}
+
+# method return: value goes in EAX; unless this is the final tree of
+# the method, jump to the (patched-in) epilogue
+stmt: RET (reg) {
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+ if (!tree->last_instr) {
+ mono_add_jump_info (s, s->code + 1, NULL, NULL);
+ x86_jump32 (s->code, 0);
+ }
+}
+
+stmt: RET_VOID {
+ if (!tree->last_instr) {
+ mono_add_jump_info (s, s->code + 1, NULL, NULL);
+ x86_jump32 (s->code, 0);
+ }
+}
+
+
+# push call arguments; the LDIND variants push straight from memory
+# using the decoded addressing mode instead of loading into a register
+stmt: ARG_I4 (LDIND_I4 (addr)) {
+ MBTree *at = tree->left->left;
+
+ switch (at->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_push_mem (s->code, at->data.ainfo.offset);
+ break;
+
+ case AMBase:
+ x86_push_membase (s->code, at->data.ainfo.basereg, at->data.ainfo.offset);
+ break;
+ case AMIndex:
+ x86_push_memindex (s->code, X86_NOBASEREG, at->data.ainfo.offset,
+ at->data.ainfo.indexreg, at->data.ainfo.shift);
+ break;
+ case AMBaseIndex:
+ x86_push_memindex (s->code, at->data.ainfo.basereg,
+ at->data.ainfo.offset, at->data.ainfo.indexreg,
+ at->data.ainfo.shift);
+ break;
+ }
+}
+
+# identical to the LDIND_I4 variant: a 32 bit push does not care about
+# signedness
+stmt: ARG_I4 (LDIND_U4 (addr)) {
+ MBTree *at = tree->left->left;
+
+ switch (at->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_push_mem (s->code, at->data.ainfo.offset);
+ break;
+
+ case AMBase:
+ x86_push_membase (s->code, at->data.ainfo.basereg, at->data.ainfo.offset);
+ break;
+ case AMIndex:
+ x86_push_memindex (s->code, X86_NOBASEREG, at->data.ainfo.offset,
+ at->data.ainfo.indexreg, at->data.ainfo.shift);
+ break;
+ case AMBaseIndex:
+ x86_push_memindex (s->code, at->data.ainfo.basereg,
+ at->data.ainfo.offset, at->data.ainfo.indexreg,
+ at->data.ainfo.shift);
+ break;
+ }
+}
+
+stmt: ARG_I4 (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ PRINT_REG ("ARG_I4", tree->left->reg1);
+}
+
+# fixme: we must free the allocated strings somewhere
+# converts a MonoString argument to utf8 for a native callee: reserves
+# the argument slot first, then stores the helper's result into it
+# (at ESP+12, above the three saved scratch registers)
+stmt: ARG_STRING (reg) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 1, mono_string_to_utf8, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+
+ x86_mov_membase_reg (s->code, X86_ESP, 12, X86_EAX, 4);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ x86_pop_reg (s->code, X86_EAX);
+}
+
+stmt: ARG_I4 (ADDR_G) {
+ x86_push_imm (s->code, tree->left->data.p);
+}
+
+stmt: ARG_I4 (CONST_I4) "MB_USE_OPT1(0)" {
+ x86_push_imm (s->code, tree->left->data.i);
+}
+
+# the "this" nonterminal: either a register (instance call) or NOP
+# (static call)
+this: reg {
+ PRINT_REG ("THIS", tree->reg1);
+}
+
+this: NOP
+
+# indirect call through a register; pushes "this" (if any) and a hidden
+# valuetype-return pointer (if any), result in EAX
+reg: CALL_I4 (this, reg) {
+ MethodCallInfo *ci = tree->data.ci;
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ /* pick a scratch register distinct from "this" and the target */
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ x86_call_reg (s->code, rreg);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+ PRINT_REG ("CALL_I4", tree->reg1);
+
+ mono_assert (tree->reg1 == X86_EAX);
+}
+
+# direct call to a known address (patched via jump info)
+reg: CALL_I4 (this, ADDR_G) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ mono_add_jump_info (s, s->code + 1, tree->right->data.p, NULL);
+ x86_call_code (s->code, 0);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+ PRINT_REG ("CALL_I4", tree->reg1);
+
+ mono_assert (tree->reg1 == X86_EAX);
+}
+
+# load an interface method pointer:
+# obj->vtable->interface_offsets[interface_id][slot]
+reg: LDFTN (reg, INTF_ADDR) {
+ int lreg = tree->left->reg1;
+
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg,
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+ x86_mov_reg_membase (s->code, tree->reg1, lreg, tree->right->data.m->slot << 2, 4);
+}
+
+# interface call: resolve the slot through the interface offset table
+reg: CALL_I4 (this, INTF_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg,
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+ x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+ PRINT_REG ("CALL_I4(INTERFACE)", tree->reg1);
+
+ mono_assert (tree->reg1 == X86_EAX);
+}
+
+# load a virtual method pointer: obj->vtable->vtable[slot]
+reg: LDFTN (reg, VFUNC_ADDR) {
+ int lreg = tree->left->reg1;
+
+ x86_mov_reg_membase (s->code, tree->reg1, lreg, 0, 4);
+
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2), 4);
+}
+
+# virtual call: dispatch through obj->vtable->vtable[slot]
+reg: CALL_I4 (this, VFUNC_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_call_virtual (s->code, lreg,
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+ PRINT_REG ("CALL_I4(VIRTUAL)", tree->reg1);
+
+ mono_assert (tree->reg1 == X86_EAX);
+}
+
+# void-returning call variants: identical call sequences to the CALL_I4
+# rules above, minus the result register assertion
+stmt: CALL_VOID (this, ADDR_G) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ mono_add_jump_info (s, s->code + 1, tree->right->data.p, NULL);
+ x86_call_code (s->code, 0);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+}
+
+stmt: CALL_VOID (this, INTF_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg,
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+ x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+}
+
+stmt: CALL_VOID (this, VFUNC_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_call_virtual (s->code, lreg,
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+}
+
+# switch: jt[0] holds the case count, jt[1..n] the case targets and
+# jt[n+1] the default target; out-of-range values (unsigned compare)
+# jump to the default, otherwise jump indirect through the table
+stmt: SWITCH (reg) {
+ guint32 offset;
+ guint32 *jt = (guint32 *)tree->data.p;
+
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, jt [0]);
+ offset = 6 + (guint32)s->code;
+ x86_branch32 (s->code, X86_CC_GE, jt [jt [0] + 1] - offset, FALSE);
+
+ x86_mov_reg_memindex (s->code, X86_EAX, X86_NOBASEREG,
+ tree->data.i + 4, tree->left->reg1, 2, 4);
+ x86_jump_reg (s->code, X86_EAX);
+}
+
+#
+# 64 bit integers
+#
+
+# truncating conversions from 64 bit (register pair) to 32 bit or less:
+# only the low word (reg1) is used
+reg: CONV_I1 (lreg) {
+ /* NOTE(review): the AND mask zero-extends rather than sign-extends
+ * the low byte - for a signed conv.i1 a movsx would be expected,
+ * but that needs a byte-addressable source register; verify what
+ * the evaluation stack convention requires here */
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xff);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: CONV_I2 (lreg) {
+ /* NOTE(review): same zero- vs sign-extension question as CONV_I1 */
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xffff);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: CONV_I4 (lreg) {
+ /* simply drop the high word */
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+
+# checked truncation int64 -> int32
+reg: CONV_OVF_I4 (lreg){
+	guint8 *start = s->code;
+	gpointer o1, o2, o3, o4, o5;
+	int i;
+
+	/*
+	 * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
+	 * i.e. the high word must be the sign extension of the low word.
+	 *
+	 * Two-pass emission: the o1..o5 labels hold garbage during the first
+	 * iteration and the correct addresses during the second, when the
+	 * same bytes are emitted again over `start`.
+	 */
+	for (i = 0; i < 2; i++) {
+		s->code = start;
+
+		x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+
+		/* If the low word top bit is set, see if we are negative */
+		x86_branch8 (s->code, X86_CC_LT, o3 - o1, TRUE);
+		o1 = s->code;
+
+		/* We are not negative (no top bit set), check that our top word is zero */
+		x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
+		x86_branch8 (s->code, X86_CC_EQ, o4 - o2, TRUE);
+		o2 = s->code;
+
+		/* throw exception */
+		x86_push_imm (s->code, "OverflowException");
+		mono_add_jump_info (s, s->code + 1, arch_get_throw_exception_by_name (), NULL);
+		x86_call_code (s->code, 0);
+
+		o3 = s->code;
+		/* our top bit is set, check that top word is 0xffffffff */
+		x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg2, 0xffffffff);
+
+		o4 = s->code;
+		/* nope, emit exception (backward branch to the throw above;
+		 * note the EQ branch above lands here with ZF set, so this
+		 * branch falls through in that case) */
+		x86_branch8 (s->code, X86_CC_NE, o2 - o5, TRUE);
+		o5 = s->code;
+
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	}
+}
+
+# checked truncation int64 -> uint32
+reg: CONV_OVF_U4 (lreg) {
+	/* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */
+	/* top word must be 0; the macro throws unless the condition holds */
+	x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+# checked truncation uint64 -> int32 (interpreting the input as unsigned)
+reg: CONV_OVF_I4_UN (lreg) {
+	/* Keep in sync with CONV_OVF_U4 above, they are the same on 32-bit machines */
+	/* top word must be 0; the macro throws unless the condition holds */
+	x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+# discard a 64 bit value: both halves already live in registers, nothing to emit
+stmt: POP (lreg)
+
+# load a 64 bit constant into a register pair; cost 1
+lreg: CONST_I8 1 {
+	/* the two halves are read through the union (the 8 byte data.l
+	 * overlays data.p, so reading word 1 past &data.p is in bounds) */
+	x86_mov_reg_imm (s->code, tree->reg1, *((gint32 *)&tree->data.p));
+	x86_mov_reg_imm (s->code, tree->reg2, *((gint32 *)&tree->data.p + 1));
+}
+
+# NOTE(review): this rule is token-for-token identical to the
+# `reg: CONV_I1 (lreg)` rule earlier in this section -- almost certainly an
+# accidental duplicate; confirm and remove one of the two
+reg: CONV_I1 (lreg) {
+	x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xff);
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+lreg: CONV_I8 (CONST_I4) {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
+
+ if (tree->left->data.i >= 0)
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+ else
+ x86_mov_reg_imm (s->code, tree->reg2, -1);
+}
+
+lreg: CONV_I8 (reg) {
+ guint8 *i1;
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+ x86_branch8 (s->code, X86_CC_GE, 5, TRUE);
+ i1 = s->code;
+ x86_mov_reg_imm (s->code, tree->reg2, -1);
+ mono_assert ((s->code - i1) == 5);
+}
+
+lreg: CONV_U8 (CONST_I4) 1 {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+}
+
+lreg: CONV_OVF_U8 (CONST_I4) {
+ if (tree->left->data.i < 0){
+ x86_push_imm (s->code, "OverflowException");
+ mono_add_jump_info (s, s->code + 1, arch_get_throw_exception_by_name (), NULL);
+ x86_call_code (s->code, 0);
+ } else {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+ }
+}
+
+lreg: CONV_OVF_I8_UN (CONST_I4) {
+ x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+}
+
+lreg: CONV_OVF_U8 (reg) {
+ x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+}
+
+lreg: CONV_OVF_I8_UN (reg) {
+	/* a uint32 value always fits into an int64 -- no runtime check needed */
+	int src = tree->left->reg1;
+
+	if (tree->reg1 != src)
+		x86_mov_reg_reg (s->code, tree->reg1, src, 4);
+	x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+}
+
+stmt: STIND_I8 (addr, lreg) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset + 4, tree->right->reg2, 4);
+ break;
+
+ case AMBase:
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset + 4, tree->right->reg2, 4);
+ break;
+ case AMIndex:
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset + 4,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg2, 4);
+ break;
+ case AMBaseIndex:
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg1, 4);
+ x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset + 4,
+ tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
+ tree->right->reg2, 4);
+ break;
+ }
+
+}
+
+# an addr can use two address registers (a base and an index register). We must
+# take care not to overwrite them before the loads (hence the use of x86_lea)
+lreg: LDIND_I8 (addr) {
+	/* load a 64 bit value; the effective address is materialized into reg2
+	 * first, so the two loads cannot clobber the base/index registers */
+	X86AddressInfo *ai = &tree->left->data.ainfo;
+
+	switch (ai->amode) {
+
+	case AMImmediate:
+		x86_mov_reg_mem (s->code, tree->reg1, ai->offset, 4);
+		x86_mov_reg_mem (s->code, tree->reg2, ai->offset + 4, 4);
+		break;
+
+	case AMBase:
+		x86_lea_membase (s->code, tree->reg2, ai->basereg, ai->offset);
+		x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4);
+		x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4);
+		break;
+	case AMIndex:
+		x86_lea_memindex (s->code, tree->reg2, X86_NOBASEREG, ai->offset,
+				  ai->indexreg, ai->shift);
+		x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4);
+		x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4);
+		break;
+	case AMBaseIndex:
+		x86_lea_memindex (s->code, tree->reg2, ai->basereg, ai->offset,
+				  ai->indexreg, ai->shift);
+		x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4);
+		x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4);
+		break;
+	}
+	PRINT_REG ("LDIND_I8_0", tree->reg1);
+	PRINT_REG ("LDIND_I8_1", tree->reg2);
+}
+
+# 64 bit arithmetic shift right by a compile-time constant
+lreg: SHR (lreg, CONST_I4) {
+	if (tree->right->data.i < 32) {
+		/* shift bits from the high word into the low word, then
+		 * arithmetic-shift the high word */
+		x86_shrd_reg_imm (s->code, tree->left->reg1, tree->left->reg2, tree->right->data.i);
+		x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg2, tree->right->data.i);
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+		if (tree->reg2 != tree->left->reg2)
+			x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+	} else if (tree->right->data.i < 64) {
+		/* shifts >= 32: low word = high word >> (count-32),
+		 * high word = sign fill */
+		if (tree->reg1 != tree->left->reg2)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4);
+		if (tree->reg2 != tree->left->reg2)
+			x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+		x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
+		x86_shift_reg_imm (s->code, X86_SAR, tree->reg1, (tree->right->data.i - 32));
+	} /* else unspecified result */
+}
+
+# 64 bit logical shift right by a compile-time constant
+lreg: SHR_UN (lreg, CONST_I4) {
+	if (tree->right->data.i < 32) {
+		x86_shrd_reg_imm (s->code, tree->left->reg1, tree->left->reg2, tree->right->data.i);
+		x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg2, tree->right->data.i);
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+		if (tree->reg2 != tree->left->reg2)
+			x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+	} else if (tree->right->data.i < 64) {
+		/* shifts >= 32: low word = high word >> (count-32), high word = 0 */
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4);
+		x86_shift_reg_imm (s->code, X86_SHR, tree->reg1, (tree->right->data.i - 32));
+		x86_mov_reg_imm (s->code, tree->reg2, 0);
+	} /* else unspecified result */
+}
+
+lreg: SHR (lreg, reg) {
+ guint8 *start = s->code;
+ gint32 o1, o2, i;
+
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+
+ for (i = 0; i < 2; i ++) {
+ s->code = start;
+ x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2);
+ x86_shift_reg (s->code, X86_SAR, tree->left->reg2);
+ x86_test_reg_imm (s->code, X86_ECX, 32);
+ o1 = 2 + s->code - s->start;
+ x86_branch8 (s->code, X86_CC_EQ, o2 - o1, FALSE);
+ x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4);
+ x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
+ o2 = s->code - s->start;
+ }
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ if (tree->reg2 != tree->left->reg2)
+ x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+lreg: SHR_UN (lreg, reg) {
+ guint8 *start = s->code;
+ gint32 o1, o2, i;
+
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+
+ for (i = 0; i < 2; i ++) {
+ s->code = start;
+ x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2);
+ x86_shift_reg (s->code, X86_SHR, tree->left->reg2);
+ x86_test_reg_imm (s->code, X86_ECX, 32);
+ o1 = 2 + s->code - s->start;
+ x86_branch8 (s->code, X86_CC_EQ, o2 - o1, FALSE);
+ x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4);
+ x86_shift_reg_imm (s->code, X86_SHR, tree->reg2, 31);
+ o2 = s->code - s->start;
+ }
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ if (tree->reg2 != tree->left->reg2)
+ x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+# 64 bit shift left by a compile-time constant
+lreg: SHL (lreg, CONST_I4) {
+	if (tree->right->data.i < 32) {
+		/* shift bits from the low word into the high word, then shift low */
+		x86_shld_reg_imm (s->code, tree->left->reg2, tree->left->reg1, tree->right->data.i);
+		x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, tree->right->data.i);
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+		if (tree->reg2 != tree->left->reg2)
+			x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+	} else if (tree->right->data.i < 64) {
+		/* shifts >= 32: high word = low word << (count-32), low word = 0 */
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg1, 4);
+		x86_shift_reg_imm (s->code, X86_SHL, tree->reg2, (tree->right->data.i - 32));
+		x86_alu_reg_reg (s->code, X86_XOR, tree->reg1, tree->reg1);
+	} /* else unspecified result */
+}
+
+# 64 bit shift left by a variable amount (count in ECX); two-pass emission
+lreg: SHL (lreg, reg) {
+	guint8 *start = s->code;
+	gint32 o1, o2, i;
+
+	if (tree->right->reg1 != X86_ECX)
+		x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_shld_reg (s->code, tree->left->reg2, tree->left->reg1);
+		x86_shift_reg (s->code, X86_SHL, tree->left->reg1);
+		/* the hardware shift only used count mod 32: fix up counts >= 32 */
+		x86_test_reg_imm (s->code, X86_ECX, 32);
+		o1 = 2 + s->code - s->start;	/* 2 = size of the branch8 */
+		x86_branch8 (s->code, X86_CC_EQ, o2 - o1, FALSE);
+		x86_mov_reg_reg (s->code, tree->left->reg2, tree->left->reg1, 4);
+		x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->left->reg1);
+		o2 = s->code - s->start;
+	}
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+lreg: ADD (lreg, lreg) {
+	/* 64 bit addition: add the low words, then add-with-carry the high words */
+	MBTree *lhs = tree->left, *rhs = tree->right;
+
+	x86_alu_reg_reg (s->code, X86_ADD, lhs->reg1, rhs->reg1);
+	x86_alu_reg_reg (s->code, X86_ADC, lhs->reg2, rhs->reg2);
+
+	if (tree->reg1 != lhs->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, lhs->reg1, 4);
+	if (tree->reg2 != lhs->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, lhs->reg2, 4);
+}
+
+# 64 bit signed addition with overflow check
+lreg: ADD_OVF (lreg, lreg) {
+	x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
+	x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2);
+	/* throws unless "no overflow" holds after the ADC (macro defined elsewhere) */
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+# 64 bit unsigned addition with overflow check (carry out = overflow)
+lreg: ADD_OVF_UN (lreg, lreg) {
+	x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
+	x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2);
+	/* throws unless "no carry" holds after the ADC */
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+lreg: SUB (lreg, lreg) {
+	/* 64 bit subtraction: subtract low words, then subtract-with-borrow high */
+	MBTree *lhs = tree->left, *rhs = tree->right;
+
+	x86_alu_reg_reg (s->code, X86_SUB, lhs->reg1, rhs->reg1);
+	x86_alu_reg_reg (s->code, X86_SBB, lhs->reg2, rhs->reg2);
+
+	if (tree->reg1 != lhs->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, lhs->reg1, 4);
+	if (tree->reg2 != lhs->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, lhs->reg2, 4);
+}
+
+# 64 bit signed subtraction with overflow check
+lreg: SUB_OVF (lreg, lreg) {
+	x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
+	x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
+	/* throws unless "no overflow" holds after the SBB */
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+# 64 bit unsigned subtraction with overflow check (borrow = overflow)
+lreg: SUB_OVF_UN (lreg, lreg) {
+	x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
+	x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
+	/* throws unless "no carry" (no borrow) holds after the SBB */
+	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+}
+
+lreg: AND (lreg, lreg) {
+	/* 64 bit bitwise AND, applied to each half independently */
+	MBTree *lhs = tree->left, *rhs = tree->right;
+
+	x86_alu_reg_reg (s->code, X86_AND, lhs->reg1, rhs->reg1);
+	x86_alu_reg_reg (s->code, X86_AND, lhs->reg2, rhs->reg2);
+
+	if (tree->reg1 != lhs->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, lhs->reg1, 4);
+	if (tree->reg2 != lhs->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, lhs->reg2, 4);
+}
+
+lreg: OR (lreg, lreg) {
+	/* 64 bit bitwise OR, applied to each half independently */
+	MBTree *lhs = tree->left, *rhs = tree->right;
+
+	x86_alu_reg_reg (s->code, X86_OR, lhs->reg1, rhs->reg1);
+	x86_alu_reg_reg (s->code, X86_OR, lhs->reg2, rhs->reg2);
+
+	if (tree->reg1 != lhs->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, lhs->reg1, 4);
+	if (tree->reg2 != lhs->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, lhs->reg2, 4);
+}
+
+# 64 bit negation using the standard idiom:
+#   neg low; adc high, 0; neg high  ==  -(high:low)
+lreg: NEG (lreg) {
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	if (tree->reg2 != tree->left->reg2)
+		x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+
+	x86_neg_reg (s->code, tree->reg1);
+	/* NEG sets CF when the low word was non-zero; propagate the borrow */
+	x86_alu_reg_imm (s->code, X86_ADC, tree->reg2, 0);
+	x86_neg_reg (s->code, tree->reg2);
+}
+
+lreg: NOT (lreg) {
+	/* 64 bit bitwise complement, computed in place on the result pair */
+	int lo = tree->left->reg1;
+	int hi = tree->left->reg2;
+
+	if (tree->reg1 != lo)
+		x86_mov_reg_reg (s->code, tree->reg1, lo, 4);
+	if (tree->reg2 != hi)
+		x86_mov_reg_reg (s->code, tree->reg2, hi, 4);
+
+	x86_not_reg (s->code, tree->reg1);
+	x86_not_reg (s->code, tree->reg2);
+}
+
+lreg: MUL (lreg, lreg) {
+	/* 64 bit multiply via the C helper mono_llmult; result in EDX:EAX */
+	gboolean save_ecx = mono_regset_reg_used (s->rs, X86_ECX);
+
+	if (save_ecx)
+		x86_push_reg (s->code, X86_ECX);
+
+	/* push both 64 bit operands, high word first (cdecl layout) */
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	mono_add_jump_info (s, s->code + 1, mono_llmult, NULL);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);	/* pop the 4 argument words */
+
+	if (save_ecx)
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+# 64 bit signed multiply with overflow check, via the mono_llmult_ovf helper
+lreg: MUL_OVF (lreg, lreg) {
+	if (mono_regset_reg_used (s->rs, X86_ECX))
+		x86_push_reg (s->code, X86_ECX);
+
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	/* pass a pointer to store the resulting exception -
+	 * ugly, but it works: the pushed ESP points at the argument area,
+	 * which the helper reuses as the exc out-slot */
+	x86_push_reg (s->code, X86_ESP);
+	mono_add_jump_info (s, s->code + 1, mono_llmult_ovf, NULL);
+	x86_call_code (s->code, 0);
+	/* fetch the exception object (if any) that the helper stored */
+	x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);	/* pop the 5 pushed words */
+	x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0);
+
+	/* cond. emit exception: skip the throw sequence below when ECX == 0 */
+	x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
+	x86_push_reg (s->code, X86_ECX);
+	mono_add_jump_info (s, s->code + 1, arch_get_throw_exception (), NULL);
+	x86_call_code (s->code, 0);
+
+	if (mono_regset_reg_used (s->rs, X86_ECX))
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		tree->reg2 == X86_EDX);
+}
+
+# 64 bit unsigned multiply with overflow check, via mono_llmult_ovf_un
+lreg: MUL_OVF_UN (lreg, lreg) {
+	if (mono_regset_reg_used (s->rs, X86_ECX))
+		x86_push_reg (s->code, X86_ECX);
+
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	/* pass a pointer to store the resulting exception -
+	 * ugly, but it works (see MUL_OVF above) */
+	x86_push_reg (s->code, X86_ESP);
+	mono_add_jump_info (s, s->code + 1, mono_llmult_ovf_un, NULL);
+	x86_call_code (s->code, 0);
+	/* fetch the exception object (if any) that the helper stored */
+	x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);	/* pop the 5 pushed words */
+	x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0);
+
+	/* cond. emit exception: skip the throw sequence below when ECX == 0 */
+	x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
+	x86_push_reg (s->code, X86_ECX);
+	mono_add_jump_info (s, s->code + 1, arch_get_throw_exception (), NULL);
+	x86_call_code (s->code, 0);
+
+	if (mono_regset_reg_used (s->rs, X86_ECX))
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		tree->reg2 == X86_EDX);
+}
+
+lreg: DIV (lreg, lreg) {
+	/* 64 bit signed division via the C helper mono_lldiv; result in EDX:EAX */
+	gboolean save_ecx = mono_regset_reg_used (s->rs, X86_ECX);
+
+	if (save_ecx)
+		x86_push_reg (s->code, X86_ECX);
+
+	/* push dividend and divisor, high word first */
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	mono_add_jump_info (s, s->code + 1, mono_lldiv, NULL);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);	/* pop the 4 argument words */
+
+	if (save_ecx)
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+lreg: REM (lreg, lreg) {
+	/* 64 bit signed remainder via the C helper mono_llrem; result in EDX:EAX */
+	gboolean save_ecx = mono_regset_reg_used (s->rs, X86_ECX);
+
+	if (save_ecx)
+		x86_push_reg (s->code, X86_ECX);
+
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	mono_add_jump_info (s, s->code + 1, mono_llrem, NULL);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);	/* pop the 4 argument words */
+
+	if (save_ecx)
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+lreg: DIV_UN (lreg, lreg) {
+	/* 64 bit unsigned division via mono_lldiv_un; result in EDX:EAX */
+	gboolean save_ecx = mono_regset_reg_used (s->rs, X86_ECX);
+
+	if (save_ecx)
+		x86_push_reg (s->code, X86_ECX);
+
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	mono_add_jump_info (s, s->code + 1, mono_lldiv_un, NULL);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);	/* pop the 4 argument words */
+
+	if (save_ecx)
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+lreg: REM_UN (lreg, lreg) {
+	/* 64 bit unsigned remainder via mono_llrem_un; result in EDX:EAX */
+	gboolean save_ecx = mono_regset_reg_used (s->rs, X86_ECX);
+
+	if (save_ecx)
+		x86_push_reg (s->code, X86_ECX);
+
+	x86_push_reg (s->code, tree->right->reg2);
+	x86_push_reg (s->code, tree->right->reg1);
+	x86_push_reg (s->code, tree->left->reg2);
+	x86_push_reg (s->code, tree->left->reg1);
+	mono_add_jump_info (s, s->code + 1, mono_llrem_un, NULL);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);	/* pop the 4 argument words */
+
+	if (save_ecx)
+		x86_pop_reg (s->code, X86_ECX);
+
+	mono_assert (tree->reg1 == X86_EAX &&
+		     tree->reg2 == X86_EDX);
+}
+
+lreg: CALL_I8 (this, ADDR_G) {
+	/* direct call returning a 64 bit value (in EDX:EAX) */
+	MethodCallInfo *ci = tree->data.ci;
+	int this_reg = tree->left->reg1;
+	int scratch = (this_reg == X86_EAX) ? X86_EDX : X86_EAX;
+
+	if (tree->left->op != MB_TERM_NOP) {
+		mono_assert (this_reg >= 0);
+		x86_push_reg (s->code, this_reg);
+	}
+
+	if (ci->vtype_num) {
+		/* hidden first argument: address of the valuetype return buffer */
+		int vtbuf = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+		x86_lea_membase (s->code, scratch, X86_EBP, vtbuf);
+		x86_push_reg (s->code, scratch);
+	}
+
+	/* the call target is patched in via the jump-info list */
+	mono_add_jump_info (s, s->code + 1, tree->right->data.p, NULL);
+	x86_call_code (s->code, 0);
+
+	if (ci->args_size)
+		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+	mono_assert (tree->reg1 == X86_EAX);
+	mono_assert (tree->reg2 == X86_EDX);
+}
+
+# virtual call returning a 64 bit value (in EDX:EAX)
+lreg: CALL_I8 (this, VFUNC_ADDR) {
+	MethodCallInfo *ci = tree->data.ci;
+	int lreg = tree->left->reg1;	/* register holding `this` */
+	int treg = X86_EAX;		/* scratch, must differ from lreg */
+
+	if (lreg == treg)
+		treg = X86_EDX;
+
+	if (tree->left->op != MB_TERM_NOP) {
+		mono_assert (lreg >= 0);
+		x86_push_reg (s->code, lreg);
+	}
+
+	if (ci->vtype_num) {
+		/* hidden first argument: address of the valuetype return buffer */
+		int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+		x86_lea_membase (s->code, treg, X86_EBP, offset);
+		x86_push_reg (s->code, treg);
+	}
+
+	/* load the vtable, then call indirectly through the method slot */
+	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+	x86_call_virtual (s->code, lreg,
+		G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
+
+	if (ci->args_size)
+		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+	PRINT_REG ("CALL0_I8(VIRTUAL)", tree->reg1);
+	PRINT_REG ("CALL1_I8(VIRTUAL)", tree->reg2);
+
+	mono_assert (tree->reg1 == X86_EAX);
+	mono_assert (tree->reg2 == X86_EDX);
+}
+
+# interface call returning a 64 bit value (in EDX:EAX)
+lreg: CALL_I8 (this, INTF_ADDR) {
+	MethodCallInfo *ci = tree->data.ci;
+	int lreg = tree->left->reg1;	/* register holding `this` */
+	int treg = X86_EAX;		/* scratch, must differ from lreg */
+
+	if (lreg == treg)
+		treg = X86_EDX;
+
+	if (tree->left->op != MB_TERM_NOP) {
+		mono_assert (lreg >= 0);
+		x86_push_reg (s->code, lreg);
+	}
+
+	if (ci->vtype_num) {
+		/* hidden first argument: address of the valuetype return buffer */
+		int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+		x86_lea_membase (s->code, treg, X86_EBP, offset);
+		x86_push_reg (s->code, treg);
+	}
+
+	/* vtable -> interface_offsets -> per-interface table -> method slot */
+	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+	x86_mov_reg_membase (s->code, lreg, lreg,
+		G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+	x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+	x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
+
+	if (ci->args_size)
+		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+
+	PRINT_REG ("CALL_I8(INTERFACE)", tree->reg1);
+
+	mono_assert (tree->reg1 == X86_EAX);
+	mono_assert (tree->reg2 == X86_EDX);
+}
+
+# move a 64 bit return value into EDX:EAX, then jump to the method epilogue
+stmt: RET (lreg) {
+	if (tree->left->reg1 != X86_EAX) {
+		if (tree->left->reg2 != X86_EAX) {
+			x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+			if (tree->left->reg2 != X86_EDX)
+				x86_mov_reg_reg (s->code, X86_EDX, tree->left->reg2, 4);
+		} else {
+			/* the high word lives in EAX: stage it through ECX so the
+			 * low-word move does not clobber it */
+			x86_mov_reg_reg (s->code, X86_ECX, tree->left->reg2, 4);
+			x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+			x86_mov_reg_reg (s->code, X86_EDX, X86_ECX, 4);
+		}
+	} else if (tree->left->reg2 != X86_EDX) {
+		x86_mov_reg_reg (s->code, X86_EDX, tree->left->reg2, 4);
+	}
+
+	/* unless this is the method's last instruction, emit a jump whose
+	 * target (NULL here) is presumably patched to the epilogue later */
+	if (!tree->last_instr) {
+		mono_add_jump_info (s, s->code + 1, NULL, NULL);
+		x86_jump32 (s->code, 0);
+	}
+}
+
+
+stmt: ARG_I8 (lreg) {
+	/* push a 64 bit call argument, high word first (cdecl stack layout) */
+	MBTree *val = tree->left;
+
+	x86_push_reg (s->code, val->reg2);
+	x86_push_reg (s->code, val->reg1);
+}
+
+# 64 bit equality test producing 0/1; two-pass emission makes o1/o2 valid
+# in the second iteration
+reg: CEQ (lreg, lreg) {
+	guint8 *start = s->code;
+	gint32 o1, o2, i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		o1 = 2 + s->code - s->start;	/* 2 = size of the branch8 */
+		/* low words differ: skip the high-word compare, ZF stays clear */
+		x86_branch8 (s->code, X86_CC_NE, o2 - o1, FALSE);
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		o2 = s->code - s->start;
+		/* ZF is set here iff both halves compared equal */
+		x86_set_reg (s->code, X86_CC_EQ, tree->reg1, FALSE);
+		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+	}
+}
+
+reg: CLT (lreg, lreg) {
+ guint8 *start = s->code;
+ gpointer o1, o2, o3, o4, o5;
+ int i;
+
+ for (i = 0; i < 2; i ++) {
+ s->code = start;
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+ x86_branch8 (s->code, X86_CC_GT, o4 - o1, TRUE);
+ o1 = s->code;
+ x86_branch8 (s->code, X86_CC_NE, o4 - o2, TRUE);
+ o2 = s->code;
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+ x86_branch8 (s->code, X86_CC_GE, o4 - o3, FALSE);
+
+ /* set result to 1 */
+ o3 = s->code;
+ x86_mov_reg_imm (s->code, tree->reg1, 1);
+ x86_jump8 (s->code, (o4 - o3));
+ /* set result to 0 */
+ o4 = s->code;
+ x86_mov_reg_imm (s->code, tree->reg1, 0);
+ o5 = s->code;
+ }
+}
+
+# branch if the 64 bit values are equal (both halves must match); two-pass
+stmt: BEQ (lreg, lreg) {
+	guint8 *start = s->code;
+	int i;
+	gpointer o1, o2;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		/* low words differ: fall past the taken branch below */
+		x86_branch8 (s->code, X86_CC_NE, o2 - o1, FALSE);
+		o1 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		/* patch site: +2 skips the two-byte 0f 8x opcode of the branch32 */
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+		o2 = s->code;
+	}
+}
+
+# branch if the 64 bit values differ: either half unequal takes the branch
+stmt: BNE_UN (lreg, lreg) {
+	x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+	/* patch site: +2 skips the two-byte 0f 8x opcode of the branch32 */
+	mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+	x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+	x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+	mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+	x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+}
+
+# branch if left >= right (signed 64 bit); high words decide unless equal,
+# then the low words compare unsigned. Two-pass emission (o2/oe valid in
+# pass two); the jump-info entries are only registered on the final pass.
+stmt: BGE (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+		/* Jcc does not modify EFLAGS, so this second CMP is redundant
+		 * but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left >= right (unsigned 64 bit); same shape as BGE but with
+# unsigned (FALSE) condition codes throughout
+stmt: BGE_UN (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left > right (signed 64 bit); two-pass, like BGE above
+stmt: BGT (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
+		o2 = s->code;
+		/* high words equal: low words decide, compared unsigned */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left > right (unsigned 64 bit); two-pass, like BGE_UN above
+stmt: BGT_UN (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left < right (signed 64 bit); two-pass, like BGE above
+stmt: BLT (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
+		o2 = s->code;
+		/* high words equal: low words decide, compared unsigned */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left < right (unsigned 64 bit); two-pass, like BGE_UN above
+stmt: BLT_UN (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left <= right (signed 64 bit): taken when high < high, skipped
+# when high > high, otherwise decided by an unsigned low-word <= compare
+stmt: BLE (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+# branch if left <= right (unsigned 64 bit); two-pass, like BLE above but
+# with unsigned condition codes throughout
+stmt: BLE_UN (lreg, lreg) {
+	guint8 *start = s->code;
+	gpointer o2, oe;
+	int i;
+
+	for (i = 0; i < 2; i ++) {
+		s->code = start;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+		/* redundant second CMP (flags survive the branch) but harmless */
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
+		x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
+		o2 = s->code;
+		x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
+		if (i) mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+		x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+		oe = s->code;
+	}
+}
+
+#
+# floating point
+
+#stmt: STLOC (CONV_I4 (freg)) {
+# // fixme: set CW
+# x86_fist_pop_membase (s->code, X86_EBP, tree->data.i, FALSE);
+#}
+
+# fp -> int8: set the FPU rounding mode to truncate, fistp, mask to 8 bits
+reg: CONV_I1 (freg) {
+	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+	x86_fnstcw_membase(s->code, X86_ESP, 0);	/* save the current control word */
+	x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+	x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);	/* RC field = truncate */
+	x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+	x86_fldcw_membase (s->code, X86_ESP, 2);
+	x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+	x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+	x86_pop_reg (s->code, tree->reg1);
+	/* NOTE(review): AND zero-extends; a signed conv.i1 would sign extend */
+	x86_alu_reg_imm (s->code, X86_AND, tree->reg1, 0xff);
+	x86_fldcw_membase (s->code, X86_ESP, 0);	/* restore the control word */
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+}
+
+ # convert the FP value on top of the x87 stack to a 16 bit integer,
+ # using the same truncating control-word dance as CONV_I1 above.
+ reg: CONV_I2 (freg) {
+ // save control word, switch rounding to truncate
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ // NOTE(review): 0xffff mask zero-extends -- confirm vs. conv.i2
+ x86_alu_reg_imm (s->code, X86_AND, tree->reg1, 0xffff);
+ // restore control word, release slot
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+ }
+
+ # convert the FP value on top of the x87 stack to a 32 bit integer
+ # (truncating control-word dance, no mask needed for full words).
+ reg: CONV_I4 (freg) {
+ // save control word, switch rounding to truncate
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ // restore control word, release slot
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+ }
+
+lreg: CONV_I8 (freg) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ x86_fist_pop_membase (s->code, X86_ESP, 0, TRUE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_pop_reg (s->code, tree->reg2);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+}
+
+ # compare the two FP stack values for equality and materialize the
+ # boolean (0/1) in reg1, via the x87 status word in AX.
+ reg: CEQ (freg, freg) {
+ int treg = tree->reg1;
+
+ // fnstsw below clobbers AX, so preserve EAX unless it is the target
+ if (treg != X86_EAX)
+ x86_push_reg (s->code, X86_EAX); // save EAX
+
+ // pop-compare both operands, copy status word to AX
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ // keep C0/C2/C3; C3 alone (0x4000) means "equal"
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+
+ if (treg != X86_EAX)
+ x86_pop_reg (s->code, X86_EAX); // save EAX
+ }
+
+ # FP-to-FP conversions: the x87 stack always holds extended precision,
+ # so widening/narrowing between R4 and R8 needs no code here.
+ freg: CONV_R8 (freg) {
+ /* nothing to do */
+ }
+
+ freg: CONV_R4 (freg) {
+ /* fixme: nothing to do ??*/
+ }
+
+ # load an I4 global directly as an integer into the FP stack
+ freg: CONV_R8 (LDIND_I4 (ADDR_G)) {
+ x86_fild (s->code, tree->left->left->data.p, FALSE);
+ }
+
+freg: CONV_R4 (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, FALSE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+}
+
+freg: CONV_R8 (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, FALSE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+}
+
+freg: CONV_R_UN (reg) {
+ x86_push_imm (s->code, 0);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+}
+
+freg: CONV_R_UN (lreg) {
+ guint8 *start = s->code, *l1, *l2;
+ int i;
+
+ static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
+
+ for (i = 0; i < 2; i++) {
+ s->code = start;
+
+ /* load 64bit integer to FP stack */
+ x86_push_imm (s->code, 0);
+ x86_push_reg (s->code, tree->left->reg2);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ /* store as 80bit FP value */
+ x86_fst80_membase (s->code, X86_ESP, 0);
+
+ /* test if lreg is negative */
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ x86_branch8 (s->code, X86_CC_GEZ, l2 - l1, TRUE);
+ l1 = s->code;
+
+ /* add correction constant mn */
+ x86_fld80_mem (s->code, mn);
+ x86_fld80_membase (s->code, X86_ESP, 0);
+ x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
+ x86_fst80_membase (s->code, X86_ESP, 0);
+
+ l2 = s->code;
+
+ x86_fld80_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 12);
+ }
+}
+
+freg: CONV_R4 (lreg) {
+ x86_push_reg (s->code, tree->left->reg2);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+}
+
+freg: CONV_R8 (lreg) {
+ x86_push_reg (s->code, tree->left->reg2);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+}
+
+ # load an FP constant; 0.0 and 1.0 get the dedicated fldz/fld1
+ # single-byte forms, everything else is loaded from memory (the
+ # constant lives at tree->data.p and must stay valid at run time).
+ freg: CONST_R4 {
+ float f = *(float *)tree->data.p;
+
+ if (f == 0.0)
+ x86_fldz (s->code);
+ else if (f == 1.0)
+ x86_fld1(s->code);
+ else
+ x86_fld (s->code, tree->data.p, FALSE);
+ }
+
+ freg: CONST_R8 {
+ double d = *(double *)tree->data.p;
+
+ if (d == 0.0)
+ x86_fldz (s->code);
+ else if (d == 1.0)
+ x86_fld1(s->code);
+ else
+ x86_fld (s->code, tree->data.p, TRUE);
+ }
+
+ # load an R4/R8 value through the address in reg1 onto the FP stack
+ # (last argument selects 4 vs. 8 byte load).
+ freg: LDIND_R4 (reg) {
+ x86_fld_membase (s->code, tree->left->reg1, 0, FALSE);
+ }
+
+ freg: LDIND_R8 (reg) {
+ x86_fld_membase (s->code, tree->left->reg1, 0, TRUE);
+ }
+
+ # FP arithmetic: both operands are on the x87 stack; the op combines
+ # ST(0) and ST(1) and pops, leaving the result on top.
+ freg: ADD (freg, freg) {
+ x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
+ }
+
+ freg: SUB (freg, freg) {
+ x86_fp_op_reg (s->code, X86_FSUB, 1, TRUE);
+ }
+
+ freg: MUL (freg, freg) {
+ x86_fp_op_reg (s->code, X86_FMUL, 1, TRUE);
+ }
+
+ freg: DIV (freg, freg) {
+ x86_fp_op_reg (s->code, X86_FDIV, 1, TRUE);
+ }
+
+ #freg: REM (freg, freg) {
+ # this does not work, since it does not pop a value from the stack,
+ # and we need to test if the instruction is ready
+ # x86_fprem1 (s->code);
+ #}
+
+ # negate the value on top of the FP stack in place
+ freg: NEG (freg) {
+ x86_fchs (s->code);
+ }
+
+ # NOTE(review): no action -- the value stays on the x87 stack; verify
+ # a discarded FP result is popped elsewhere or cannot overflow the stack
+ stmt: POP (freg)
+
+ # store ST(0) (popping it) into a local variable / through a pointer;
+ # the 3rd/4th args of x86_fst_membase select size (R4/R8) and pop.
+ stmt: STIND_R4 (ADDR_L, freg) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, tree->left->data.i).offset;
+ x86_fst_membase (s->code, X86_EBP, offset, FALSE, TRUE);
+ }
+
+ stmt: STIND_R4 (reg, freg) {
+ x86_fst_membase (s->code, tree->left->reg1, 0, FALSE, TRUE);
+ }
+
+ stmt: STIND_R8 (ADDR_L, freg) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, tree->left->data.i).offset;
+ x86_fst_membase (s->code, X86_EBP, offset, TRUE, TRUE);
+ }
+
+ stmt: STIND_R8 (reg, freg) {
+ x86_fst_membase (s->code, tree->left->reg1, 0, TRUE, TRUE);
+ }
+
+ # pass an FP value as a call argument: reserve outgoing-arg space and
+ # pop ST(0) into it (the slot is released by the callee/caller cleanup).
+ stmt: ARG_R4 (freg) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);
+ }
+
+ stmt: ARG_R8 (freg) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);
+ }
+
+# fixme: we need to implement unordered and ordered compares
+
+ # FP conditional branches: fcompp pops and compares both operands,
+ # fnstsw copies the status word into AX (NOTE(review): EAX is
+ # clobbered without saving -- confirm EAX is never live here), then
+ # the masked condition bits are tested.  C3 alone (0x4000) = equal.
+ stmt: BEQ (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+ }
+
+ stmt: BNE_UN (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ }
+
+ # all condition bits clear (masked value == 0) after fcompp means
+ # ST(0) > ST(1); per the file-level fixme, ordered and unordered
+ # variants are currently emitted identically (NaNs not distinguished).
+ stmt: BLT (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ }
+
+ stmt: BLT_UN (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ }
+
+ # branch when any condition bit is set (not strictly greater)
+ stmt: BGE (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ }
+
+ stmt: BGE_UN (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ }
+
+ # C0 set alone (masked value == 0x0100) after fcompp means
+ # ST(0) < ST(1); as above, ordered and unordered are emitted the same.
+ stmt: BGT (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ }
+
+ stmt: BGT_UN (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ }
+
+ # branch when masked bits != 0x0100, i.e. not strictly less
+ stmt: BLE (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ }
+
+ stmt: BLE_UN (freg, freg) {
+ x86_fcompp (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code + 2, NULL, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ }
+
+ # direct call returning R8: push `this` (unless the rule matched a NOP
+ # placeholder), push a hidden valuetype-return address if needed, then
+ # emit a call whose 32 bit target is patched via the jump-info list.
+ freg: CALL_R8 (this, ADDR_G) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ // pick a scratch register distinct from the `this` register
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ // valuetype return: pass the address of the local as hidden argument
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ // s->code + 1 skips the call opcode byte to address the displacement
+ mono_add_jump_info (s, s->code + 1, tree->right->data.p, NULL);
+ x86_call_code (s->code, 0);
+
+ // caller cleans up the pushed arguments
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ }
+
+ # interface call returning R8: resolve the target through the object's
+ # vtable and the per-interface offset table, then call the slot.
+ freg: CALL_R8 (this, INTF_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ // valuetype return: pass the address of the local as hidden argument
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ // lreg = this->vtable, then its interface_offsets table, then the
+ // per-interface method table indexed by interface_id (x4 for ptrs)
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg,
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+ x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ }
+
+ # virtual call returning R8: load the vtable from the object header and
+ # call indirectly through the method's slot.
+ freg: CALL_R8 (this, VFUNC_ADDR) {
+ MethodCallInfo *ci = tree->data.ci;
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (tree->left->op != MB_TERM_NOP) {
+ mono_assert (lreg >= 0);
+ x86_push_reg (s->code, lreg);
+ }
+
+ // valuetype return: pass the address of the local as hidden argument
+ if (ci->vtype_num) {
+ int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
+ x86_lea_membase (s->code, treg, X86_EBP, offset);
+ x86_push_reg (s->code, treg);
+ }
+
+ // lreg = this->vtable; call vtable->vtable[slot]
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_call_virtual (s->code, lreg,
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
+
+ if (ci->args_size)
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ }
+
+ # return an R8 value: it is already in ST(0), so only jump to the
+ # shared epilogue -- unless this is the method's final instruction,
+ # in which case control simply falls through into the epilogue.
+ stmt: RET (freg) {
+ if (!tree->last_instr) {
+ mono_add_jump_info (s, s->code + 1, NULL, NULL);
+ x86_jump32 (s->code, 0);
+ }
+ }
+
+# support for value types
+
+ # "loading" a valuetype just propagates its address; copy the pointer
+ # register only if the allocator assigned different registers.
+ reg: LDIND_OBJ (reg) {
+ if (tree->left->reg1 != tree->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ }
+
+ # copy a valuetype of tree->data.i bytes from *right to *left by
+ # calling MEMCOPY(dest, src, size); caller-saved registers are
+ # preserved around the cdecl call, which the caller cleans up (add 12).
+ stmt: STIND_OBJ (reg, reg) {
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_EDX);
+ x86_push_reg (s->code, X86_ECX);
+
+ mono_assert (tree->data.i > 0);
+ // cdecl: push args right-to-left (size, src, dest)
+ x86_push_imm (s->code, tree->data.i);
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code + 1, MEMCOPY, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+
+ x86_pop_reg (s->code, X86_ECX);
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+ # a constant-sized word argument can be pushed directly
+ stmt: ARG_OBJ (CONST_I4) {
+ x86_push_imm (s->code, tree->left->data.i);
+ }
+
+ # pass a valuetype argument by value: reserve word-aligned space on
+ # the outgoing stack and MEMCOPY the data into it.
+ stmt: ARG_OBJ (reg) {
+ int size = tree->data.i;
+ int sa;
+
+ mono_assert (size > 0);
+
+ // round the argument size up to a 4 byte multiple
+ sa = size + 3;
+ sa &= ~3;
+
+ /* reserve space for the argument */
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa);
+
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_EDX);
+ x86_push_reg (s->code, X86_ECX);
+
+ x86_push_imm (s->code, size);
+ x86_push_reg (s->code, tree->left->reg1);
+ // dest = reserved slot: skip the 5 words pushed since (3 saved regs
+ // + size + src) to address the area reserved above
+ x86_lea_membase (s->code, X86_EAX, X86_ESP, 5*4);
+ x86_push_reg (s->code, X86_EAX);
+
+ mono_add_jump_info (s, s->code + 1, MEMCOPY, NULL);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+
+ x86_pop_reg (s->code, X86_ECX);
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+ # return a valuetype: MEMCOPY it into the hidden return buffer whose
+ # address was passed as the first (hidden) argument at [EBP+8], then
+ # jump to the shared epilogue unless this is the final instruction.
+ stmt: RET_OBJ (reg) {
+ int size = tree->data.i;
+
+ // cdecl args right-to-left: size, src, dest (= *[EBP+8])
+ x86_push_imm (s->code, size);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_push_membase (s->code, X86_EBP, 8);
+
+
+ mono_add_jump_info (s, s->code + 1, MEMCOPY, NULL);
+ x86_call_code (s->code, 0);
+
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+
+ if (!tree->last_instr) {
+ mono_add_jump_info (s, s->code + 1, NULL, NULL);
+ x86_jump32 (s->code, 0);
+ }
+ }
+
+%%
+
+#include "jit.h"
+
+ /*
+  * mono_llmult:
+  * 64 bit multiply helper invoked from JIT-generated code.
+  */
+ gint64
+ mono_llmult (gint64 a, gint64 b)
+ {
+ gint64 product = a;
+
+ product *= b;
+ return product;
+ }
+
+ /*
+  * mono_llmult_ovf_un:
+  * Unsigned 64 bit multiply with overflow detection, operands passed as
+  * 32 bit halves (a = ah:al, b = bh:bl).  On overflow *exc is set to an
+  * overflow exception and 0 is returned; otherwise *exc is NULL.
+  */
+ guint64
+ mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh)
+ {
+ guint64 res, t1;
+
+ // fixme: this is incredible slow
+
+ /* both high words non-zero: ah*bh*2^64 always overflows */
+ if (ah && bh)
+ goto raise_exception;
+
+ res = (guint64)al * (guint64)bl;
+
+ /* middle partial products; one of ah/bh is known to be zero */
+ t1 = (guint64)ah * (guint64)bl + (guint64)al * (guint64)bh;
+
+ if (t1 > 0xffffffff)
+ goto raise_exception;
+
+ /* the final addition can itself carry out of 64 bits; unsigned
+  * wraparound is well defined, so detect the carry explicitly */
+ t1 <<= 32;
+ if (res + t1 < res)
+ goto raise_exception;
+ res += t1;
+
+ *exc = NULL;
+ return res;
+
+ raise_exception:
+ *exc = mono_get_exception_overflow ();
+ return 0;
+ }
+
+ /*
+  * mono_llmult_ovf:
+  * Signed 64 bit multiply with overflow detection, operands passed as
+  * 32 bit halves (a = ah:al, b = bh:bl).  On overflow *exc is set to an
+  * overflow exception and 0 is returned; otherwise *exc is NULL.
+  */
+ guint64
+ mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh)
+ {
+ gint64 a, b, res;
+
+ a = (gint64)(((guint64)(guint32)ah << 32) | al);
+ b = (gint64)(((guint64)(guint32)bh << 32) | bl);
+
+ *exc = NULL;
+
+ if (a == 0 || b == 0)
+ return 0;
+
+ /* a == -1 is special-cased: -1 * G_MININT64 overflows, and the
+  * division check below would trap on G_MININT64 / -1 */
+ if (a == -1) {
+ if (b == G_MININT64)
+ goto raise_exception;
+ return -b;
+ }
+
+ /* multiply with defined (unsigned) wraparound, then verify the
+  * result by division -- avoids relying on signed overflow, which
+  * is undefined behavior in C */
+ res = (gint64)((guint64)a * (guint64)b);
+ if (res / a != b)
+ goto raise_exception;
+
+ return res;
+
+ raise_exception:
+ *exc = mono_get_exception_overflow ();
+ return 0;
+ }
+
+ /*
+  * 64 bit division/remainder helpers invoked from JIT-generated code.
+  * Division by zero is not checked here; the resulting hardware fault
+  * is presumably turned into a managed exception by a signal handler
+  * elsewhere -- NOTE(review): confirm.
+  */
+ gint64
+ mono_lldiv (gint64 a, gint64 b)
+ {
+ return a / b;
+ }
+
+ gint64
+ mono_llrem (gint64 a, gint64 b)
+ {
+ return a % b;
+ }
+
+ guint64
+ mono_lldiv_un (guint64 a, guint64 b)
+ {
+ return a / b;
+ }
+
+ guint64
+ mono_llrem_un (guint64 a, guint64 b)
+ {
+ return a % b;
+ }
+
+ /*
+  * mono_ctree_new:
+  * Allocate a tree node from the flow graph's mempool (zero filled)
+  * and initialise the fields whose default is not zero: all register
+  * assignments start out unallocated (-1), the value type unknown and
+  * the CLI address unset (-1).
+  */
+ MBTree *
+ mono_ctree_new (MonoMemPool *mp, int op, MBTree *left, MBTree *right)
+ {
+ MBTree *node = mono_mempool_alloc0 (mp, sizeof (MBTree));
+
+ node->op = op;
+ node->left = left;
+ node->right = right;
+ node->svt = VAL_UNKNOWN;
+ node->cli_addr = -1;
+ node->reg1 = node->reg2 = node->reg3 = -1;
+
+ return node;
+ }
+
+ /*
+  * mono_ctree_new_leaf:
+  * Convenience wrapper: allocate a tree node with no children.
+  */
+ MBTree *
+ mono_ctree_new_leaf (MonoMemPool *mp, int op)
+ {
+ return mono_ctree_new (mp, op, NULL, NULL);
+ }
+
+ /*
+  * arch_get_lmf_addr:
+  * Return the address of this thread's LMF (last managed frame)
+  * pointer, allocating and registering a fresh NULL-initialised slot
+  * in TLS on first use.  The g_malloc'd slot is intentionally never
+  * freed (lives for the thread's lifetime).
+  */
+ gpointer
+ arch_get_lmf_addr (void)
+ {
+ gpointer *lmf;
+
+ if ((lmf = TlsGetValue (lmf_thread_id)))
+ return lmf;
+
+ lmf = g_malloc (sizeof (gpointer));
+ *lmf = NULL;
+
+ TlsSetValue (lmf_thread_id, lmf);
+
+ return lmf;
+ }
+
+ /*
+  * Thin wrappers called from JIT-generated code: each just supplies
+  * the current application domain to the corresponding runtime API.
+  */
+ MonoArray*
+ mono_array_new_wrapper (MonoClass *eclass, guint32 n)
+ {
+ MonoDomain *domain = mono_domain_get ();
+
+ return mono_array_new (domain, eclass, n);
+ }
+
+ MonoObject *
+ mono_object_new_wrapper (MonoClass *klass)
+ {
+ MonoDomain *domain = mono_domain_get ();
+
+ return mono_object_new (domain, klass);
+ }
+
+ MonoString*
+ mono_string_new_wrapper (const char *text)
+ {
+ MonoDomain *domain = mono_domain_get ();
+
+ return mono_string_new (domain, text);
+ }
+
+ MonoString*
+ mono_ldstr_wrapper (MonoImage *image, guint32 index)
+ {
+ MonoDomain *domain = mono_domain_get ();
+
+ return mono_ldstr (domain, image, index);
+ }
+
+ /*
+  * mono_ldsflda:
+  * Return the address of a static field: resolve the class vtable in
+  * the current domain and offset into its static-data area.
+  */
+ gpointer
+ mono_ldsflda (MonoClass *klass, int offset)
+ {
+ MonoDomain *domain = mono_domain_get ();
+ MonoVTable *vt;
+ gpointer addr;
+
+ vt = mono_class_vtable (domain, klass);
+ addr = (char*)(vt->data) + offset;
+
+ return addr;
+ }
+
+ #ifdef DEBUG
+ /*
+  * MEMCOPY:
+  * Debug wrapper around memcpy () which dumps the copied bytes to
+  * stdout before performing the copy.
+  */
+ void *
+ MEMCOPY (void *dest, const void *src, size_t n)
+ {
+ size_t i;
+
+ /* n is a size_t: %zu, not %d (mismatched format is undefined);
+  * keep the full width instead of truncating into an int */
+ printf ("MEMCPY(%p to %p [%zu]) ", src, dest, n);
+
+ for (i = 0; i < n; i++)
+ printf ("%02x ", *((guint8 *)src + i));
+ printf ("\n");
+
+ return memcpy (dest, src, n);
+ }
+ #endif
+