* (C) 2001 Ximian, Inc.
*/
+#include <config.h>
+
#include <glib.h>
#include <stdio.h>
#include <string.h>
+#include <math.h>
+#ifndef PLATFORM_WIN32
#include <signal.h>
#include <sys/syscall.h>
+#endif
#include <mono/metadata/blob.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/object.h>
#include <mono/metadata/tabledefs.h>
+#include <mono/metadata/appdomain.h>
+#include <mono/metadata/marshal.h>
#include <mono/arch/x86/x86-codegen.h>
#include "regset.h"
-#include "mempool.h"
#include "jit.h"
+/*
+ * Pull the list of opcodes
+ */
+#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
+ a = i,
+
+enum {
+#include "mono/cil/opcode.def"
+ LAST = 0xff
+};
+#undef OPDEF
+
+/* alignment of activation frames */
+#define MONO_FRAME_ALIGNMENT 4
+
+void print_lmf (void);
+
#define MBTREE_TYPE MBTree
#define MBCGEN_TYPE MonoFlowGraph
#define MBCOST_DATA MonoFlowGraph
struct _MBTree {
guint16 op;
+ unsigned last_instr:1;
+ unsigned spilled:1;
+
MBTree *left, *right;
gpointer state;
gpointer emit;
gint32 addr;
gint32 cli_addr;
- unsigned is_jump:1;
- unsigned last_instr:1;
-
- guint8 exclude_mask;
-
gint8 reg1;
gint8 reg2;
gint8 reg3;
gint32 i;
gint64 l;
gpointer p;
- MonoBBlock *bb;
MonoMethod *m;
- MethodCallInfo *ci;
+ MonoBBlock *bb;
MonoClass *klass;
+ MonoClassField *field;
X86AddressInfo ainfo;
+ MonoJitFieldInfo fi;
+ MonoJitBranchInfo bi;
+ MonoJitCallInfo call_info;
+ MonoJitArgumentInfo arg_info;
} data;
};
-gint64 mono_llmult (gint64 a, gint64 b);
-guint64 mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, gint32 bh);
-gint64 mono_lldiv (gint64 a, gint64 b);
-gint64 mono_llrem (gint64 a, gint64 b);
-guint64 mono_lldiv_un (guint64 a, guint64 b);
-guint64 mono_llrem_un (guint64 a, guint64 b);
-
-gpointer arch_get_lmf_addr (void);
+gint64 mono_llmult (gint64 a, gint64 b);
+guint64 mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh);
+guint64 mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh);
+gint64 mono_lldiv (gint64 a, gint64 b);
+gint64 mono_llrem (gint64 a, gint64 b);
+guint64 mono_lldiv_un (guint64 a, guint64 b);
+guint64 mono_llrem_un (guint64 a, guint64 b);
+gpointer mono_ldsflda (MonoClass *klass, int offset);
+
+gpointer mono_ldvirtftn (MonoObject *this, int slot);
+gpointer mono_ldintftn (MonoObject *this, int slot);
+gpointer mono_ldftn (MonoMethod *method);
+
+void mono_emit_fast_iconv(MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+void mono_emit_fast_iconv_i8(MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+
+MonoArray*
+mono_array_new_wrapper (MonoClass *eclass, guint32 n);
+MonoObject *
+mono_object_new_wrapper (MonoClass *klass);
+MonoString*
+mono_ldstr_wrapper (MonoImage *image, guint32 ind);
gpointer
get_mono_object_isinst (void);
//#define DEBUG
#define REAL_PRINT_REG(text,reg) \
-g_assert (reg >= 0); \
+mono_assert (reg >= 0); \
x86_push_reg (s->code, X86_EAX); \
x86_push_reg (s->code, X86_EDX); \
x86_push_reg (s->code, X86_ECX); \
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 3*4); \
x86_pop_reg (s->code, X86_ECX); \
x86_pop_reg (s->code, X86_EDX); \
-x86_pop_reg (s->code, X86_EAX);
+x86_pop_reg (s->code, X86_EAX);
-#ifdef DEBUG
-#define MEMCOPY debug_memcpy
-void *MEMCOPY (void *dest, const void *src, size_t n);
+void *
+debug_memcopy (void *dest, const void *src, size_t n);
+#ifdef DEBUG
+#define MEMCOPY debug_memcopy
#define PRINT_REG(text,reg) REAL_PRINT_REG(text,reg)
#else
} while (0)
/* emit an exception if condition is fail */
-#define EMIT_COND_EXCEPTION(cond,signed,exc) \
- do { \
- x86_branch8 (s->code, cond, 12, signed); \
- x86_push_imm (s->code, exc); \
- x86_mov_reg_imm (s->code, X86_EAX, arch_get_throw_exception ()); \
- x86_call_reg (s->code, X86_EAX); \
+/* Branch over the throw sequence when 'cond' holds, otherwise push the
+ * exception class name and call the by-name throw helper through an
+ * absolute jump-info entry.  The forward branch skips 10 bytes -
+ * presumably push imm32 (5) + call rel32 (5); confirm if the emitted
+ * sequence ever changes. */
+#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
+	do { \
+		gpointer t; \
+		x86_branch8 (s->code, cond, 10, signed); \
+		x86_push_imm (s->code, exc_name); \
+		t = arch_get_throw_exception_by_name (); \
+		mono_add_jump_info (s, s->code, \
+				    MONO_JUMP_INFO_ABS, t); \
+		x86_call_code (s->code, 0); \
+	} while (0);
+
+/* Reserve 'pad' bytes of stack before pushing call arguments -
+ * presumably to honour MONO_FRAME_ALIGNMENT; a 4-byte pad uses a dummy
+ * push (shorter encoding) instead of a sub. */
+#define X86_ARG_PAD(pad) do { \
+	if (pad) { \
+		if (pad == 4) \
+			x86_push_reg (s->code, X86_EAX); \
+		else \
+			x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, pad); \
+	} \
+} while (0)
+
+/* Pop the whole argument area (arguments + pad, as recorded in
+ * call_info.frame_size) after the call returns. */
+#define X86_CALL_END do { \
+	int size = tree->data.call_info.frame_size; \
+	if (size) \
+		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, size); \
+} while (0)
+
+/* Shared call prologue: apply the alignment pad, push the instance
+ * pointer when the left child is not a NOP (the cmp against [lreg]
+ * appears to double as an implicit null check - NOTE(review): confirm),
+ * and push the address of the local receiving a valuetype return, if
+ * any (call_info.vtype_num). */
+#define X86_CALL_BEGIN do { \
+	int pad = tree->data.call_info.pad; \
+	X86_ARG_PAD (pad); \
+	if (tree->left->op != MB_TERM_NOP) { \
+		mono_assert (lreg >= 0); \
+		x86_push_reg (s->code, lreg); \
+		x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0); \
+	} \
+	if (tree->data.call_info.vtype_num) { \
+		int offset = VARINFO (s, tree->data.call_info.vtype_num).offset; \
+		x86_lea_membase (s->code, treg, X86_EBP, offset); \
+		x86_push_reg (s->code, treg); \
+	} \
+} while (0)
+
+/* we use this macro to move one lreg to another - source and
+ destination may overlap, but the register allocator has to
+ make sure that ((d1 < d2) && (s1 < s2))
+*/
+#define MOVE_LREG(d1,d2,s1,s2) \
+	do { \
+		/* mono_assert for consistency with the rest of the file */ \
+		mono_assert ((d1 < d2) && (s1 < s2)); \
+		/* copy in the order that cannot clobber a not-yet-read \
+		 * source half when the register pairs overlap */ \
+		if ((d1) <= (s1)) { \
+			if ((d1) != (s1)) \
+				x86_mov_reg_reg (s->code, d1, s1, 4); \
+			if ((d2) != (s2)) \
+				x86_mov_reg_reg (s->code, d2, s2, 4); \
+		} else { \
+			if ((d2) != (s2)) \
+				x86_mov_reg_reg (s->code, d2, s2, 4); \
+			if ((d1) != (s1)) \
+				x86_mov_reg_reg (s->code, d1, s1, 4); \
+		} \
} while (0);
+
%%
# terminal definitions
#
-# constatnts
+# constants
%term CONST_I4 CONST_I8 CONST_R4 CONST_R8
-%term LDIND_I1 LDIND_U1 LDIND_I2 LDIND_U2 LDIND_I4 LDIND_REF LDIND_I8 LDIND_R4 LDIND_R8
-%term LDIND_U4 LDIND_OBJ
-%term STIND_I1 STIND_I2 STIND_I4 STIND_REF STIND_I8 STIND_R4 STIND_R8 STIND_OBJ
-%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ ARG_STRING CALL_I4 CALL_I8 CALL_R8 CALL_VOID
-%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY
+%term LDIND_I1 LDIND_U1 LDIND_I2 LDIND_U2 LDIND_I4 LDIND_I8 LDIND_R4 LDIND_R8 LDIND_OBJ
+%term STIND_I1 STIND_I2 STIND_I4 STIND_I8 STIND_R4 STIND_R8 STIND_OBJ
+%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ CALL_I4 CALL_I8 CALL_R8 CALL_VOID
+%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY ENDFILTER JMP
%term ADD ADD_OVF ADD_OVF_UN SUB SUB_OVF SUB_OVF_UN MUL MUL_OVF MUL_OVF_UN
-%term DIV DIV_UN REM REM_UN AND OR XOR SHL SHR SHR_UN NEG NOT
-%term BLT BLT_UN BEQ BNE_UN BRTRUE BRFALSE BGE BGE_UN BLE BLE_UN BGT BGT_UN
-%term CEQ CLT CLT_UN CGT CGT_UN
-%term CONV_I4 CONV_I1 CONV_I2 CONV_I8 CONV_U8 CONV_R4 CONV_R8
-%term INTF_ADDR VFUNC_ADDR NOP NEWARR NEWOBJ NEWSTRUCT CPOBJ POP INITOBJ
+%term DIV DIV_UN REM REM_UN AND OR XOR SHL SHR SHR_UN NEG NOT CKFINITE
+%term COMPARE CBRANCH BRTRUE BRFALSE CSET
+%term CONV_I4 CONV_I1 CONV_I2 CONV_I8 CONV_U1 CONV_U2 CONV_U4 CONV_U8 CONV_R4 CONV_R8 CONV_R_UN
+%term INTF_ADDR VFUNC_ADDR NOP NEWARR NEWARR_SPEC NEWOBJ NEWOBJ_SPEC
+%term INITBLK CPBLK CPSRC POP INITOBJ LOCALLOC
%term ISINST CASTCLASS UNBOX
%term CONV_OVF_I1 CONV_OVF_U1 CONV_OVF_I2 CONV_OVF_U2 CONV_OVF_U4 CONV_OVF_U8 CONV_OVF_I4
%term CONV_OVF_I4_UN CONV_OVF_U1_UN CONV_OVF_U2_UN
%term CONV_OVF_I2_UN CONV_OVF_I8_UN CONV_OVF_I1_UN
-%term EXCEPTION THROW RETHROW HANDLER SAVE_LMF RESTORE_LMF
-%term LDLEN LDFTN TOSTRING
+%term EXCEPTION THROW RETHROW HANDLER CHECKTHIS
+%term LDLEN LDELEMA LDFTN LDVIRTFTN LDSTR LDSFLDA
+%term REMOTE_LDFLDA REMOTE_STIND_I1 REMOTE_STIND_I2 REMOTE_STIND_I4
+%term REMOTE_STIND_I8 REMOTE_STIND_R4 REMOTE_STIND_R8 REMOTE_STIND_OBJ
+%term SIN COS SQRT
+
+%term FUNC1 PROC2 PROC3 FREE OBJADDR VTADDR
#
# we start at stmt
tree->data.ainfo.amode = AMBase;
}
-base: ADD (reg, acon) {
+base: ADD (reg, CONST_I4) {
tree->data.ainfo.offset = tree->right->data.i;
tree->data.ainfo.basereg = tree->left->reg1;
tree->data.ainfo.amode = AMBase;
}
base: ADDR_L {
- tree->data.ainfo.offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+ tree->data.ainfo.offset = VARINFO (s, tree->data.i).offset;
tree->data.ainfo.basereg = X86_EBP;
tree->data.ainfo.amode = AMBase;
+} cost {
+ MBCOND (VARINFO (data, tree->data.i).reg < 0);
+ return 0;
}
index: reg {
# we pass exception in ECX to catch handler
reg: EXCEPTION {
- int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+ int offset = VARINFO (s, tree->data.i).offset;
if (tree->reg1 != X86_ECX)
x86_mov_reg_reg (s->code, tree->reg1, X86_ECX, 4);
}
stmt: THROW (reg) {
- tree->is_jump = TRUE;
+ gpointer target;
x86_push_reg (s->code, tree->left->reg1);
- x86_call_code (s->code, arch_get_throw_exception ());
+ target = arch_get_throw_exception ();
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
+ x86_call_code (s->code, target);
}
stmt: RETHROW {
- int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
-
- tree->is_jump = TRUE;
+ int offset = VARINFO (s, tree->data.i).offset;
+ gpointer target;
x86_push_membase (s->code, X86_EBP, offset);
- x86_call_code (s->code, arch_get_throw_exception ());
+
+ target = arch_get_throw_exception ();
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
+ x86_call_code (s->code, target);
}
stmt: HANDLER {
- gint32 addr = tree->data.bb->addr - tree->addr - 5;
- tree->is_jump = TRUE;
- x86_call_imm (s->code, addr);
+ /* save ESP (used by ENDFINALLY) */
+ x86_mov_membase_reg (s->code, X86_EBP, mono_exc_esp_offset, X86_ESP, 4);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_call_imm (s->code, 0);
}
stmt: ENDFINALLY {
+ /* restore ESP - which can be modified when we allocate value types
+ * in the finally handler */
+ x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_ret (s->code);
+}
+
+stmt: ENDFILTER (reg) {
+ /* restore ESP - which can be modified when we allocate value types
+ * in the filter */
+ x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
x86_ret (s->code);
}
-stmt: SAVE_LMF {
- tree->is_jump = TRUE;
+stmt: STIND_I4 (ADDR_L, ADD (LDIND_I4 (ADDR_L), CONST_I4)) {
+	/* in-place "local += constant": the cost MBCOND guarantees the
+	 * loaded and stored local are the same variable */
+	int vn = tree->left->data.i;
+	int treg = VARINFO (s, vn).reg;
+	int offset = VARINFO (s, vn).offset;
+	int data = tree->right->right->data.i;
+
+	if (data == 1) {
+		/* inc has a shorter encoding than add $1 */
+		if (treg >= 0)
+			x86_inc_reg (s->code, treg);
+		else
+			x86_inc_membase (s->code, X86_EBP, offset);
+	} else {
+		/* add directly to the register or to the stack slot */
+		if (treg >= 0)
+			x86_alu_reg_imm (s->code, X86_ADD, treg, data);
+		else
+			x86_alu_membase_imm (s->code, X86_ADD, X86_EBP, offset, data);
+	}
+} cost {
+	/* only fires when source and destination are the same local */
+	MBCOND (tree->right->left->left->data.i == tree->left->data.i);
+	return 0;
+}
+
+stmt: STIND_I4 (ADDR_L, SUB (LDIND_I4 (ADDR_L), CONST_I4)) {
+	/* in-place "local -= constant": mirror of the ADD rule above,
+	 * the cost MBCOND guarantees load and store hit the same local */
+	int vn = tree->left->data.i;
+	int treg = VARINFO (s, vn).reg;
+	int offset = VARINFO (s, vn).offset;
+	int data = tree->right->right->data.i;
+	if (data == 1) {
+		/* dec has a shorter encoding than sub $1 */
+		if (treg >= 0)
+			x86_dec_reg (s->code, treg);
+		else
+			x86_dec_membase (s->code, X86_EBP, offset);
+	} else {
+		/* subtract directly from the register or the stack slot */
+		if (treg >= 0)
+			x86_alu_reg_imm (s->code, X86_SUB, treg, data);
+		else
+			x86_alu_membase_imm (s->code, X86_SUB, X86_EBP, offset, data);
+	}
+} cost {
+	/* only fires when source and destination are the same local */
+	MBCOND (tree->right->left->left->data.i == tree->left->data.i);
+	return 0;
+}
- /* save all caller saved regs */
- x86_push_reg (s->code, X86_EBX);
- x86_push_reg (s->code, X86_EDI);
- x86_push_reg (s->code, X86_ESI);
- x86_push_reg (s->code, X86_EBP);
+stmt: STIND_I4 (ADDR_L, ADD (LDIND_I4 (ADDR_L), reg)) {
+ int vn = tree->left->data.i;
+ int treg = VARINFO (s, vn).reg;
+ int sreg = tree->right->right->reg1;
+ int offset = VARINFO (s, vn).offset;
- /* save the IP */
- x86_push_imm (s->code, s->code);
+ if (treg >= 0)
+ x86_alu_reg_reg (s->code, X86_ADD, treg, sreg);
+ else
+ x86_alu_membase_reg (s->code, X86_ADD, X86_EBP, offset, sreg);
- /* save method info */
- x86_push_imm (s->code, tree->data.m);
- /* get the address of lmf for the current thread */
- x86_call_code (s->code, arch_get_lmf_addr);
- /* push lmf */
- x86_push_reg (s->code, X86_EAX);
- /* push *lfm (previous_lmf) */
- x86_push_membase (s->code, X86_EAX, 0);
- /* *(lmf) = ESP */
- x86_mov_membase_reg (s->code, X86_EAX, 0, X86_ESP, 4);
+} cost {
+ MBCOND (tree->right->left->left->data.i == tree->left->data.i);
+ return 0;
}
-stmt: RESTORE_LMF {
- /* ebx = previous_lmf */
- x86_pop_reg (s->code, X86_EBX);
- /* edi = lmf */
- x86_pop_reg (s->code, X86_EDI);
- /* *(lmf) = previous_lmf */
- x86_mov_membase_reg (s->code, X86_EDI, 0, X86_EBX, 4);
+stmt: STIND_I4 (ADDR_L, LDIND_I4 (ADDR_L)) {
+ int treg1 = VARINFO (s, tree->left->data.i).reg;
+ int treg2 = VARINFO (s, tree->right->left->data.i).reg;
+ int offset1 = VARINFO (s, tree->left->data.i).offset;
+ int offset2 = VARINFO (s, tree->right->left->data.i).offset;
+
+ //{static int cx= 0; printf ("CX %5d\n", cx++);}
- /* discard method info */
- x86_pop_reg (s->code, X86_ESI);
+ if (treg1 >= 0 && treg2 >= 0) {
+ x86_mov_reg_reg (s->code, treg1, treg2, 4);
+ return;
+ }
+ if (treg1 >= 0 && treg2 < 0) {
+ x86_mov_reg_membase (s->code, treg1, X86_EBP, offset2, 4);
+ return;
+ }
+ if (treg1 < 0 && treg2 >= 0) {
+ x86_mov_membase_reg (s->code, X86_EBP, offset1, treg2, 4);
+ return;
+ }
- /* discard save IP */
- x86_pop_reg (s->code, X86_ESI);
+ g_assert_not_reached ();
- /* restore caller saved regs */
- x86_pop_reg (s->code, X86_EBP);
- x86_pop_reg (s->code, X86_ESI);
- x86_pop_reg (s->code, X86_EDI);
- x86_pop_reg (s->code, X86_EBX);
+} cost {
+ MBCOND (VARINFO (data, tree->left->data.i).reg >= 0 ||
+ VARINFO (data, tree->right->left->data.i).reg >= 0);
+ return 0;
}
-stmt: STIND_I4 (addr, reg) {
- PRINT_REG ("STIND_I4", tree->right->reg1);
-
+stmt: STIND_I4 (addr, CONST_I4) {
switch (tree->left->data.ainfo.amode) {
case AMImmediate:
- x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ x86_mov_mem_imm (s->code, tree->left->data.ainfo.offset, tree->right->data.i, 4);
break;
case AMBase:
- x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
- tree->left->data.ainfo.offset, tree->right->reg1, 4);
+ x86_mov_membase_imm (s->code, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->right->data.i, 4);
break;
case AMIndex:
- x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
+ x86_mov_memindex_imm (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset,
tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
- tree->right->reg1, 4);
+ tree->right->data.i, 4);
break;
case AMBaseIndex:
- x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ x86_mov_memindex_imm (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift,
- tree->right->reg1, 4);
+ tree->right->data.i, 4);
break;
}
}
-stmt: STIND_REF (addr, reg) {
- PRINT_REG ("STIND_REF", tree->right->reg1);
+stmt: STIND_I4 (addr, reg) {
switch (tree->left->data.ainfo.amode) {
}
}
+stmt: REMOTE_STIND_I4 (reg, reg) {
+	/* 32-bit field store into an object that may be a transparent
+	 * proxy: if the runtime-type test below matches, route the write
+	 * through mono_store_remote_field(); otherwise emit a direct
+	 * 4-byte store at the field offset (rebased past the MonoObject
+	 * header for valuetype classes). */
+	guint8 *br[2];
+	int treg = X86_EAX;
+	int lreg = tree->left->reg1;
+	int rreg = tree->right->reg1;
+	int offset;
+
+	/* pick a scratch register distinct from both operands */
+	if (lreg == treg)
+		treg = X86_EDX;
+
+	if (rreg == treg)
+		treg = X86_ECX;
+
+	/* load the object's first word and compare its first field
+	 * against the transparent proxy class - NOTE(review): looks like
+	 * obj->vtable->klass, confirm object layout */
+	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+	/* this is a transparent proxy - remote the call */
+
+	/* save value to stack */
+	x86_push_reg (s->code, rreg);
+
+	/* args: obj, klass, field, &value (5 pushes = 20 bytes popped below) */
+	x86_push_reg (s->code, X86_ESP);
+	x86_push_imm (s->code, tree->data.fi.field);
+	x86_push_imm (s->code, tree->data.fi.klass);
+	x86_push_reg (s->code, lreg);
+	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
+
+	br [1] = s->code; x86_jump8 (s->code, 0);
+
+	/* not a proxy: plain store into the object */
+	x86_patch (br [0], s->code);
+	offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+		tree->data.fi.field->offset;
+	x86_mov_membase_reg (s->code, lreg, offset, rreg, 4);
+
+	x86_patch (br [1], s->code);
+}
+
stmt: STIND_I1 (addr, reg) {
PRINT_REG ("STIND_I1", tree->right->reg1);
}
}
+stmt: REMOTE_STIND_I1 (reg, reg) {
+	/* 1-byte variant of REMOTE_STIND_I4: remote the store through
+	 * mono_store_remote_field() for transparent proxies, otherwise
+	 * emit a direct single-byte store at the field offset. */
+	guint8 *br[2];
+	int treg = X86_EAX;
+	int lreg = tree->left->reg1;
+	int rreg = tree->right->reg1;
+	int offset;
+
+	/* pick a scratch register distinct from both operands */
+	if (lreg == treg)
+		treg = X86_EDX;
+
+	if (rreg == treg)
+		treg = X86_ECX;
+
+	/* runtime transparent-proxy test, see REMOTE_STIND_I4 */
+	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+	/* this is a transparent proxy - remote the call */
+
+	/* save value to stack */
+	x86_push_reg (s->code, rreg);
+
+	/* args: obj, klass, field, &value (5 pushes = 20 bytes popped below) */
+	x86_push_reg (s->code, X86_ESP);
+	x86_push_imm (s->code, tree->data.fi.field);
+	x86_push_imm (s->code, tree->data.fi.klass);
+	x86_push_reg (s->code, lreg);
+	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+	x86_call_code (s->code, 0);
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
+
+	br [1] = s->code; x86_jump8 (s->code, 0);
+
+	/* not a proxy: plain 1-byte store into the object */
+	x86_patch (br [0], s->code);
+	offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+		tree->data.fi.field->offset;
+	x86_mov_membase_reg (s->code, lreg, offset, rreg, 1);
+
+	x86_patch (br [1], s->code);
+}
+
stmt: STIND_I2 (addr, reg) {
PRINT_REG ("STIND_I2", tree->right->reg1);
}
}
-reg: LDIND_I4 (addr) {
+stmt: REMOTE_STIND_I2 (reg, reg) {
+ guint8 *br[2];
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+ int offset;
- switch (tree->left->data.ainfo.amode) {
+ if (lreg == treg)
+ treg = X86_EDX;
- case AMImmediate:
- x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
- break;
+ if (rreg == treg)
+ treg = X86_ECX;
- case AMBase:
- x86_mov_reg_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
- tree->left->data.ainfo.offset, 4);
- break;
- case AMIndex:
- x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
- tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, 4);
- break;
- case AMBaseIndex:
- x86_mov_reg_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
- tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
- tree->left->data.ainfo.shift, 4);
- break;
- }
+ x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ /* this is a transparent proxy - remote the call */
- PRINT_REG ("LDIND_I4", tree->reg1);
+ /* save value to stack */
+ x86_push_reg (s->code, rreg);
+
+ x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, lreg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
+
+ br [1] = s->code; x86_jump8 (s->code, 0);
+
+ x86_patch (br [0], s->code);
+ offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+ tree->data.fi.field->offset;
+ x86_mov_membase_reg (s->code, lreg, offset, rreg, 2);
+
+ x86_patch (br [1], s->code);
+}
+
+reg: LDIND_I4 (ADDR_L) {
+	/* load of a register-allocated local: a plain reg-to-reg move,
+	 * elided when source and destination already coincide */
+	int treg = VARINFO (s, tree->left->data.i).reg;
+
+	if (treg != tree->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, treg, 4);
+
+} cost {
+	/* only applicable when the variable lives in a register */
+	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
+	return 0;
+}
+
+stmt: STIND_I4 (ADDR_L, CONST_I4) {
+	/* store of a constant into a register-allocated local */
+	int treg = VARINFO (s, tree->left->data.i).reg;
+
+	x86_mov_reg_imm (s->code, treg, tree->right->data.i);
+
+} cost {
+	/* only applicable when the variable lives in a register */
+	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
+	return 0;
+}
+
+stmt: STIND_I4 (ADDR_L, LDIND_I4 (ADDR_L)) {
+	/* copy a frame-allocated local into a register-allocated one
+	 * with a single load from [EBP + offset] */
+	int treg = VARINFO (s, tree->left->data.i).reg;
+	int offset = VARINFO (s, tree->right->left->data.i).offset;
+
+	x86_mov_reg_membase (s->code, treg, X86_EBP, offset, 4);
+} cost {
+	/* destination in a register, source spilled to the frame */
+	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
+	MBCOND ((VARINFO (data, tree->right->left->data.i).reg < 0));
+	return 0;
+}
+
+stmt: STIND_I4 (ADDR_L, reg) {
+	/* store a computed value into a register-allocated local;
+	 * the move is elided when the value is already in place */
+	int treg = VARINFO (s, tree->left->data.i).reg;
+
+	if (treg != tree->right->reg1)
+		x86_mov_reg_reg (s->code, treg, tree->right->reg1, 4);
+
+} cost {
+	/* only applicable when the variable lives in a register */
+	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
+	return 0;
}
-reg: LDIND_REF (addr) {
+
+reg: LDIND_I4 (addr) {
switch (tree->left->data.ainfo.amode) {
}
- PRINT_REG ("LDIND_REF", tree->reg1);
+ PRINT_REG ("LDIND_I4", tree->reg1);
}
reg: LDIND_I1 (addr) {
PRINT_REG ("LDIND_U2", tree->reg1);
}
-reg: LDIND_U4 (addr) {
- switch (tree->left->data.ainfo.amode) {
+reg: REMOTE_LDFLDA (reg) {
+ guint8 *br[2];
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
- case AMImmediate:
- x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
- break;
+ if (lreg == X86_EAX)
+ treg = X86_EDX;
- case AMBase:
- x86_mov_reg_membase (s->code, tree->reg1, tree->left->data.ainfo.basereg,
- tree->left->data.ainfo.offset, 4);
- break;
- case AMIndex:
- x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG, tree->left->data.ainfo.offset,
- tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, 4);
- break;
- case AMBaseIndex:
- x86_mov_reg_memindex (s->code, tree->reg1, tree->left->data.ainfo.basereg,
- tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
- tree->left->data.ainfo.shift, 4);
- break;
- }
+ if (tree->reg1 != treg)
+ x86_push_reg (s->code, treg);
+
+ x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* this is a transparent proxy - remote the call */
+ if (treg != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ if (treg != X86_EDX)
+ x86_push_reg (s->code, X86_EDX);
+ x86_push_reg (s->code, X86_ECX);
+
+ x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, lreg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_load_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
+
+ if (treg != X86_EAX)
+ x86_mov_reg_reg (s->code, treg, X86_EAX, 4);
+
+ x86_pop_reg (s->code, X86_ECX);
+ if (treg != X86_EDX)
+ x86_pop_reg (s->code, X86_EDX);
+ if (treg != X86_EAX)
+ x86_pop_reg (s->code, X86_EAX);
+
+ x86_mov_reg_reg (s->code, tree->reg1, treg, 4);
+
+ br [1] = s->code; x86_jump8 (s->code, 0);
+
+ x86_patch (br [0], s->code);
+ if (tree->data.fi.klass->valuetype)
+ x86_lea_membase (s->code, tree->reg1, lreg,
+ tree->data.fi.field->offset - sizeof (MonoObject));
+ else
+ x86_lea_membase (s->code, tree->reg1, lreg, tree->data.fi.field->offset);
- PRINT_REG ("LDIND_U4", tree->reg1);
+ x86_patch (br [1], s->code);
+
+ if (tree->reg1 != treg)
+ x86_pop_reg (s->code, treg);
}
-reg: ADDR_L 5 {
- int offset = g_array_index (s->varinfo, MonoVarInfo, tree->data.i).offset;
+reg: ADDR_L {
+ int offset = VARINFO (s, tree->data.i).offset;
+
x86_lea_membase (s->code, tree->reg1, X86_EBP, offset);
PRINT_REG ("ADDR_L", tree->reg1);
+} cost {
+ MBCOND (VARINFO (data, tree->data.i).reg < 0);
+ return 5;
}
}
reg: CONV_I1 (reg) {
- x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xff);
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE);
+}
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+reg: CONV_U1 (reg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE);
}
reg: CONV_I2 (reg) {
- x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xffff);
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE);
+}
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+reg: CONV_U2 (reg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}
reg: CONST_I4 1 {
PRINT_REG ("CONV_I4", tree->left->reg1);
}
+reg: CONV_U4 (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ PRINT_REG ("CONV_U4", tree->left->reg1);
+}
+
+reg: CONV_OVF_I4 (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ PRINT_REG ("CONV_OVF_I4", tree->left->reg1);
+}
+
reg: CONV_OVF_U4 (reg) {
/* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */
x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
reg: CONV_OVF_I4_UN (reg) {
/* Keep in sync with CONV_OVF_U4 above, they are the same on 32-bit machines */
x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
reg: CONV_OVF_I1 (reg) {
/* probe value to be within -128 to 127 */
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 127);
- EMIT_COND_EXCEPTION (X86_CC_LE, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException");
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -128);
- EMIT_COND_EXCEPTION (X86_CC_GT, TRUE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, TRUE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE);
}
reg: CONV_OVF_I1_UN (reg) {
/* probe values between 0 to 128 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff80);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE);
}
reg: CONV_OVF_U1 (reg) {
/* Keep in sync with CONV_OVF_U1_UN routine below, they are the same on 32-bit machines */
/* probe value to be within 0 to 255 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE);
}
reg: CONV_OVF_U1_UN (reg) {
/* Keep in sync with CONV_OVF_U1 routine above, they are the same on 32-bit machines */
/* probe value to be within 0 to 255 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE);
}
-reg: CONV_OVF_I2 (reg) {
+reg: CONV_OVF_I2 (reg) {
/* Probe value to be within -32768 and 32767 */
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 32767);
- EMIT_COND_EXCEPTION (X86_CC_LE, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException");
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -32768);
- // fixme: check branch
- g_assert_not_reached ();
- x86_branch8 (s->code, X86_CC_LT, -17, TRUE);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GE, TRUE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE);
}
reg: CONV_OVF_U2 (reg) {
/* Keep in sync with CONV_OVF_U2_UN below, they are the same on 32-bit machines */
/* Probe value to be within 0 and 65535 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, TRUE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}
reg: CONV_OVF_U2_UN (reg) {
/* Keep in sync with CONV_OVF_U2 above, they are the same on 32-bit machines */
/* Probe value to be within 0 and 65535 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}
reg: CONV_OVF_I2_UN (reg) {
/* Convert uint value into short, value within 0 and 32767 */
x86_test_reg_imm (s->code, tree->left->reg1, 0xffff8000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, FALSE, get_exception_overflow ());
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
+}
+
+reg: MUL (reg, CONST_I4) "MB_USE_OPT1(0)" {
+	/* strength-reduce multiplication by a constant: a single shift
+	 * for powers of two, LEA/ADD/SHL sequences for a few common
+	 * factors, and imul as the general fallback */
+	unsigned int i, j, k, v;
+
+	v = tree->right->data.i;
+	/* find the lowest set bit (i) and a mask of all higher bits (k) */
+	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
+		if (v & j)
+			break;
+	}
+
+	/* i == 32 means v == 0, (v & k) means more than one bit is set:
+	 * in both cases a single shift is not enough.  (v is unsigned,
+	 * so a 'v < 0' test here would always be false; for a power of
+	 * two the shift is correct regardless of sign, since x86
+	 * multiplication is modular.) */
+	if (i == 32 || (v & k)) {
+		switch (v) {
+		case 3:
+			/* LEA r1, [r2 + r2*2] */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
+			break;
+		case 5:
+			/* LEA r1, [r2 + r2*4] */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
+			break;
+		case 6:
+			/* LEA r1, [r2 + r2*2] */
+			/* ADD r1, r1 */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
+			x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+			break;
+		case 9:
+			/* LEA r1, [r2 + r2*8] */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 3);
+			break;
+		case 10:
+			/* LEA r1, [r2 + r2*4] */
+			/* ADD r1, r1 */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
+			x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+			break;
+		case 12:
+			/* LEA r1, [r2 + r2*2] */
+			/* SHL r1, 2 */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
+			x86_shift_reg_imm (s->code, X86_SHL, tree->reg1, 2);
+			break;
+		case 25:
+			/* LEA r1, [r2 + r2*4] */
+			/* LEA r1, [r1 + r1*4] */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
+			x86_lea_memindex (s->code, tree->reg1, tree->reg1, 0, tree->reg1, 2);
+			break;
+		case 100:
+			/* LEA r1, [r2 + r2*4] */
+			/* SHL r1, 2 */
+			/* LEA r1, [r1 + r1*4] */
+			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
+			x86_shift_reg_imm (s->code, X86_SHL, tree->reg1, 2);
+			x86_lea_memindex (s->code, tree->reg1, tree->reg1, 0, tree->reg1, 2);
+			break;
+		default:
+			x86_imul_reg_reg_imm (s->code, tree->reg1, tree->left->reg1, tree->right->data.i);
+			break;
+		}
+	} else {
+		/* exactly one bit set: multiply by 2^i is a left shift */
+		x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, i);
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	}
}
reg: MUL (reg, reg) {
reg: MUL_OVF (reg, reg) {
x86_imul_reg_reg (s->code, tree->left->reg1, tree->right->reg1);
- EMIT_COND_EXCEPTION (X86_CC_NO, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
reg: MUL_OVF_UN (reg, reg) {
- //fixme: implement me
- g_assert_not_reached ();
-}
-
-reg: DIV (reg, reg) {
- g_assert (tree->right->reg1 != X86_EAX);
-
+ mono_assert (tree->right->reg1 != X86_EAX);
+
if (tree->left->reg1 != X86_EAX)
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
- x86_cdq (s->code);
- x86_div_reg (s->code, tree->right->reg1, TRUE);
+ x86_mul_reg (s->code, tree->right->reg1, FALSE);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
-reg: DIV_UN (reg, reg) {
- g_assert (tree->right->reg1 != X86_EAX);
+reg: DIV (reg, CONST_I4) {
+	unsigned int i, j, k, v;
-	if (tree->left->reg1 != X86_EAX)
+	/* find i such that the divisor is (1 << i); the cost function below
+	 * guarantees the divisor is a positive power of two */
+	v = tree->right->data.i;
+	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
+		if (v & j)
+			break;
+	}
+
+	/* NOTE(review): SAR rounds toward negative infinity, while IL div
+	 * truncates toward zero; this differs for negative dividends — the
+	 * cost guard only checks the divisor, confirm dividends here are
+	 * known non-negative */
+	x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg1, i);
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+
+} cost {
+	unsigned int i, j, k, v;
+
+	v = tree->right->data.i;
+
+	/* reject non-positive divisors: SAR only matches a positive power of
+	 * two.  (The previous test read `v' before initializing it, and
+	 * compared an unsigned value against zero — always false.) */
+	if ((gint32)v <= 0)
+		return MBMAXCOST;
+
+	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
+		if (v & j)
+			break;
+	}
+
+	/* more than one bit set => not a power of two */
+	if (i == 32 || v & k)
+		return MBMAXCOST;
+
+	return 0;
+
+}
+
+reg: DIV (reg, reg) {
+ mono_assert (tree->right->reg1 != X86_EAX);
+
+ if (tree->left->reg1 != X86_EAX)
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
x86_cdq (s->code);
+ x86_div_reg (s->code, tree->right->reg1, TRUE);
+
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
+reg: DIV_UN (reg, CONST_I4) {
+	unsigned int i, j, k, v;
+	double f, r;
+
+	v = tree->right->data.i;
+	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
+		if (v & j)
+			break;
+	}
+
+	if (i == 32 || v & k) {
+		/* not a power of two: divide via reciprocal multiplication.
+		 * Find the most significant set bit of v.  (The previous loop
+		 * condition `--i >= 0' with unsigned i was always true, so a
+		 * zero divisor constant made the JIT loop forever; `i-- > 0'
+		 * visits the same bits and terminates.) */
+		for (i = 32, j = 0x80000000; i-- > 0; j >>= 1) {
+			if (v & j) break;
+		}
+
+		/* k = 32 + number of significant bits in v - 1 */
+		k = 32 + i;
+
+		/* f = 2^k / v; the 32-bit MUL below computes (x * m) >> 32,
+		 * then we shift right by (k - 32) */
+		f = 1.0f / v;
+		for (i = 0; i < k; i++) f *= 2.0f;
+
+
+		r = f - floor(f);
+
+		if (r == 0) {
+			/* exact: divisor is a power of two after all */
+			x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, k - 32);
+			if (tree->reg1 != tree->left->reg1)
+				x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+		} else if (r < 0.5f) {
+			/* round the multiplier down and pre-increment the
+			 * dividend instead */
+			if (tree->left->reg1 != X86_EAX)
+				x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+			x86_mov_reg_imm (s->code, X86_EDX, (guint32) floor(f));
+			/* x86_inc_reg (s->code, X86_EAX); */
+			/* INC is faster but we have to check for overflow. */
+			x86_alu_reg_imm (s->code, X86_ADD, X86_EAX, 1);
+			x86_branch8(s->code, X86_CC_C, 2, FALSE);
+			x86_mul_reg (s->code, X86_EDX, FALSE);
+			x86_shift_reg_imm (s->code, X86_SHR, X86_EDX, k - 32);
+			if (tree->reg1 != X86_EDX)
+				x86_mov_reg_reg (s->code, tree->reg1, X86_EDX, 4);
+		} else {
+			/* round the multiplier up */
+			if (tree->left->reg1 != X86_EAX)
+				x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+			x86_mov_reg_imm (s->code, X86_EDX, (guint32) ceil(f));
+			x86_mul_reg (s->code, X86_EDX, FALSE);
+			x86_shift_reg_imm (s->code, X86_SHR, X86_EDX, k - 32);
+			if (tree->reg1 != X86_EDX)
+				x86_mov_reg_reg (s->code, tree->reg1, X86_EDX, 4);
+		}
+	} else {
+		/* power of two: plain logical shift */
+		x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, i);
+		if (tree->reg1 != tree->left->reg1)
+			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+	}
+
+}
+
+reg: DIV_UN (reg, reg) {
+ mono_assert (tree->right->reg1 != X86_EAX);
+
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+
+ x86_mov_reg_imm (s->code, X86_EDX, 0);
x86_div_reg (s->code, tree->right->reg1, FALSE);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
reg: REM (reg, reg) {
- g_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EDX);
if (tree->left->reg1 != X86_EAX)
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
+ /* sign extend to 64bit in EAX/EDX */
x86_cdq (s->code);
x86_div_reg (s->code, tree->right->reg1, TRUE);
x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
reg: REM_UN (reg, reg) {
- g_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EAX);
+ mono_assert (tree->right->reg1 != X86_EDX);
if (tree->left->reg1 != X86_EAX)
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
- x86_cdq (s->code);
+ /* zero extend to 64bit in EAX/EDX */
+ x86_mov_reg_imm (s->code, X86_EDX, 0);
x86_div_reg (s->code, tree->right->reg1, FALSE);
x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
reg: ADD (reg, CONST_I4) "MB_USE_OPT1(0)" {
}
+/* fold an ADD with a local variable that was allocated to a hardware
+ * register: add the local's register directly, no load needed */
+reg: ADD (reg, LDIND_I4 (ADDR_L)) {
+	int treg = VARINFO (s, tree->right->left->data.i).reg;
+
+	x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, treg);
+
+	if (tree->reg1 != tree->left->reg1)
+		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+} cost {
+	/* only applicable when the local actually lives in a register */
+	MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0));
+	return 0;
+}
+
reg: ADD (reg, reg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
reg: ADD_OVF (reg, reg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
- EMIT_COND_EXCEPTION (X86_CC_NO, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
reg: ADD_OVF_UN (reg, reg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
- EMIT_COND_EXCEPTION (X86_CC_NC, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
+reg: SUB (reg, LDIND_I4 (ADDR_L)) {
+ int treg = VARINFO (s, tree->right->left->data.i).reg;
+
+ x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, treg);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+} cost {
+ MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0));
+ return 0;
+}
+
reg: SUB (reg, reg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
reg: SUB_OVF (reg, reg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
- EMIT_COND_EXCEPTION (X86_CC_NO, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
reg: SUB_OVF_UN (reg, reg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
- EMIT_COND_EXCEPTION (X86_CC_NC, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
-reg: CEQ (reg, reg) {
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
- x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
-}
+reg: CSET (cflags) {
-reg: CGT (reg, reg) {
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- x86_set_reg (s->code, X86_CC_GT, tree->reg1, TRUE);
- x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
-}
+ switch (tree->data.i) {
+ case CEE_CEQ:
+ x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
+ break;
+ case CEE_CGT:
+ x86_set_reg (s->code, X86_CC_GT, tree->reg1, TRUE);
+ break;
+ case CEE_CGT_UN:
+ x86_set_reg (s->code, X86_CC_GT, tree->reg1, FALSE);
+ break;
+ case CEE_CLT:
+ x86_set_reg (s->code, X86_CC_LT, tree->reg1, TRUE);
+ break;
+ case CEE_CLT_UN:
+ x86_set_reg (s->code, X86_CC_LT, tree->reg1, FALSE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
-reg: CGT_UN (reg, reg) {
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- x86_set_reg (s->code, X86_CC_GT, tree->reg1, FALSE);
x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
}
-reg: CLT (reg, reg) {
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- x86_set_reg (s->code, X86_CC_LT, tree->reg1, TRUE);
- x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
-}
+reg: AND (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, tree->right->data.i);
-reg: CLT_UN (reg, reg) {
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- x86_set_reg (s->code, X86_CC_LT, tree->reg1, FALSE);
- x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
reg: AND (reg, reg) {
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
+reg: OR (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_OR, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
reg: OR (reg, reg) {
x86_alu_reg_reg (s->code, X86_OR, tree->left->reg1, tree->right->reg1);
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
+reg: XOR (reg, CONST_I4) "MB_USE_OPT1(0)" {
+ x86_alu_reg_imm (s->code, X86_XOR, tree->left->reg1, tree->right->data.i);
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
reg: XOR (reg, reg) {
x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->right->reg1);
}
reg: SHL (reg, reg) {
- if (tree->right->reg1 != X86_ECX)
+ if (tree->right->reg1 != X86_ECX) {
+ x86_push_reg (s->code, X86_ECX);
x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ }
x86_shift_reg (s->code, X86_SHL, tree->left->reg1);
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- g_assert (tree->reg1 != X86_ECX &&
- tree->left->reg1 != X86_ECX);
+ if (tree->right->reg1 != X86_ECX)
+ x86_pop_reg (s->code, X86_ECX);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
}
reg: SHR (reg, CONST_I4) {
}
reg: SHR (reg, reg) {
- if (tree->right->reg1 != X86_ECX)
+ if (tree->right->reg1 != X86_ECX) {
+ x86_push_reg (s->code, X86_ECX);
x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ }
+
x86_shift_reg (s->code, X86_SAR, tree->left->reg1);
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- g_assert (tree->reg1 != X86_ECX &&
- tree->left->reg1 != X86_ECX);
+ if (tree->right->reg1 != X86_ECX)
+ x86_pop_reg (s->code, X86_ECX);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
}
reg: SHR_UN (reg, CONST_I4) {
}
reg: SHR_UN (reg, reg) {
- if (tree->right->reg1 != X86_ECX)
+ if (tree->right->reg1 != X86_ECX) {
+ x86_push_reg (s->code, X86_ECX);
x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+ }
+
x86_shift_reg (s->code, X86_SHR, tree->left->reg1);
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- g_assert (tree->reg1 != X86_ECX &&
- tree->left->reg1 != X86_ECX);
+ if (tree->right->reg1 != X86_ECX)
+ x86_pop_reg (s->code, X86_ECX);
+
+ mono_assert (tree->reg1 != X86_ECX &&
+ tree->left->reg1 != X86_ECX);
+}
+
+/* load the address of a static field: call the mono_ldsflda helper with
+ * (klass, field offset); caller-save registers are preserved around the
+ * call, and the result is moved from EAX into the target register */
+reg: LDSFLDA (CONST_I4) {
+	if (tree->reg1 != X86_EAX)
+		x86_push_reg (s->code, X86_EAX);
+	x86_push_reg (s->code, X86_ECX);
+	x86_push_reg (s->code, X86_EDX);
+
+	x86_push_imm (s->code, tree->left->data.i);
+	x86_push_imm (s->code, tree->data.klass);
+	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldsflda);
+	x86_call_code (s->code, 0);
+	/* pop the two arguments */
+	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+	x86_pop_reg (s->code, X86_EDX);
+	x86_pop_reg (s->code, X86_ECX);
+	if (tree->reg1 != X86_EAX) {
+		x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+		x86_pop_reg (s->code, X86_EAX);
+	}
+}
# array support
reg: LDLEN (reg) {
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
- EMIT_COND_EXCEPTION (X86_CC_NE, TRUE, get_exception_null_reference ());
-
x86_mov_reg_membase (s->code, tree->reg1, tree->left->reg1,
- G_STRUCT_OFFSET (MonoArray, bounds), 4);
- x86_mov_reg_membase (s->code, tree->reg1, tree->reg1,
- G_STRUCT_OFFSET (MonoArrayBounds, length), 4);
+ G_STRUCT_OFFSET (MonoArray, max_length), 4);
}
-#reg: LDELEMA (reg, reg) {
-# x86_imul_reg_reg_imm (s->code, tree->right->reg1, tree->right->reg1, tree->data.i);
-# x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->right->reg1);
-# x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, G_STRUCT_OFFSET (MonoArray, vector));
-#}
+reg: LDELEMA (reg, CONST_I4) {
+ int ind;
-reg: TOSTRING (reg) {
- guint8 *start = s->code, *l1, *le;
- int i;
+ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length), tree->right->data.i);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "IndexOutOfRangeException");
+
+ ind = tree->data.i * tree->right->data.i + G_STRUCT_OFFSET (MonoArray, vector);
+
+ x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, ind);
- tree->is_jump = TRUE;
- l1 = le = NULL;
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- for (i = 0; i < 2; i++) {
- s->code = start;
+}
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
- x86_branch8 (s->code, X86_CC_EQ, le - l1, FALSE);
- l1 = s->code;
+reg: LDELEMA (reg, reg) {
- if (tree->reg1 != X86_EAX)
- x86_push_reg (s->code, X86_EAX);
- x86_push_reg (s->code, X86_ECX);
- x86_push_reg (s->code, X86_EDX);
+ x86_alu_reg_membase (s->code, X86_CMP, tree->right->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length));
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, FALSE, "IndexOutOfRangeException");
- x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_string_new);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+ if (tree->data.i == 1 || tree->data.i == 2 ||
+ tree->data.i == 4 || tree->data.i == 8) {
+ static int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
+ x86_lea_memindex (s->code, tree->reg1, tree->left->reg1,
+ G_STRUCT_OFFSET (MonoArray, vector), tree->right->reg1,
+ fast_log2 [tree->data.i]);
+ } else {
+ x86_imul_reg_reg_imm (s->code, tree->right->reg1, tree->right->reg1, tree->data.i);
+ x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->right->reg1);
+ x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, G_STRUCT_OFFSET (MonoArray, vector));
+ }
+}
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_ECX);
- if (tree->reg1 != X86_EAX) {
- x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
- x86_pop_reg (s->code, X86_EAX);
- }
+reg: LDSTR {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
- le = s->code;
+ x86_push_imm (s->code, tree->data.p);
+ x86_push_imm (s->code, s->method->klass->image);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldstr_wrapper);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
}
+
+ PRINT_REG ("LDSTR", tree->reg1);
}
reg: NEWARR (reg) {
x86_push_reg (s->code, tree->left->reg1);
x86_push_imm (s->code, tree->data.p);
- x86_mov_reg_imm (s->code, X86_EAX, mono_array_new);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer) + 4);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_array_new_wrapper);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
PRINT_REG ("NEWARR", tree->reg1);
}
+reg: NEWARR_SPEC (reg) {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_push_imm (s->code, tree->data.p);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_array_new_specific);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+
+ PRINT_REG ("NEWARR_SPEC", tree->reg1);
+}
+
reg: NEWOBJ {
if (tree->reg1 != X86_EAX)
x86_push_reg (s->code, X86_EAX);
x86_push_reg (s->code, X86_EDX);
x86_push_imm (s->code, tree->data.klass);
- x86_mov_reg_imm (s->code, X86_EAX, mono_object_new);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_wrapper);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
PRINT_REG ("NEWOBJ", tree->reg1);
}
-reg: NEWSTRUCT {
- int size = tree->data.i;
- int sa;
-
- g_assert (size > 0);
+reg: NEWOBJ_SPEC {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
- sa = size + 3;
- sa &= ~3;
+ x86_push_imm (s->code, tree->data.p);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_specific);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa);
- x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4);
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+ PRINT_REG ("NEWOBJ_SPEC", tree->reg1);
}
-reg: UNBOX (reg) {
- if (tree->reg1 != tree->left->reg1)
+reg: OBJADDR (reg) {
+ if (tree->left->reg1 != tree->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
-
- x86_alu_reg_imm (s->code, X86_CMP, tree->reg1, 0);
- EMIT_COND_EXCEPTION (X86_CC_NE, TRUE, get_exception_null_reference ());
- x86_alu_membase_imm (s->code, X86_CMP, tree->reg1, 0, ((int)(tree->data.klass)));
- EMIT_COND_EXCEPTION (X86_CC_EQ, TRUE, get_exception_invalid_cast ());
- x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, sizeof (MonoObject));
}
-reg: CASTCLASS (reg) {
- guint8 *start = s->code, *l1, *l2, *le;
- int i;
+reg: VTADDR (ADDR_L) {
+ int offset = VARINFO (s, tree->left->data.i).offset;
- tree->is_jump = TRUE;
- l1 = l2 = le = NULL;
-
- for (i = 0; i < 2; i++) {
- s->code = start;
-
- if (tree->reg1 != X86_EAX)
- x86_push_reg (s->code, X86_EAX);
-
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
- x86_branch8 (s->code, X86_CC_EQ, le - l2, FALSE);
- l2 = s->code;
- x86_push_reg (s->code, X86_ECX);
- x86_push_reg (s->code, X86_EDX);
-
- x86_push_imm (s->code, tree->data.klass);
- x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_object_isinst);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
-
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_ECX);
+ x86_lea_membase (s->code, tree->reg1, X86_EBP, offset);
+}
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0);
- EMIT_COND_EXCEPTION (X86_CC_NE, TRUE, get_exception_invalid_cast ());
- if (tree->reg1 != X86_EAX) {
- x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
- x86_pop_reg (s->code, X86_EAX);
- }
+stmt: FREE (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, g_free);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+}
- le = s->code;
+stmt: PROC2 (reg, reg) {
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+}
- }
+stmt: PROC3 (reg, CPSRC (reg, reg)) {
+ x86_push_reg (s->code, tree->right->right->reg1);
+ x86_push_reg (s->code, tree->right->left->reg1);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
}
-reg: ISINST (reg) {
+reg: FUNC1 (reg) {
if (tree->reg1 != X86_EAX)
x86_push_reg (s->code, X86_EAX);
x86_push_reg (s->code, X86_ECX);
x86_push_reg (s->code, X86_EDX);
- x86_push_imm (s->code, tree->data.klass);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_object_isinst);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
x86_pop_reg (s->code, X86_EAX);
}
+}
+
+reg: LOCALLOC (reg) {
+	int offset = 0;
+	/* size must be aligned to MONO_FRAME_ALIGNMENT bytes */
+	x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, MONO_FRAME_ALIGNMENT - 1);
+	x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, ~(MONO_FRAME_ALIGNMENT - 1));
+
+	/* allocate space on stack */
+	x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1);
+
+	if (tree->data.i) {
+		/* initialize with zero: rep stosl clobbers EAX/ECX/EDI, so
+		 * save whichever of them are live; `offset' tracks how far
+		 * the saves moved ESP away from the allocated block */
+		if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) {
+			x86_push_reg (s->code, X86_EAX);
+			offset += 4;
+		}
+		if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) {
+			x86_push_reg (s->code, X86_ECX);
+			offset += 4;
+		}
+		if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI) {
+			x86_push_reg (s->code, X86_EDI);
+			offset += 4;
+		}
+
+		/* byte count -> dword count (size is 4-byte aligned above) */
+		x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, 2);
+		if (tree->left->reg1 != X86_ECX)
+			/* move the dword count into ECX; the old code used
+			 * x86_mov_reg_imm here, loading the register NUMBER as
+			 * an immediate instead of the register's value */
+			x86_mov_reg_reg (s->code, X86_ECX, tree->left->reg1, 4);
+		x86_alu_reg_reg (s->code, X86_XOR, X86_EAX, X86_EAX);
+
+		x86_lea_membase (s->code, X86_EDI, X86_ESP, offset);
+		x86_cld (s->code);
+		x86_prefix (s->code, X86_REP_PREFIX);
+		x86_stosl (s->code);
+
+		if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI)
+			x86_pop_reg (s->code, X86_EDI);
+		if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX)
+			x86_pop_reg (s->code, X86_ECX);
+		if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX)
+			x86_pop_reg (s->code, X86_EAX);
+	}
+
+	/* result is the start of the allocated block */
+	x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4);
+}
-stmt: INITOBJ (reg) {
- int i, j;
+reg: UNBOX (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- i = tree->data.i;
+ x86_push_reg (s->code, tree->reg1);
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4);
+ x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, tree->reg1,
+ G_STRUCT_OFFSET (MonoClass, element_class), ((int)(tree->data.klass->element_class)));
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "InvalidCastException");
+ x86_pop_reg (s->code, tree->reg1);
+ x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, sizeof (MonoObject));
+}
- if (i == 1 || i == 2 || i == 4) {
- int t = X86_ECX;
+reg: CASTCLASS (reg) {
+ MonoClass *klass = tree->data.klass;
+ guint8 *br [2];
+ int lreg = tree->left->reg1;
+
+ x86_push_reg (s->code, lreg);
+ x86_test_reg_reg (s->code, lreg, lreg);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+
+ if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ /* lreg = obj->vtable */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoVTable, max_interface_id),
+ klass->interface_id);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GE, FALSE, "InvalidCastException");
+ /* lreg = obj->vtable->interface_offsets */
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, klass->interface_id << 2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, FALSE, "InvalidCastException");
+ } else {
- if (tree->left->reg1 != X86_EAX)
- t = X86_EAX;
+ /* lreg = obj->vtable */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ /* lreg = obj->vtable->klass */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
- x86_push_reg (s->code, t);
- x86_alu_reg_reg (s->code, X86_XOR, t, t);
+ if (klass->rank) {
- switch (tree->data.i) {
- case 4:
- x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
- break;
- case 2:
- x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
- break;
- case 1:
- x86_mov_regp_reg (s->code, tree->left->reg1, t, 4);
- break;
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "InvalidCastException");
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, element_class), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->element_class->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->element_class->diffval);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "InvalidCastException");
+
+ } else {
+
+ if (klass->marshalbyref) {
+ /* check for transparent_proxy */
+ x86_alu_reg_imm (s->code, X86_CMP, lreg, (int)mono_defaults.transparent_proxy_class);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* lreg = obj */
+ x86_mov_reg_membase (s->code, lreg, X86_ESP, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoTransparentProxy,
+ klass), 4);
+
+ x86_patch (br [1], s->code);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->diffval);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "InvalidCastException");
}
- x86_pop_reg (s->code, t);
+ }
+
+ x86_patch (br [0], s->code);
+ x86_pop_reg (s->code, tree->reg1);
+}
+
+reg: ISINST (reg) {
+ MonoClass *klass = tree->data.klass;
+ guint8 *br [3];
+ int lreg = tree->left->reg1;
+
+ x86_push_reg (s->code, lreg);
+ x86_test_reg_reg (s->code, lreg, lreg);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+
+ if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
+ /* lreg = obj->vtable */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoVTable, max_interface_id),
+ klass->interface_id);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_LT, 0, FALSE);
+ /* lreg = obj->vtable->interface_offsets */
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, klass->interface_id << 2, 0);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_patch (br [1], s->code);
+ x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4);
+ x86_patch (br [2], s->code);
+
+ } else {
+ /* lreg = obj->vtable */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ /* lreg = obj->vtable->klass */
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+
+ if (klass->rank) {
+
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, element_class), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->element_class->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->element_class->diffval);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_LE, 0, FALSE);
+ x86_patch (br [1], s->code);
+ x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4);
+ x86_patch (br [2], s->code);
+
+ } else {
+
+ if (klass->marshalbyref) {
+ /* check for transparent_proxy */
+ x86_alu_reg_imm (s->code, X86_CMP, lreg, (int)mono_defaults.transparent_proxy_class);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* lreg = obj */
+ x86_mov_reg_membase (s->code, lreg, X86_ESP, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoTransparentProxy,
+ klass), 4);
+ x86_patch (br [1], s->code);
+ }
+
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->diffval);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_LE, 0, FALSE);
+ x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4);
+ x86_patch (br [2], s->code);
+ }
+ }
+
+ x86_patch (br [0], s->code);
+ x86_pop_reg (s->code, tree->reg1);
+}
+
+stmt: INITOBJ (reg) {
+ int i, j;
+
+ if (!(i = tree->data.i))
+ return;
+
+ if (i == 1 || i == 2 || i == 4) {
+ x86_mov_membase_imm (s->code, tree->left->reg1, 0, 0, i);
return;
}
i = tree->data.i / 4;
j = tree->data.i % 4;
- x86_push_reg (s->code, X86_EAX);
-
if (tree->left->reg1 != X86_EDI) {
x86_push_reg (s->code, X86_EDI);
x86_mov_reg_reg (s->code, X86_EDI, tree->left->reg1, 4);
}
if (i) {
- x86_push_reg (s->code, X86_ECX);
x86_alu_reg_reg (s->code, X86_XOR, X86_EAX, X86_EAX);
x86_mov_reg_imm (s->code, X86_ECX, i);
x86_cld (s->code);
x86_prefix (s->code, X86_REP_PREFIX);
x86_stosl (s->code);
- x86_pop_reg (s->code, X86_ECX);
+
+ for (i = 0; i < j; i++)
+ x86_stosb (s->code);
+
+ } else {
+ g_assert (j == 3);
+ x86_mov_membase_imm (s->code, X86_EDI, 0, 0, 2);
+ x86_mov_membase_imm (s->code, X86_EDI, 2, 0, 1);
}
- for (i = 0; i < j; i++)
- x86_stosb (s->code);
if (tree->left->reg1 != X86_EDI)
x86_pop_reg (s->code, X86_EDI);
-
- x86_pop_reg (s->code, X86_EAX);
}
-stmt: NOP
+stmt: CPBLK (reg, CPSRC (reg, CONST_I4)) {
+ int dest_reg = tree->left->reg1;
+ int source_reg = tree->right->left->reg1;
+ int count = tree->right->right->data.i;
+ int sreg = dest_reg != X86_EAX ? X86_EAX : X86_EDX;
+ int spill_pos = 0, dest_offset = 0, source_offset = 0;
+ int save_esi = FALSE, save_edi = FALSE;
-stmt: POP (reg)
+ // TODO: handle unaligned. prefix
-stmt: BR {
- gint32 addr = tree->data.bb->addr - tree->addr - 5;
- tree->is_jump = TRUE;
+ switch (count) {
+ case 0:
+ break;
+ case 1:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 1);
+ break;
+ case 2:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 2);
+ break;
+ case 3:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 2);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 2, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 2, sreg, 1);
+ break;
+ case 4:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4);
+ break;
+ case 5:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 4, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 1);
+ break;
+ case 6:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 4, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 2);
+ break;
+ case 7:
+ x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 4, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 2);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 6, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 6, sreg, 1);
+ break;
+ case 8:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ break;
+ case 9:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 1);
+ break;
+ case 10:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 2);
+ break;
+ case 11:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 2);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 10, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 10, sreg, 1);
+ break;
+ case 12:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4);
+ break;
+ case 13:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 12, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 1);
+ break;
+ case 14:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 12, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 2);
+ break;
+ case 15:
+ x86_fild_membase (s->code, source_reg, 0, TRUE);
+ x86_fist_pop_membase (s->code, dest_reg, 0, TRUE);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4);
+ x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 12, 2);
+ x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 2);
+ x86_mov_reg_membase (s->code, sreg, source_reg, 14, 1);
+ x86_mov_membase_reg (s->code, dest_reg, 14, sreg, 1);
+ break;
+ default:
+ g_assert (count > 15);
+
+ if (dest_reg != X86_ESI && source_reg != X86_ESI &&
+ mono_regset_reg_used (s->rs, X86_ESI))
+ save_esi = TRUE;
+ if (dest_reg != X86_EDI && source_reg != X86_EDI &&
+ mono_regset_reg_used (s->rs, X86_EDI))
+ save_edi = TRUE;
+
+ if (save_esi)
+ x86_push_reg (s->code, X86_ESI);
+ if (save_edi)
+ x86_push_reg (s->code, X86_EDI);
+
+ if (dest_reg == X86_ESI) {
+ dest_offset = ++spill_pos;
+ }
+ if (source_reg == X86_EDI) {
+ source_offset = ++spill_pos;
+ }
+
+ if (source_offset)
+ x86_push_reg (s->code, source_reg);
+ if (dest_offset)
+ x86_push_reg (s->code, dest_reg);
- x86_jump32 (s->code, addr);
-}
+ if (source_reg != X86_ESI) {
+ if (source_offset)
+ x86_mov_reg_membase (s->code, X86_ESI, X86_ESP, (source_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_ESI, source_reg, 4);
+ }
+ if (dest_reg != X86_EDI) {
+ if (dest_offset)
+ x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4);
+ }
-stmt: BLT (reg, reg) 1 {
- gint32 offset;
+ x86_mov_reg_imm (s->code, X86_ECX, count >> 2);
+ x86_cld (s->code);
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_movsd (s->code);
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - offset, TRUE);
-}
+ switch (count & 3) {
+ case 1:
+ x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 1);
+ x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 1);
+ break;
+ case 2:
+ x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 2);
+ x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 2);
+ break;
+ case 3:
+ x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 2);
+ x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 2);
+ x86_mov_reg_membase (s->code, sreg, X86_ESI, 2, 1);
+ x86_mov_membase_reg (s->code, X86_EDI, 2, sreg, 1);
+ break;
+ default:
+ break;
+ }
-stmt: BLT (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2);
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - offset, TRUE);
+ if (save_edi)
+ x86_pop_reg (s->code, X86_EDI);
+ if (save_esi)
+ x86_pop_reg (s->code, X86_ESI);
+
+ break;
+ }
+} cost {
+ MBCOND (mono_inline_memcpy);
+ return 0;
}
-stmt: BLT_UN (reg, reg) 1 {
- gint32 offset;
+stmt: CPBLK (reg, CPSRC (reg, reg)) {
+ int dest_reg = tree->left->reg1;
+ int source_reg = tree->right->left->reg1;
+ int size_reg = tree->right->right->reg1;
+ int spill_pos = 0, size_offset = 0, dest_offset = 0, source_offset = 0;
+ int save_esi = FALSE, save_edi = FALSE;
+
+ if (!mono_inline_memcpy) {
+ x86_push_reg (s->code, size_reg);
+ x86_push_reg (s->code, source_reg);
+ x86_push_reg (s->code, dest_reg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, memmove);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+ } else {
+ if (dest_reg != X86_ESI && source_reg != X86_ESI && size_reg != X86_ESI &&
+ mono_regset_reg_used (s->rs, X86_ESI))
+ save_esi = TRUE;
+ if (dest_reg != X86_EDI && source_reg != X86_EDI && size_reg != X86_EDI &&
+ mono_regset_reg_used (s->rs, X86_EDI))
+ save_edi = TRUE;
+
+ if (save_esi)
+ x86_push_reg (s->code, X86_ESI);
+ if (save_edi)
+ x86_push_reg (s->code, X86_EDI);
+
+ if (size_reg == X86_EDI || size_reg == X86_ESI) {
+ size_offset = ++spill_pos;
+ }
+ if (dest_reg == X86_ECX || dest_reg == X86_ESI) {
+ dest_offset = ++spill_pos;
+ }
+ if (source_reg == X86_ECX || source_reg == X86_EDI) {
+ source_offset = ++spill_pos;
+ }
+
+ if (source_offset)
+ x86_push_reg (s->code, source_reg);
+ if (dest_offset)
+ x86_push_reg (s->code, dest_reg);
+ if (size_offset)
+ x86_push_reg (s->code, size_reg);
+
+ if (source_reg != X86_ESI) {
+ if (source_offset)
+ x86_mov_reg_membase (s->code, X86_ESI, X86_ESP, (source_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_ESI, source_reg, 4);
+ }
+ if (dest_reg != X86_EDI) {
+ if (dest_offset)
+ x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4);
+ }
+ if (size_reg != X86_ECX) {
+ if (size_offset)
+ x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, (size_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_ECX, size_reg, 4);
+ }
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - offset, FALSE);
-}
+ x86_push_reg (s->code, X86_ECX);
+ x86_shift_reg_imm (s->code, X86_SHR, X86_ECX, 2);
-stmt: BLT_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+ x86_cld (s->code);
+
+ // move whole dwords first
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_movsd (s->code);
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - offset, FALSE);
-}
+ x86_pop_reg (s->code, X86_ECX);
+ x86_alu_reg_imm (s->code, X86_AND, X86_ECX, 3);
-stmt: BGT (reg, reg) 1 {
- gint32 offset;
+ // move remaining bytes (if any)
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_movsb (s->code);
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - offset, TRUE);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2);
+
+ if (save_edi)
+ x86_pop_reg (s->code, X86_EDI);
+ if (save_esi)
+ x86_pop_reg (s->code, X86_ESI);
+ }
}
-stmt: BGT (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+stmt: INITBLK (reg, CPSRC (reg, CONST_I4)) {
+ int dest_reg = tree->left->reg1;
+ int value_reg = tree->right->left->reg1;
+ int size = tree->right->right->data.i;
+ int spill_pos = 0, dest_offset = 0, value_offset = 0;
+ int save_edi = FALSE;
+ int i, j;
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - offset, TRUE);
-}
+ i = size / 4;
+ j = size % 4;
-stmt: BGT_UN (reg, reg) 1 {
- gint32 offset;
+ if (mono_inline_memcpy) {
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - offset, FALSE);
-}
+ if (dest_reg != X86_EDI && value_reg != X86_EDI &&
+ mono_regset_reg_used (s->rs, X86_EDI)) {
+ save_edi = TRUE;
+ x86_push_reg (s->code, X86_EDI);
+ }
-stmt: BGT_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+ if (dest_reg == X86_ECX || dest_reg == X86_EAX) {
+ dest_offset = ++spill_pos;
+ }
+ if (value_reg == X86_ECX || value_reg == X86_EDI) {
+ value_offset = ++spill_pos;
+ }
+
+ if (value_offset)
+ x86_push_reg (s->code, value_reg);
+ if (dest_offset)
+ x86_push_reg (s->code, dest_reg);
+
+ if (value_reg != X86_EAX) {
+ if (value_offset)
+ x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (value_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EAX, value_reg, 4);
+ }
+ if (dest_reg != X86_EDI) {
+ if (dest_offset)
+ x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4);
+ }
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - offset, FALSE);
-}
+ x86_widen_reg (s->code, X86_EAX, X86_EAX, FALSE, FALSE);
+ x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
+ x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 8);
+ x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);
+ x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
+ x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 16);
+ x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);
+
+ if (i) {
+ x86_mov_reg_imm (s->code, X86_ECX, i);
+ x86_cld (s->code);
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_stosd (s->code);
+ }
+
+ for (i = 0; i < j; i++)
+ x86_stosb (s->code);
+
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2);
-stmt: BEQ (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+ if (save_edi)
+ x86_pop_reg (s->code, X86_EDI);
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, TRUE);
+ } else {
+ x86_push_imm (s->code, size);
+ x86_push_reg (s->code, value_reg);
+ x86_push_reg (s->code, dest_reg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, memset);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+ }
+} cost {
+ MBCOND (mono_inline_memcpy);
+ return 0;
}
-stmt: BEQ (reg, reg) 1 {
- gint32 offset;
+stmt: INITBLK (reg, CPSRC (reg, reg)) {
+ int dest_reg = tree->left->reg1;
+ int value_reg = tree->right->left->reg1;
+ int size_reg = tree->right->right->reg1;
+ int spill_pos = 0, size_offset = 0, dest_offset = 0, value_offset = 0;
+ int save_edi = FALSE;
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, TRUE);
-}
+ if (mono_inline_memcpy) {
-stmt: BNE_UN (reg, reg) 1 {
- gint32 offset;
+		if (dest_reg != X86_EDI && value_reg != X86_EDI && size_reg != X86_EDI &&
+		    mono_regset_reg_used (s->rs, X86_EDI)) {
+			save_edi = TRUE;
+			x86_push_reg (s->code, X86_EDI);
+		}
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+ if (size_reg == X86_EDI || size_reg == X86_EAX) {
+ size_offset = ++spill_pos;
+ }
+ if (dest_reg == X86_ECX || dest_reg == X86_EAX) {
+ dest_offset = ++spill_pos;
+ }
+ if (value_reg == X86_ECX || value_reg == X86_EDI) {
+ value_offset = ++spill_pos;
+ }
+
+ if (value_offset)
+ x86_push_reg (s->code, value_reg);
+ if (dest_offset)
+ x86_push_reg (s->code, dest_reg);
+ if (size_offset)
+ x86_push_reg (s->code, size_reg);
+
+ if (value_reg != X86_EAX) {
+ if (value_offset)
+ x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (value_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EAX, value_reg, 4);
+ }
+ if (dest_reg != X86_EDI) {
+ if (dest_offset)
+ x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4);
+ }
+ if (size_reg != X86_ECX) {
+ if (size_offset)
+ x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, (size_offset-1)<<2, 4);
+ else
+ x86_mov_reg_reg (s->code, X86_ECX, size_reg, 4);
+ }
+
+ x86_widen_reg (s->code, X86_EAX, X86_EAX, FALSE, FALSE);
+ x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
+ x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 8);
+ x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);
+ x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
+ x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 16);
+ x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);
+
+ x86_push_reg (s->code, X86_ECX);
+ x86_shift_reg_imm (s->code, X86_SHR, X86_ECX, 2);
+ x86_cld (s->code);
+
+ // init whole dwords first
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_stosd (s->code);
+
+ x86_pop_reg (s->code, X86_ECX);
+ x86_alu_reg_imm (s->code, X86_AND, X86_ECX, 3);
+
+ // init remaining bytes (if any)
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_stosb (s->code);
+
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2);
+
+ if (save_edi)
+ x86_pop_reg (s->code, X86_EDI);
+
+ } else {
+ x86_push_reg (s->code, size_reg);
+ x86_push_reg (s->code, value_reg);
+ x86_push_reg (s->code, dest_reg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, memset);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+ }
}
-stmt: BNE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+stmt: NOP
+
+stmt: POP (reg)
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+stmt: BR {
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_jump32 (s->code, 0);
}
-stmt: BGE (reg, reg) 1 {
- gint32 offset;
+cflags: COMPARE (reg, LDIND_I4 (ADDR_L)) {
+ int treg = VARINFO (s, tree->right->left->data.i).reg;
+ x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, treg);
+} cost {
+ MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0));
+ return 0;
+}
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - offset, TRUE);
+cflags: COMPARE (LDIND_I4 (ADDR_L), CONST_I4) {
+ int treg = VARINFO (s, tree->left->left->data.i).reg;
+ x86_alu_reg_imm (s->code, X86_CMP, treg, tree->right->data.i);
+} cost {
+ MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0));
+ return 0;
}
-stmt: BGE (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+cflags: COMPARE (LDIND_I4 (ADDR_L), reg) {
+ int treg = VARINFO (s, tree->left->left->data.i).reg;
+ x86_alu_reg_reg (s->code, X86_CMP, treg, tree->right->reg1);
+} cost {
+ MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0));
+ return 0;
+}
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - offset, TRUE);
+cflags: COMPARE (LDIND_I4 (ADDR_L), CONST_I4) {
+ int offset = VARINFO (s, tree->left->left->data.i).offset;
+ x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, tree->right->data.i);
+} cost {
+ MBCOND ((VARINFO (data, tree->left->left->data.i).reg < 0));
+ return 0;
}
-stmt: BGE_UN (reg, reg) 1 {
- gint32 offset;
+cflags: COMPARE (reg, CONST_I4) {
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
+}
- tree->is_jump = TRUE;
+cflags: COMPARE (reg, reg) {
x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - offset, FALSE);
}
-stmt: BGE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - offset, FALSE);
+stmt: CBRANCH (cflags) {
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+
+ switch (tree->data.bi.cond) {
+ case CEE_BLT:
+ x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+ break;
+ case CEE_BLT_UN:
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+ break;
+ case CEE_BGT:
+ x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+ break;
+ case CEE_BGT_UN:
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+ break;
+ case CEE_BEQ:
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+ break;
+ case CEE_BNE_UN:
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BGE:
+ x86_branch32 (s->code, X86_CC_GE, 0, TRUE);
+ break;
+ case CEE_BGE_UN:
+ x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+ break;
+ case CEE_BLE:
+ x86_branch32 (s->code, X86_CC_LE, 0, TRUE);
+ break;
+ case CEE_BLE_UN:
+ x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
-stmt: BLE (reg, reg) 1 {
- gint32 offset;
-
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - offset, TRUE);
-}
+stmt: BRTRUE (LDIND_I4 (ADDR_L)) {
+ int treg = VARINFO (s, tree->left->left->data.i).reg;
+ int offset = VARINFO (s, tree->left->left->data.i).offset;
-stmt: BLE (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+ if (treg >= 0)
+ x86_test_reg_reg (s->code, treg, treg);
+ else
+ x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, 0);
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - offset, TRUE);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, TRUE);
}
-stmt: BLE_UN (reg, reg) 1 {
- gint32 offset;
-
- tree->is_jump = TRUE;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - offset, FALSE);
+stmt: BRTRUE (reg) {
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_NE, 0, TRUE);
}
-stmt: BLE_UN (reg, CONST_I4) "MB_USE_OPT1(0)" {
- gint32 offset;
+stmt: BRFALSE (LDIND_I4 (ADDR_L)) {
+ int treg = VARINFO (s, tree->left->left->data.i).reg;
+ int offset = VARINFO (s, tree->left->left->data.i).offset;
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - offset, FALSE);
-}
+ if (treg >= 0)
+ x86_test_reg_reg (s->code, treg, treg);
+ else
+ x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, 0);
-stmt: BRTRUE (reg) {
- gint32 offset;
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, TRUE);
+ //{static int cx= 0; printf ("CX1 %5d\n", cx++);}
}
stmt: BRFALSE (reg) {
- gint32 offset;
-
- tree->is_jump = TRUE;
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, TRUE);
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
}
stmt: BREAK {
}
stmt: RET (reg) {
- gint32 offset;
-
if (tree->left->reg1 != X86_EAX)
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
if (!tree->last_instr) {
- tree->is_jump = TRUE;
- offset = 5 + s->code - s->start;
- x86_jump32 (s->code, s->epilog - offset);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
+ x86_jump32 (s->code, 0);
}
}
stmt: RET_VOID {
- gint32 offset;
-
if (!tree->last_instr) {
- tree->is_jump = TRUE;
- offset = 5 + s->code - s->start;
- x86_jump32 (s->code, s->epilog - offset);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
+ x86_jump32 (s->code, 0);
}
}
-
stmt: ARG_I4 (LDIND_I4 (addr)) {
MBTree *at = tree->left->left;
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
switch (at->data.ainfo.amode) {
}
}
-stmt: ARG_I4 (LDIND_U4 (addr)) {
- MBTree *at = tree->left->left;
-
- switch (at->data.ainfo.amode) {
-
- case AMImmediate:
- x86_push_mem (s->code, at->data.ainfo.offset);
- break;
+stmt: ARG_I4 (LDIND_I4 (ADDR_L)) {
+ int treg = VARINFO (s, tree->left->left->data.i).reg;
+ int pad = tree->data.arg_info.pad;
- case AMBase:
- x86_push_membase (s->code, at->data.ainfo.basereg, at->data.ainfo.offset);
- break;
- case AMIndex:
- x86_push_memindex (s->code, X86_NOBASEREG, at->data.ainfo.offset,
- at->data.ainfo.indexreg, at->data.ainfo.shift);
- break;
- case AMBaseIndex:
- x86_push_memindex (s->code, at->data.ainfo.basereg,
- at->data.ainfo.offset, at->data.ainfo.indexreg,
- at->data.ainfo.shift);
- break;
- }
+ X86_ARG_PAD (pad);
+ x86_push_reg (s->code, treg);
+} cost {
+ MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0));
+ return 0;
}
stmt: ARG_I4 (reg) {
- x86_push_reg (s->code, tree->left->reg1);
- PRINT_REG ("ARG_I4", tree->left->reg1);
-}
-
-# fixme: we must free the allocated strings somewhere
-stmt: ARG_STRING (reg) {
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
- x86_push_reg (s->code, X86_EAX);
- x86_push_reg (s->code, X86_ECX);
- x86_push_reg (s->code, X86_EDX);
+ int pad = tree->data.arg_info.pad;
+ X86_ARG_PAD (pad);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_string_to_utf8);
- x86_call_reg (s->code, X86_EAX);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
-
- x86_mov_membase_reg (s->code, X86_ESP, 12, X86_EAX, 4);
-
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_ECX);
- x86_pop_reg (s->code, X86_EAX);
}
stmt: ARG_I4 (ADDR_G) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.p);
}
stmt: ARG_I4 (CONST_I4) "MB_USE_OPT1(0)" {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.i);
}
PRINT_REG ("THIS", tree->reg1);
}
+reg: CHECKTHIS (reg) {
+ /* try to access the vtable - this will raise an exception
+ * if the object is NULL */
+ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, 0, 0);
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+stmt: CHECKTHIS (reg) {
+ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, 0, 0);
+}
+
+stmt: JMP
+{
+ int pos = -4;
+
+ /* restore callee saved registers */
+ if (mono_regset_reg_used (s->rs, X86_EBX)) {
+ x86_mov_reg_membase (s->code, X86_EBX, X86_EBP, pos, 4);
+ pos -= 4;
+ }
+ if (mono_regset_reg_used (s->rs, X86_EDI)) {
+ x86_mov_reg_membase (s->code, X86_EDI, X86_EBP, pos, 4);
+ pos -= 4;
+ }
+ if (mono_regset_reg_used (s->rs, X86_ESI)) {
+ x86_mov_reg_membase (s->code, X86_ESI, X86_EBP, pos, 4);
+ pos -= 4;
+ }
+ /* restore ESP/EBP */
+ x86_leave (s->code);
+
+ /* jump to the method */
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_jump32 (s->code, 0);
+}
+
this: NOP
reg: CALL_I4 (this, reg) {
- MethodCallInfo *ci = tree->data.ci;
int treg = X86_EAX;
int lreg = tree->left->reg1;
int rreg = tree->right->reg1;
if (lreg == treg || rreg == treg)
treg = X86_ECX;
if (lreg == treg || rreg == treg)
- g_assert_not_reached ();
-
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
+ mono_assert_not_reached ();
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_call_reg (s->code, rreg);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
-
- PRINT_REG ("CALL_I4", tree->reg1);
+ X86_CALL_END;
- g_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg1 == X86_EAX);
}
-reg: CALL_I4 (this, LDIND_REF (ADDR_G)) {
- MethodCallInfo *ci = tree->data.ci;
+reg: CALL_I4 (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
-
- x86_call_mem (s->code, tree->right->left->data.p);
+ X86_CALL_BEGIN;
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
+ x86_call_code (s->code, 0);
- PRINT_REG ("CALL_I4", tree->reg1);
+ X86_CALL_END;
- g_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg1 == X86_EAX);
}
-reg: LDFTN (reg, INTF_ADDR) {
- int lreg = tree->left->reg1;
+reg: LDVIRTFTN (reg, INTF_ADDR) {
+ /* we cant return the value in the vtable, because it can be
+ * a magic trampoline, and we cant pass that to the outside world */
+
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
- x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
- x86_mov_reg_membase (s->code, lreg, lreg,
- G_STRUCT_OFFSET (MonoClass, interface_offsets), 4);
- x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
- x86_mov_reg_membase (s->code, tree->reg1, lreg, tree->right->data.m->slot << 2, 4);
+ x86_push_imm (s->code, tree->right->data.m->klass->interface_id);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldintftn);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
}
reg: CALL_I4 (this, INTF_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
- G_STRUCT_OFFSET (MonoClass, interface_offsets), 4);
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
- PRINT_REG ("CALL_I4(INTERFACE)", tree->reg1);
-
- g_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg1 == X86_EAX);
}
-reg: LDFTN (reg, VFUNC_ADDR) {
- int lreg = tree->left->reg1;
+reg: LDVIRTFTN (reg, VFUNC_ADDR) {
+ /* we cant return the value in the vtable, because it can be
+ * a magic trampoline, and we cant pass that to the outside world */
- x86_mov_reg_membase (s->code, tree->reg1, lreg, 0, 4);
-
- x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, G_STRUCT_OFFSET (MonoClass, vtable) + (tree->right->data.m->slot << 2), 4);
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->right->data.m->slot);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldvirtftn);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+}
+
+reg: LDFTN {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_imm (s->code, tree->data.m);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldftn);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
}
+
reg: CALL_I4 (this, VFUNC_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
- G_STRUCT_OFFSET (MonoClass, vtable) + (tree->right->data.m->slot << 2));
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
- PRINT_REG ("CALL_I4(VIRTUAL)", tree->reg1);
-
- g_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg1 == X86_EAX);
}
-stmt: CALL_VOID (this, LDIND_REF (ADDR_G)) {
- MethodCallInfo *ci = tree->data.ci;
+stmt: CALL_VOID (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
+ X86_CALL_BEGIN;
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
+ x86_call_code (s->code, 0);
+
+ X86_CALL_END;
+}
+
+stmt: CALL_VOID (this, reg) {
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
+
+ X86_CALL_BEGIN;
- x86_call_mem (s->code, tree->right->left->data.p);
+ x86_call_reg (s->code, tree->right->reg1);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
stmt: CALL_VOID (this, INTF_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
- G_STRUCT_OFFSET (MonoClass, interface_offsets), 4);
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
stmt: CALL_VOID (this, VFUNC_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
- G_STRUCT_OFFSET (MonoClass, vtable) + (tree->right->data.m->slot << 2));
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
stmt: SWITCH (reg) {
guint32 offset;
guint32 *jt = (guint32 *)tree->data.p;
- tree->is_jump = TRUE;
-
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, jt [0]);
offset = 6 + (guint32)s->code;
x86_branch32 (s->code, X86_CC_GE, jt [jt [0] + 1] - offset, FALSE);
# 64 bit integers
#
+reg: CONV_I1 (lreg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE);
+}
+
+reg: CONV_U1 (lreg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE);
+}
+
+reg: CONV_I2 (lreg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE);
+}
+
+reg: CONV_U2 (lreg) {
+ x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
+}
+
reg: CONV_I4 (lreg) {
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
+reg: CONV_U4 (lreg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+
+reg: CONV_OVF_I4 (lreg){
+ guint8 *start = s->code;
+ guchar* o1, *o2, *o3, *o4, *o5;
+ int i;
+
+ /*
+	 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
+ */
+ for (i = 0; i < 2; i++) {
+ s->code = start;
+
+ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+
+ /* If the low word top bit is set, see if we are negative */
+ x86_branch8 (s->code, X86_CC_LT, o3 - o1, TRUE);
+ o1 = s->code;
+
+		/* We are not negative (no top bit set); check that our top word is zero */
+ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
+ x86_branch8 (s->code, X86_CC_EQ, o4 - o2, TRUE);
+ o2 = s->code;
+
+ /* throw exception */
+ x86_push_imm (s->code, "OverflowException");
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS,
+ arch_get_throw_exception_by_name ());
+ x86_call_code (s->code, 0);
+
+ o3 = s->code;
+		/* our top bit is set, check that top word is 0xffffffff */
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg2, 0xffffffff);
+
+ o4 = s->code;
+ /* nope, emit exception */
+ x86_branch8 (s->code, X86_CC_NE, o2 - o5, TRUE);
+ o5 = s->code;
+
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ }
+}
reg: CONV_OVF_I4 (lreg){
+ guint8 *br [3], *label [1];
+
/*
* Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
*/
x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
/* If the low word top bit is set, see if we are negative */
- x86_branch8 (s->code, X86_CC_LT, 14, TRUE);
-
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_LT, 0, TRUE);
+
/* We are not negative (no top bit set, check for our top word to be zero */
x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
- x86_branch8 (s->code, X86_CC_EQ, 17, TRUE);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, TRUE);
+ label [0] = s->code;
/* throw exception */
- x86_push_imm (s->code, get_exception_overflow ());
- x86_mov_reg_imm (s->code, X86_EAX, arch_get_throw_exception ());
- x86_call_reg (s->code, X86_EAX);
+ x86_push_imm (s->code, "OverflowException");
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS,
+ arch_get_throw_exception_by_name ());
+ x86_call_code (s->code, 0);
+ x86_patch (br [0], s->code);
/* our top bit is set, check that top word is 0xfffffff */
x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg2, 0xffffffff);
-
+
+ x86_patch (br [1], s->code);
/* nope, emit exception */
- x86_branch8 (s->code, X86_CC_NE, -17, TRUE);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_patch (br [2], label [0]);
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
/* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */
/* top word must be 0 */
x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
- EMIT_COND_EXCEPTION (X86_CC_EQ, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
/* Keep in sync with CONV_OVF_U4 above, they are the same on 32-bit machines */
/* top word must be 0 */
x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
- EMIT_COND_EXCEPTION (X86_CC_EQ, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
x86_mov_reg_imm (s->code, tree->reg2, *((gint32 *)&tree->data.p + 1));
}
-reg: CONV_I1 (lreg) {
- x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, 0xff);
-
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
-}
-
lreg: CONV_I8 (CONST_I4) {
x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0);
x86_branch8 (s->code, X86_CC_GE, 5, TRUE);
i1 = s->code;
x86_mov_reg_imm (s->code, tree->reg2, -1);
- g_assert ((s->code - i1) == 5);
+ mono_assert ((s->code - i1) == 5);
}
lreg: CONV_U8 (CONST_I4) 1 {
x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
}
+lreg: CONV_U8 (reg) {
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
+}
+
lreg: CONV_OVF_U8 (CONST_I4) {
if (tree->left->data.i < 0){
- x86_push_imm (s->code, get_exception_overflow ());
- x86_mov_reg_imm (s->code, X86_EAX, arch_get_throw_exception ());
- x86_call_reg (s->code, X86_EAX);
+ x86_push_imm (s->code, "OverflowException");
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS,
+ arch_get_throw_exception_by_name ());
+ x86_call_code (s->code, 0);
} else {
x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i);
x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2);
lreg: CONV_OVF_U8 (reg) {
x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000);
- EMIT_COND_EXCEPTION (X86_CC_EQ, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
if (tree->reg1 != tree->left->reg1)
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}
stmt: STIND_I8 (addr, lreg) {
-
+
switch (tree->left->data.ainfo.amode) {
case AMImmediate:
tree->right->reg2, 4);
break;
}
+}
+
+stmt: REMOTE_STIND_I8 (reg, lreg) {
+ guint8 *br[2];
+ int offset;
+
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_mov_reg_membase (s->code, tree->right->reg1, tree->left->reg1, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, tree->right->reg1, 0, ((int)mono_defaults.transparent_proxy_class));
+ x86_pop_reg (s->code, tree->right->reg1);
+
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* this is a transparent proxy - remote the call */
+
+ /* save value to stack */
+ x86_push_reg (s->code, tree->right->reg2);
+ x86_push_reg (s->code, tree->right->reg1);
+
+ x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 24);
+
+ br [1] = s->code; x86_jump8 (s->code, 0);
+ x86_patch (br [0], s->code);
+ offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+ tree->data.fi.field->offset;
+ x86_mov_membase_reg (s->code, tree->left->reg1, offset, tree->right->reg1, 4);
+ x86_mov_membase_reg (s->code, tree->left->reg1, offset + 4, tree->right->reg2, 4);
+
+ x86_patch (br [1], s->code);
}
+
# an addr can use two address register (base and index register). The must take care
# that we do not override them (thus the use of x86_lea)
lreg: LDIND_I8 (addr) {
if (tree->reg2 != tree->left->reg2)
x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
} else if (tree->right->data.i < 64) {
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
- x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
+ if (tree->reg1 != tree->left->reg2)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4);
+ if (tree->reg2 != tree->left->reg2)
+ x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
+ x86_shift_reg_imm (s->code, X86_SAR, tree->reg1, (tree->right->data.i - 32));
+ } /* else unspecified result */
+}
+
+lreg: SHR_UN (lreg, CONST_I4) {
+ if (tree->right->data.i < 32) {
+ x86_shrd_reg_imm (s->code, tree->left->reg1, tree->left->reg2, tree->right->data.i);
+ x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg2, tree->right->data.i);
+ if (tree->reg1 != tree->left->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+ if (tree->reg2 != tree->left->reg2)
+ x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ } else if (tree->right->data.i < 64) {
x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4);
- x86_shift_reg_imm (s->code, X86_SAR, tree->reg1, (tree->right->data.i - 32));
+ x86_shift_reg_imm (s->code, X86_SHR, tree->reg1, (tree->right->data.i - 32));
+ x86_mov_reg_imm (s->code, tree->reg2, 0);
} /* else unspecified result */
-
}
lreg: SHR (lreg, reg) {
- guint8 *start = s->code;
- gint32 o1, o2, i;
+ guint8 *br [1];
+
+ if (tree->right->reg1 != X86_ECX)
+ x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
+
+ x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2);
+ x86_shift_reg (s->code, X86_SAR, tree->left->reg2);
+ x86_test_reg_imm (s->code, X86_ECX, 32);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+ x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4);
+ x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
+ x86_patch (br [0], s->code);
- tree->is_jump = TRUE;
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
+}
+
+lreg: SHR_UN (lreg, reg) {
+ guint8 *br [1];
if (tree->right->reg1 != X86_ECX)
x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2);
- x86_shift_reg (s->code, X86_SAR, tree->left->reg2);
- x86_test_reg_imm (s->code, X86_ECX, 32);
- o1 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_EQ, o2 - o1, FALSE);
- x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4);
- x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31);
- o2 = s->code - s->start;
- }
+ x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2);
+ x86_shift_reg (s->code, X86_SHR, tree->left->reg2);
+ x86_test_reg_imm (s->code, X86_ECX, 32);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+ x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4);
+ x86_shift_reg_imm (s->code, X86_SHR, tree->reg2, 31);
+ x86_patch (br [0], s->code);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: SHL (lreg, CONST_I4) {
}
lreg: SHL (lreg, reg) {
- guint8 *start = s->code;
- gint32 o1, o2, i;
-
- tree->is_jump = TRUE;
+ guint8 *br [1];
if (tree->right->reg1 != X86_ECX)
x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4);
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_shld_reg (s->code, tree->left->reg2, tree->left->reg1);
- x86_shift_reg (s->code, X86_SHL, tree->left->reg1);
- x86_test_reg_imm (s->code, X86_ECX, 32);
- o1 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_EQ, o2 - o1, FALSE);
- x86_mov_reg_reg (s->code, tree->left->reg2, tree->left->reg1, 4);
- x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->left->reg1);
- o2 = s->code - s->start;
- }
+ x86_shld_reg (s->code, tree->left->reg2, tree->left->reg1);
+ x86_shift_reg (s->code, X86_SHL, tree->left->reg1);
+ x86_test_reg_imm (s->code, X86_ECX, 32);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+ x86_mov_reg_reg (s->code, tree->left->reg2, tree->left->reg1, 4);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->left->reg1);
+ x86_patch (br [0], s->code);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: ADD (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: ADD_OVF (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2);
- EMIT_COND_EXCEPTION (X86_CC_NO, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: ADD_OVF_UN (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2);
- EMIT_COND_EXCEPTION (X86_CC_NC, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: SUB (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: SUB_OVF (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
- EMIT_COND_EXCEPTION (X86_CC_NO, TRUE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: SUB_OVF_UN (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
- EMIT_COND_EXCEPTION (X86_CC_NC, FALSE, get_exception_overflow ());
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: AND (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_AND, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_AND, tree->left->reg2, tree->right->reg2);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: OR (lreg, lreg) {
x86_alu_reg_reg (s->code, X86_OR, tree->left->reg1, tree->right->reg1);
x86_alu_reg_reg (s->code, X86_OR, tree->left->reg2, tree->right->reg2);
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
+}
+
+lreg: XOR (lreg, lreg) {
+ x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->right->reg1);
+ x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg2, tree->right->reg2);
+
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}
lreg: NEG (lreg) {
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
x86_neg_reg (s->code, tree->reg1);
x86_alu_reg_imm (s->code, X86_ADC, tree->reg2, 0);
}
lreg: NOT (lreg) {
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
- if (tree->reg2 != tree->left->reg2)
- x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4);
+ MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
- x86_not_reg (s->code, tree->reg1);
- x86_not_reg (s->code, tree->reg2);
+ x86_not_reg (s->code, tree->reg1);
+ x86_not_reg (s->code, tree->reg2);
}
lreg: MUL (lreg, lreg) {
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_llmult);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
+ mono_assert (tree->reg1 == X86_EAX &&
tree->reg2 == X86_EDX);
}
+lreg: MUL_OVF (lreg, lreg) {
+ if (mono_regset_reg_used (s->rs, X86_ECX))
+ x86_push_reg (s->code, X86_ECX);
+
+ x86_push_reg (s->code, tree->right->reg2);
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_push_reg (s->code, tree->left->reg2);
+ x86_push_reg (s->code, tree->left->reg1);
+ /* pass a pointer to store the resulting exception -
+ * ugly, but it works */
+ x86_push_reg (s->code, X86_ESP);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult_ovf);
+ x86_call_code (s->code, 0);
+ x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0);
+
+ /* cond. emit exception */
+ x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
+ x86_push_reg (s->code, X86_ECX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception ());
+ x86_call_code (s->code, 0);
+
+ if (mono_regset_reg_used (s->rs, X86_ECX))
+ x86_pop_reg (s->code, X86_ECX);
+
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
lreg: MUL_OVF_UN (lreg, lreg) {
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_push_reg (s->code, X86_ECX);
/* pass a pointer to store the resulting exception -
* ugly, but it works */
x86_push_reg (s->code, X86_ESP);
- x86_mov_reg_imm (s->code, X86_EAX, mono_llmult_ovf_un);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult_ovf_un);
+ x86_call_code (s->code, 0);
x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0);
/* cond. emit exception */
- x86_branch8 (s->code, X86_CC_EQ, 9, FALSE);
+ x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
x86_push_reg (s->code, X86_ECX);
- x86_mov_reg_imm (s->code, X86_EAX, arch_get_throw_exception ());
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception ());
+ x86_call_code (s->code, 0);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
lreg: DIV (lreg, lreg) {
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_lldiv);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_lldiv);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
lreg: REM (lreg, lreg) {
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_llrem);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llrem);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
lreg: DIV_UN (lreg, lreg) {
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_lldiv_un);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_lldiv_un);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
}
lreg: REM_UN (lreg, lreg) {
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, mono_llrem_un);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llrem_un);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
if (mono_regset_reg_used (s->rs, X86_ECX))
x86_pop_reg (s->code, X86_ECX);
- g_assert (tree->reg1 == X86_EAX &&
- tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX &&
+ tree->reg2 == X86_EDX);
+}
+
+lreg: CALL_I8 (this, reg) {
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
+
+ X86_CALL_BEGIN;
+
+ x86_call_reg (s->code, rreg);
+
+ X86_CALL_END;
+
+ mono_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg2 == X86_EDX);
}
-lreg: CALL_I8 (this, LDIND_REF (ADDR_G)) {
- MethodCallInfo *ci = tree->data.ci;
+lreg: CALL_I8 (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
- x86_call_mem (s->code, tree->right->left->data.p);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
+ x86_call_code (s->code, 0);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
- g_assert (tree->reg1 == X86_EAX);
- g_assert (tree->reg2 == X86_EDX);
+ mono_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg2 == X86_EDX);
}
lreg: CALL_I8 (this, VFUNC_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
- G_STRUCT_OFFSET (MonoClass, vtable) + (tree->right->data.m->slot << 2));
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
- PRINT_REG ("CALL0_I8(VIRTUAL)", tree->reg1);
- PRINT_REG ("CALL1_I8(VIRTUAL)", tree->reg2);
+ mono_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg2 == X86_EDX);
+}
+
+lreg: CALL_I8 (this, INTF_ADDR) {
+ int lreg = tree->left->reg1;
+ int treg = X86_EAX;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ X86_CALL_BEGIN;
- g_assert (tree->reg1 == X86_EAX);
- g_assert (tree->reg2 == X86_EDX);
+ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
+ x86_mov_reg_membase (s->code, lreg, lreg,
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
+ x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
+
+ X86_CALL_END;
+
+ mono_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg2 == X86_EDX);
}
stmt: RET (lreg) {
- gint32 offset;
-
if (tree->left->reg1 != X86_EAX) {
if (tree->left->reg2 != X86_EAX) {
x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
}
if (!tree->last_instr) {
- tree->is_jump = TRUE;
- offset = 5 + s->code - s->start;
- x86_jump32 (s->code, s->epilog - offset);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
+ x86_jump32 (s->code, 0);
}
}
stmt: ARG_I8 (lreg) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
}
-reg: CEQ (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, i;
+reg: CSET (COMPARE (lreg, lreg)) {
+ guint8 *br [4];
+ int lreg1, lreg2, rreg1, rreg2;
- tree->is_jump = TRUE;
+ lreg1 = tree->left->left->reg1;
+ lreg2 = tree->left->left->reg2;
+ rreg1 = tree->left->right->reg1;
+ rreg2 = tree->left->right->reg2;
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- o1 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, o2 - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = s->code - s->start;
+
+ if (tree->data.i == CEE_CEQ) {
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ x86_patch (br [0], s->code);
x86_set_reg (s->code, X86_CC_EQ, tree->reg1, FALSE);
x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+ return;
}
-}
-
-reg: CLT (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, o3, o4, o5, i;
-
- tree->is_jump = TRUE;
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_XOR, tree->reg1, tree->reg1);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_GT, o5 - o1, TRUE);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, o4 - o2, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- o3 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_LE, o4 - o3, FALSE);
- o5 = s->code - s->start;
- x86_mov_reg_imm (s->code, tree->reg1, 1);
- o4 = s->code - s->start;
- }
-}
-
-stmt: BEQ (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- o1 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, o2 - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - o2, TRUE);
+ switch (tree->data.i) {
+ case CEE_CGT:
+ x86_alu_reg_reg (s->code, X86_CMP, rreg2, lreg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, TRUE);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, rreg1, lreg1);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
+ break;
+ case CEE_CGT_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, rreg2, lreg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, FALSE);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, rreg1, lreg1);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
+ break;
+ case CEE_CLT:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, TRUE);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
+ break;
+ case CEE_CLT_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, FALSE);
+ br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
+ break;
+ default:
+ g_assert_not_reached ();
}
-}
-
-stmt: BNE_UN (lreg, lreg) {
- gint32 offset;
- tree->is_jump = TRUE;
-
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- offset = 6 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+ /* set result to 1 */
+ x86_patch (br [1], s->code);
+ x86_mov_reg_imm (s->code, tree->reg1, 1);
+ br [3] = s->code; x86_jump8 (s->code, 0);
+
+ /* set result to 0 */
+ x86_patch (br [0], s->code);
+ x86_patch (br [2], s->code);
+ x86_mov_reg_imm (s->code, tree->reg1, 0);
+
+ x86_patch (br [3], s->code);
+}
+
+stmt: CBRANCH (COMPARE (lreg, lreg)) {
+ guint8 *br [1];
+ int lreg1, lreg2, rreg1, rreg2;
+
+ lreg1 = tree->left->left->reg1;
+ lreg2 = tree->left->left->reg2;
+ rreg1 = tree->left->right->reg1;
+ rreg2 = tree->left->right->reg2;
+
+ switch (tree->data.bi.cond) {
+ case CEE_BLT:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BLT_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BGT:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BGT_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BEQ:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BNE_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BGE:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BGE_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BLE:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ case CEE_BLE_UN:
+ x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
+ x86_patch (br [0], s->code);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
-stmt: BGE (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
+#
+# floating point
- tree->is_jump = TRUE;
+#stmt: STLOC (CONV_I4 (freg)) {
+# // fixme: set CW
+# x86_fist_pop_membase (s->code, X86_EBP, tree->data.i, FALSE);
+#}
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - o1, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - oe, FALSE);
+reg: CONV_I1 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, FALSE);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, FALSE);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BGE_UN (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GE, tree->data.bb->addr - oe, FALSE);
+reg: CONV_U1 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BGT (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - o1, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - oe, FALSE);
+reg: CONV_I2 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, TRUE);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, TRUE);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BGT_UN (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_GT, tree->data.bb->addr - oe, FALSE);
+reg: CONV_U2 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, TRUE);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, TRUE);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BLT (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - o1, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - oe, FALSE);
+reg: CONV_I4 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BLT_UN (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - oe, FALSE);
+reg: CONV_U4 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv(s, tree);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_push_reg (s->code, X86_EAX); // SP = SP - 4
+ x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BLE (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - o1, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, TRUE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - oe, FALSE);
+lreg: CONV_I8 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv_i8(s, tree);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ x86_fist_pop_membase (s->code, X86_ESP, 0, TRUE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_pop_reg (s->code, tree->reg2);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-stmt: BLE_UN (lreg, lreg) {
- guint8 *start = s->code;
- gint32 o1, o2, oe, i;
-
- tree->is_jump = TRUE;
-
- for (i = 0; i < 2; i ++) {
- s->code = start;
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o1 = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LT, tree->data.bb->addr - o1, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg2, tree->right->reg2);
- o2 = 2 + s->code - s->start;
- x86_branch8 (s->code, X86_CC_NE, oe - o2, FALSE);
- x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1);
- oe = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_LE, tree->data.bb->addr - oe, FALSE);
+lreg: CONV_U8 (freg) {
+ if (mono_use_fast_iconv) {
+ mono_emit_fast_iconv_i8(s, tree);
+ } else {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fnstcw_membase(s->code, X86_ESP, 0);
+ x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
+ x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
+ x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
+ x86_fldcw_membase (s->code, X86_ESP, 2);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ x86_fist_pop_membase (s->code, X86_ESP, 0, TRUE);
+ x86_pop_reg (s->code, tree->reg1);
+ x86_pop_reg (s->code, tree->reg2);
+ x86_fldcw_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
}
-#
-# floating point
-
-#stmt: STLOC (CONV_I4 (freg)) {
-# // fixme: set CW
-# x86_fist_pop_membase (s->code, X86_EBP, tree->data.i, FALSE);
-#}
-
-reg: CONV_I4 (freg) {
- x86_push_reg (s->code, X86_EAX); // SP = SP - 4
- x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
- x86_pop_reg (s->code, tree->reg1);
-}
-
-reg: CEQ (freg, freg) {
+reg: CSET (COMPARE (freg, freg)) {
int treg = tree->reg1;
-
+
if (treg != X86_EAX)
- x86_push_reg (s->code, X86_EAX); // save EAX
-
+ x86_push_reg (s->code, X86_EAX);
+
x86_fcompp (s->code);
x86_fnstsw (s->code);
x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
- x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
- x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ switch (tree->data.i) {
+ case CEE_CEQ:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ break;
+ case CEE_CGT:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ break;
+ case CEE_CGT_UN:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ break;
+ case CEE_CLT:
+ x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ break;
+ case CEE_CLT_UN:
+ x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
+ x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
if (treg != X86_EAX)
- x86_pop_reg (s->code, X86_EAX); // save EAX
+ x86_pop_reg (s->code, X86_EAX);
}
freg: CONV_R8 (freg) {
x86_fild (s->code, tree->left->left->data.p, FALSE);
}
+freg: CONV_R4 (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, FALSE);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+}
+
freg: CONV_R8 (reg) {
- /* I found no direct way to move an integer register to
- * the floating point stack, so we need to store the register
- * to memory
- */
x86_push_reg (s->code, tree->left->reg1);
x86_fild_membase (s->code, X86_ESP, 0, FALSE);
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}
-freg: CONV_R8 (lreg) {
- /* I found no direct way to move an integer register to
- * the floating point stack, so we need to store the register
- * to memory
- */
+freg: CONV_R_UN (reg) {
+ x86_push_imm (s->code, 0);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+}
+
+freg: CONV_R_UN (lreg) {
+ static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
+ guint8 *br [1];
+
+ /* load 64bit integer to FP stack */
+ x86_push_imm (s->code, 0);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
x86_fild_membase (s->code, X86_ESP, 0, TRUE);
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ /* store as 80bit FP value */
+ x86_fst80_membase (s->code, X86_ESP, 0);
+
+ /* test if lreg is negative */
+ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_GEZ, 0, TRUE);
+
+ /* add correction constant mn */
+ x86_fld80_mem (s->code, mn);
+ x86_fld80_membase (s->code, X86_ESP, 0);
+ x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
+ x86_fst80_membase (s->code, X86_ESP, 0);
+ //x86_breakpoint (s->code);
+ x86_patch (br [0], s->code);
+
+ x86_fld80_membase (s->code, X86_ESP, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
}
-freg: CONV_R4 (reg) {
- /* I found no direct way to move an integer register to
- * the floating point stack, so we need to store the register
- * to memory
- */
+freg: CONV_R4 (lreg) {
+ x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
- x86_fild_membase (s->code, X86_ESP, 0, FALSE);
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+}
+
+freg: CONV_R8 (lreg) {
+ x86_push_reg (s->code, tree->left->reg2);
+ x86_push_reg (s->code, tree->left->reg1);
+ x86_fild_membase (s->code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
}
freg: CONST_R4 {
x86_fld (s->code, tree->data.p, TRUE);
}
-freg: LDIND_R4 (reg) {
- x86_fld_membase (s->code, tree->left->reg1, 0, FALSE);
+freg: LDIND_R4 (addr) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_fld (s->code, tree->left->data.ainfo.offset, FALSE);
+ break;
+
+ case AMBase:
+ x86_fld_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, FALSE);
+ break;
+ case AMIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE);
+ break;
+ case AMBaseIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE);
+ break;
+ }
}
-freg: LDIND_R8 (reg) {
- x86_fld_membase (s->code, tree->left->reg1, 0, TRUE);
+freg: LDIND_R8 (addr) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_fld (s->code, tree->left->data.ainfo.offset, TRUE);
+ break;
+
+ case AMBase:
+ x86_fld_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, TRUE);
+ break;
+ case AMIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE);
+ break;
+ case AMBaseIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE);
+ break;
+ }
}
+
freg: ADD (freg, freg) {
x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
}
x86_fp_op_reg (s->code, X86_FDIV, 1, TRUE);
}
-#freg: REM (freg, freg) {
-# this does not work, since it does not pop a value from the stack,
-# and we need to test if the instruction is ready
-# x86_fprem1 (s->code);
-#}
+freg: CKFINITE (freg) {
+ x86_push_reg (s->code, X86_EAX);
+ x86_fxam (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4100);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ x86_pop_reg (s->code, X86_EAX);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, FALSE, "ArithmeticException");
+}
+
+freg: REM (freg, freg) {
+ guint8 *l1, *l2;
+
+ /* we need to exchange ST(0) with ST(1) */
+ x86_fxch (s->code, 1);
+
+	/* this requires a loop, because fprem1 sometimes
+	 * returns a partial remainder */
+ l1 = s->code;
+ x86_fprem1 (s->code);
+ x86_fnstsw (s->code);
+ x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x0400);
+ l2 = s->code + 2;
+ x86_branch8 (s->code, X86_CC_NE, l1 - l2, FALSE);
+
+ /* pop result */
+ x86_fstp (s->code, 1);
+}
freg: NEG (freg) {
x86_fchs (s->code);
stmt: POP (freg)
-stmt: STIND_R4 (ADDR_L, freg) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, tree->left->data.i).offset;
- x86_fst_membase (s->code, X86_EBP, offset, FALSE, TRUE);
-}
+stmt: STIND_R4 (addr, freg) {
-stmt: STIND_R4 (reg, freg) {
- x86_fst_membase (s->code, tree->left->reg1, 0, FALSE, TRUE);
-}
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_fst (s->code, tree->left->data.ainfo.offset, FALSE, TRUE);
+ break;
-stmt: STIND_R8 (ADDR_L, freg) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, tree->left->data.i).offset;
- x86_fst_membase (s->code, X86_EBP, offset, TRUE, TRUE);
+ case AMBase:
+ x86_fst_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ FALSE, TRUE);
+ break;
+ case AMIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE, TRUE);
+ break;
+ case AMBaseIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE, TRUE);
+ break;
+ }
}
-stmt: STIND_R8 (reg, freg) {
- x86_fst_membase (s->code, tree->left->reg1, 0, TRUE, TRUE);
+stmt: STIND_R8 (addr, freg) {
+
+ switch (tree->left->data.ainfo.amode) {
+
+ case AMImmediate:
+ x86_fst (s->code, tree->left->data.ainfo.offset, TRUE, TRUE);
+ break;
+
+ case AMBase:
+ x86_fst_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset,
+ TRUE, TRUE);
+ break;
+ case AMIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE, TRUE);
+ break;
+ case AMBaseIndex:
+ x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
+ tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
+ tree->left->data.ainfo.shift);
+ x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE, TRUE);
+ break;
+ }
}
-stmt: ARG_R4 (freg) {
+stmt: REMOTE_STIND_R4 (reg, freg) {
+ guint8 *br[2];
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int offset;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* this is a transparent proxy - remote the call */
+
+ /* save value to stack */
x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);
+
+ x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, lreg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
+
+ br [1] = s->code; x86_jump8 (s->code, 0);
+
+ x86_patch (br [0], s->code);
+ offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+ tree->data.fi.field->offset;
+ x86_fst_membase (s->code, lreg, offset, FALSE, TRUE);
+
+ x86_patch (br [1], s->code);
}
-stmt: ARG_R8 (freg) {
+stmt: REMOTE_STIND_R8 (reg, freg) {
+ guint8 *br[2];
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int offset;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* this is a transparent proxy - remote the call */
+
+ /* save value to stack */
x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);
-}
-stmt: BEQ (freg, freg) {
- gint32 offset;
+ x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, lreg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 24);
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, TRUE);
-}
+ br [1] = s->code; x86_jump8 (s->code, 0);
-stmt: BNE_UN (freg, freg) {
- gint32 offset;
+ x86_patch (br [0], s->code);
+ offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+ tree->data.fi.field->offset;
+ x86_fst_membase (s->code, lreg, offset, TRUE, TRUE);
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+ x86_patch (br [1], s->code);
}
-stmt: BLT (freg, freg) {
- gint32 offset;
+stmt: ARG_R4 (freg) {
+ int pad = tree->data.arg_info.pad;
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, FALSE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4 + pad);
+ x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);
}
-stmt: BLT_UN (freg, freg) {
- gint32 offset;
+stmt: ARG_R8 (freg) {
+ int pad = tree->data.arg_info.pad;
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, FALSE);
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8 + pad);
+ x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);
}
-stmt: BGE_UN (freg, freg) {
- gint32 offset;
+# FIXME: implement distinct unordered and ordered floating-point compares
+
+stmt: CBRANCH (COMPARE (freg, freg)) {
- tree->is_jump = TRUE;
x86_fcompp (s->code);
x86_fnstsw (s->code);
x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+
+ switch (tree->data.bi.cond) {
+ case CEE_BLT:
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ break;
+ case CEE_BLT_UN:
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ break;
+ case CEE_BGT:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ break;
+ case CEE_BGT_UN:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
+ break;
+ case CEE_BEQ:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
+ break;
+ case CEE_BNE_UN:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BGE:
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BGE_UN:
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BLE:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ case CEE_BLE_UN:
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
+ x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
-stmt: BGT_UN (freg, freg) {
- gint32 offset;
+freg: CALL_R8 (this, reg) {
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_EQ, tree->data.bb->addr - offset, FALSE);
-}
+ X86_CALL_BEGIN;
-stmt: BLE_UN (freg, freg) {
- gint32 offset;
+ x86_call_reg (s->code, rreg);
- tree->is_jump = TRUE;
- x86_fcompp (s->code);
- x86_fnstsw (s->code);
- x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
- offset = 6 + s->code - s->start;
- x86_branch32 (s->code, X86_CC_NE, tree->data.bb->addr - offset, FALSE);
+ X86_CALL_END;
}
-freg: CALL_R8 (this, LDIND_REF (ADDR_G)) {
- MethodCallInfo *ci = tree->data.ci;
+freg: CALL_R8 (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
- x86_call_mem (s->code, tree->right->left->data.p);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
+ x86_call_code (s->code, 0);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
freg: CALL_R8 (this, INTF_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
- G_STRUCT_OFFSET (MonoClass, interface_offsets), 4);
+ G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
freg: CALL_R8 (this, VFUNC_ADDR) {
- MethodCallInfo *ci = tree->data.ci;
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- g_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (ci->vtype_num) {
- int offset = g_array_index (s->varinfo, MonoVarInfo, ci->vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
- G_STRUCT_OFFSET (MonoClass, vtable) + (tree->right->data.m->slot << 2));
+ G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (ci->args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, ci->args_size);
+ X86_CALL_END;
}
stmt: RET (freg) {
- gint32 offset;
-
if (!tree->last_instr) {
- tree->is_jump = TRUE;
- offset = 5 + s->code - s->start;
- x86_jump32 (s->code, s->epilog - offset);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
+ x86_jump32 (s->code, 0);
}
}
+freg: SIN (freg) {
+ x86_fsin (s->code);
+}
+
+freg: COS (freg) {
+ x86_fcos (s->code);
+}
+
+freg: SQRT (freg) {
+ x86_fsqrt (s->code);
+}
+
# support for value types
reg: LDIND_OBJ (reg) {
}
stmt: STIND_OBJ (reg, reg) {
- x86_push_reg (s->code, X86_EAX);
- x86_push_reg (s->code, X86_EDX);
- x86_push_reg (s->code, X86_ECX);
+ mono_assert (tree->data.i > 0);
- g_assert (tree->data.i > 0);
x86_push_imm (s->code, tree->data.i);
x86_push_reg (s->code, tree->right->reg1);
x86_push_reg (s->code, tree->left->reg1);
- x86_mov_reg_imm (s->code, X86_EAX, MEMCOPY);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+}
- x86_pop_reg (s->code, X86_ECX);
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_EAX);
+stmt: REMOTE_STIND_OBJ (reg, reg) {
+ guint8 *br[2];
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+ int size, offset;
+
+ if (lreg == treg)
+ treg = X86_EDX;
+
+ if (rreg == treg)
+ treg = X86_ECX;
+
+ x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
+ x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
+ br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
+
+ /* this is a transparent proxy - remote the call */
+
+ x86_push_reg (s->code, rreg);
+ x86_push_imm (s->code, tree->data.fi.field);
+ x86_push_imm (s->code, tree->data.fi.klass);
+ x86_push_reg (s->code, lreg);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);
+
+ br [1] = s->code; x86_jump8 (s->code, 0);
+
+ x86_patch (br [0], s->code);
+
+ offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) :
+ tree->data.fi.field->offset;
+
+ size = mono_class_value_size (tree->data.fi.field->type->data.klass, NULL);
+ x86_push_imm (s->code, size);
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, offset);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+
+ x86_patch (br [1], s->code);
}
stmt: ARG_OBJ (CONST_I4) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.i);
}
stmt: ARG_OBJ (reg) {
- int size = tree->data.i;
+ int size = tree->data.arg_info.size;
+ int pad = tree->data.arg_info.pad;
int sa;
- g_assert (size > 0);
+ if (!size)
+ return;
- sa = size + 3;
- sa &= ~3;
+ sa = size + pad;
/* reserve space for the argument */
x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa);
x86_lea_membase (s->code, X86_EAX, X86_ESP, 5*4);
x86_push_reg (s->code, X86_EAX);
- x86_mov_reg_imm (s->code, X86_EAX, MEMCOPY);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
x86_pop_reg (s->code, X86_ECX);
}
stmt: RET_OBJ (reg) {
- gint32 offset;
int size = tree->data.i;
x86_push_imm (s->code, size);
x86_push_reg (s->code, tree->left->reg1);
x86_push_membase (s->code, X86_EBP, 8);
- x86_mov_reg_imm (s->code, X86_EAX, MEMCOPY);
- x86_call_reg (s->code, X86_EAX);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
+ x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
if (!tree->last_instr) {
- tree->is_jump = TRUE;
- offset = 5 + s->code - s->start;
- x86_jump32 (s->code, s->epilog - offset);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
+ x86_jump32 (s->code, 0);
}
}
}
guint64
-mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, gint32 bh)
+mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh)
{
guint64 res, t1;
if (t1 > 0xffffffff)
goto raise_exception;
- res += (guint32)t1;
+ res += ((guint64)t1) << 32;
*exc = NULL;
return res;
raise_exception:
- *exc = get_exception_overflow ();
+ *exc = mono_get_exception_overflow ();
+ return 0;
+}
+
+guint64
+mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh)
+{
+ gint64 res, t1;
+
+	// FIXME: overflow is not detected yet - the raise_exception path below is commented out
+
+ res = (gint64)al * (gint64)bl;
+
+ t1 = (gint64)ah * bl + al * (gint64)bh;
+
+ res += ((gint64)t1) << 32;
+
+ *exc = NULL;
+ return res;
+/*
+ raise_exception:
+ *exc = mono_get_exception_overflow ();
return 0;
+*/
}
gint64
return a % b;
}
-MBTree *
-mono_ctree_new (MonoMemPool *mp, int op, MBTree *left, MBTree *right)
+MonoArray*
+mono_array_new_wrapper (MonoClass *eclass, guint32 n)
{
- MBTree *t = mono_mempool_alloc0 (mp, sizeof (MBTree));
+ MonoDomain *domain = mono_domain_get ();
- t->op = op;
- t->left = left;
- t->right = right;
- t->reg1 = -1;
- t->reg2 = -1;
- t->reg3 = -1;
- t->svt = VAL_UNKNOWN;
- t->cli_addr = -1;
- return t;
+ return mono_array_new (domain, eclass, n);
}
-MBTree *
-mono_ctree_new_leaf (MonoMemPool *mp, int op)
+MonoObject *
+mono_object_new_wrapper (MonoClass *klass)
{
- return mono_ctree_new (mp, op, NULL, NULL);
+ MonoDomain *domain = mono_domain_get ();
+
+ return mono_object_new (domain, klass);
}
-gpointer
-arch_get_lmf_addr (void)
+MonoString*
+mono_ldstr_wrapper (MonoImage *image, guint32 ind)
{
- gpointer *lmf;
-
- if ((lmf = TlsGetValue (lmf_thread_id)))
- return lmf;
+ MonoDomain *domain = mono_domain_get ();
- lmf = g_malloc (sizeof (gpointer));
- *lmf = NULL;
+ return mono_ldstr (domain, image, ind);
+}
- TlsSetValue (lmf_thread_id, lmf);
+gpointer
+mono_ldsflda (MonoClass *klass, int offset)
+{
+ MonoDomain *domain = mono_domain_get ();
+ MonoVTable *vt;
+ gpointer addr;
+
+ vt = mono_class_vtable (domain, klass);
+ addr = (char*)(vt->data) + offset;
- return lmf;
+ return addr;
}
-
-#ifdef DEBUG
void *
-MEMCOPY (void *dest, const void *src, size_t n)
+debug_memcopy (void *dest, const void *src, size_t n)
{
int i, l = n;
return memcpy (dest, src, n);
}
-#endif
+
+/*
+ * mono_emit_fast_iconv:
+ * Emit code converting the FP top-of-stack to a 32 bit integer in
+ * tree->reg1. FIST rounds with the current FPU rounding mode; the code
+ * then compares the rounded result against the original value and
+ * adjusts with SBB so the final result has truncation semantics without
+ * the cost of switching the FPU control word.
+ * NOTE(review): the adjustment assumes the default round-to-nearest
+ * mode is active - verify against the JIT prolog's FPU setup.
+ */
+void mono_emit_fast_iconv (MBCGEN_TYPE* s, MBTREE_TYPE* tree)
+{
+	guint8* br [3];
+	/* scratch area: [esp+8] rounded value, [esp+4] diff, [esp+0] float bits */
+	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 12);
+	x86_fist_membase (s->code, X86_ESP, 8, TRUE); // rounded value
+	x86_fst_membase (s->code, X86_ESP, 0, FALSE, FALSE); // float value
+	x86_fp_int_op_membase (s->code, X86_FSUB, X86_ESP, 8, TRUE);
+	x86_fst_membase (s->code, X86_ESP, 4, FALSE, TRUE); // diff
+
+	/* branch on the sign bit of the original float value */
+	x86_pop_reg (s->code, tree->reg1); // float value
+	x86_test_reg_reg (s->code, tree->reg1, tree->reg1);
+	br[0] = s->code; x86_branch8 (s->code, X86_CC_S, 0, TRUE);
+
+	/* positive: shift diff's sign into CF, then subtract it from the
+	 * rounded value via SBB */
+	x86_pop_reg (s->code, tree->reg1); // diff
+	x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+	x86_pop_reg (s->code, tree->reg1); // rounded value
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, 0);
+	br[1] = s->code; x86_jump8 (s->code, 0);
+
+	// freg is negative
+	x86_patch (br[0], s->code);
+
+	/* negative: adjust in the opposite direction unless diff is zero */
+	x86_pop_reg (s->code, tree->reg1); // diff
+	x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+	x86_pop_reg (s->code, tree->reg1); // rounded value
+	br[2] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE);
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, -1);
+	x86_patch (br[1], s->code);
+	x86_patch (br[2], s->code);
+}
+
+/*
+ * mono_emit_fast_iconv_i8:
+ * 64 bit variant of mono_emit_fast_iconv: convert the FP top-of-stack
+ * to a 64 bit integer in tree->reg1 (low word) / tree->reg2 (high
+ * word), with the same round-then-SBB-adjust trick to obtain truncation
+ * semantics without switching the FPU control word. The SBB on the high
+ * word propagates the borrow/carry from the low-word adjustment.
+ * NOTE(review): assumes round-to-nearest is active - see
+ * mono_emit_fast_iconv.
+ */
+void mono_emit_fast_iconv_i8 (MBCGEN_TYPE* s, MBTREE_TYPE* tree)
+{
+	guint8* br [3];
+	/* scratch: [esp+8] rounded qword, [esp+4] diff, [esp+0] float bits */
+	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 16);
+	x86_fld_reg (s->code, 0);
+	x86_fist_pop_membase (s->code, X86_ESP, 8, TRUE); // rounded value (qword)
+	x86_fst_membase (s->code, X86_ESP, 0, FALSE, FALSE); // float value
+	x86_fild_membase (s->code, X86_ESP, 8, TRUE);
+	x86_fp_op_reg (s->code, X86_FSUB, 1, TRUE); // diff
+	x86_fst_membase (s->code, X86_ESP, 4, FALSE, TRUE); // diff
+
+	/* branch on the sign bit of the original float value */
+	x86_pop_reg (s->code, tree->reg1); // float value
+	x86_test_reg_reg (s->code, tree->reg1, tree->reg1);
+	br[0] = s->code; x86_branch8 (s->code, X86_CC_S, 0, TRUE);
+
+	/* positive: subtract diff's sign from the rounded qword via SBB,
+	 * carrying into the high word */
+	x86_pop_reg (s->code, tree->reg1); // diff
+	x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+	x86_pop_reg (s->code, tree->reg1); // rounded value
+	x86_pop_reg (s->code, tree->reg2);
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, 0);
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg2, 0);
+	br[1] = s->code; x86_jump8 (s->code, 0);
+
+	// freg is negative
+	x86_patch (br[0], s->code);
+
+	/* negative: adjust in the opposite direction unless diff is zero */
+	x86_pop_reg (s->code, tree->reg1); // diff
+	x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
+	x86_pop_reg (s->code, tree->reg1); // rounded value
+	x86_pop_reg (s->code, tree->reg2);
+	br[2] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE);
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, -1);
+	x86_alu_reg_imm (s->code, X86_SBB, tree->reg2, -1);
+	x86_patch (br[1], s->code);
+	x86_patch (br[2], s->code);
+}
+
+/*
+ * mono_ldvirtftn:
+ * @this: object whose virtual method is being resolved (must be non-NULL)
+ * @slot: virtual table slot of the method
+ *
+ * Resolve the code address for the virtual method in @slot of @this's
+ * class. For transparent proxies the real class is unwrapped and a
+ * remoting trampoline is returned instead of directly compiled code.
+ */
+gpointer
+mono_ldvirtftn (MonoObject *this, int slot)
+{
+	MonoClass *class;
+	MonoMethod *m;
+	gpointer addr;
+	gboolean is_proxy = FALSE;
+	g_assert (this);
+
+	if ((class = this->vtable->klass) == mono_defaults.transparent_proxy_class) {
+		class = ((MonoTransparentProxy *)this)->klass;
+		is_proxy = TRUE;
+	}
+
+	/* slot indexes vtable[], which has vtable_size entries, so it must
+	 * be strictly less than vtable_size (slot == vtable_size would read
+	 * one past the end) */
+	g_assert (slot < class->vtable_size);
+
+	m = class->vtable [slot];
+
+	if (is_proxy) {
+		return mono_jit_create_remoting_trampoline (m);
+	} else {
+		/* mono_compile_method is not thread safe - serialize on the
+		 * metadata lock like the other ldftn helpers below */
+		EnterCriticalSection (metadata_section);
+		addr = mono_compile_method (m);
+		LeaveCriticalSection (metadata_section);
+		return addr;
+	}
+}
+
+/*
+ * mono_ldintftn:
+ * @this: object implementing the interface (must be non-NULL)
+ * @slot: interface id, used to index interface_offsets[]
+ *
+ * Resolve the code address for an interface method of @this. For
+ * transparent proxies the real class is unwrapped and a remoting
+ * trampoline is returned instead of directly compiled code.
+ */
+gpointer
+mono_ldintftn (MonoObject *this, int slot)
+{
+	MonoClass *class;
+	MonoMethod *m;
+	gpointer addr;
+	gboolean is_proxy = FALSE;
+	g_assert (this);
+
+	if ((class = this->vtable->klass) == mono_defaults.transparent_proxy_class) {
+		class = ((MonoTransparentProxy *)this)->klass;
+		is_proxy = TRUE;
+	}
+
+	g_assert (slot < class->interface_count);
+
+	/* translate the interface id into the vtable slot where that
+	 * interface's methods start */
+	slot = class->interface_offsets [slot];
+
+	m = class->vtable [slot];
+
+	if (is_proxy) {
+		return mono_jit_create_remoting_trampoline (m);
+	} else {
+		/* serialize compilation on the metadata lock */
+		EnterCriticalSection (metadata_section);
+		addr = mono_compile_method (m);
+		LeaveCriticalSection (metadata_section);
+		return addr;
+	}
+}
+
+/*
+ * mono_ldftn:
+ * @method: method to compile
+ *
+ * Return the code address of @method, compiling it if necessary.
+ * Compilation is serialized on the metadata lock.
+ */
+gpointer mono_ldftn (MonoMethod *method)
+{
+	gpointer addr;
+
+	EnterCriticalSection (metadata_section);
+	addr = mono_compile_method (method);
+	LeaveCriticalSection (metadata_section);
+
+	return addr;
+}