/*
 * x86.brg: X86 code generator
 *
 * Author:
 * 	Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

/* NOTE(review): the names of these #include directives were lost (the
 * angle-bracketed header names were stripped during extraction) -- restore
 * them from the upstream mono/jit sources before building. */
#include
#include
#include
#include
#include
#ifndef PLATFORM_WIN32
#include
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "regset.h"
#include "jit.h"

/*
 * Pull the list of opcodes
 */
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	LAST = 0xff
};
#undef OPDEF

/* alignment of activation frames */
#define MONO_FRAME_ALIGNMENT 4

void print_lmf (void);

/* types and allocator used by the monoburg-generated matcher */
#define MBTREE_TYPE  MBTree
#define MBCGEN_TYPE  MonoFlowGraph
#define MBCOST_DATA  MonoFlowGraph
#define MBALLOC_STATE mono_mempool_alloc (data->mp, sizeof (MBState))

/* x86 addressing mode kind; AMBaseIndex is AMBase | AMIndex */
typedef enum {
	AMImmediate = 0,	// ptr
	AMBase = 1,		// V[REG]
	AMIndex = 2,		// V[REG*X]
	AMBaseIndex = 3,	// V[REG*X][REG]
} X86AddMode;

/* decoded effective address: offset(basereg, indexreg << shift) */
typedef struct {
	int offset;
	X86AddMode amode:2;
	unsigned int shift:2;
	gint8 basereg;
	gint8 indexreg;
} X86AddressInfo;

/* one node of the instruction-selection tree */
struct _MBTree {
	guint16 op;
	unsigned last_instr:1;
	unsigned spilled:1;
	MBTree *left, *right;
	gpointer state;
	gpointer emit;
	gint32 addr;
	gint32 cli_addr;
	gint8 reg1;
	gint8 reg2;
	gint8 reg3;
	MonoValueType svt;
	/* per-opcode payload; which member is valid depends on `op' */
	union {
		gint32 i;
		gint64 l;
		gpointer p;
		MonoMethod *m;
		MonoBBlock *bb;
		MonoClass *klass;
		MonoClassField *field;
		X86AddressInfo ainfo;
		MonoJitFieldInfo fi;
		MonoJitBranchInfo bi;
		MonoJitCallInfo call_info;
		MonoJitArgumentInfo arg_info;
	} data;
};

/* 64 bit arithmetic helpers called from generated code */
gint64   mono_llmult        (gint64 a, gint64 b);
guint64  mono_llmult_ovf    (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh);
guint64  mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh);
gint64   mono_lldiv         (gint64 a, gint64 b);
gint64   mono_llrem         (gint64 a, gint64 b);
guint64  mono_lldiv_un      (guint64 a, guint64 b);
guint64  mono_llrem_un      (guint64 a, guint64 b);
gpointer mono_ldsflda       (MonoClass *klass, int offset);
gpointer mono_ldvirtftn     (MonoObject *this, int slot);
gpointer mono_ldintftn      (MonoObject *this, int slot);
gpointer
mono_ldftn (MonoMethod *method);

void mono_emit_fast_iconv        (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
void mono_emit_fast_iconv_i8     (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
void mono_emit_stack_alloc       (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
void mono_emit_stack_alloc_const (MBCGEN_TYPE* s, MBTREE_TYPE* tree, int size);

MonoArray*
mono_array_new_wrapper (MonoClass *eclass, guint32 n);
MonoObject *
mono_object_new_wrapper (MonoClass *klass);
MonoString*
mono_ldstr_wrapper (MonoImage *image, guint32 ind);

gpointer
get_mono_object_isinst (void);

/* select how aggressively the optional grammar rules are enabled;
 * a cost of 65535 effectively disables a rule */
#define MB_OPT_LEVEL 1

#if MB_OPT_LEVEL == 0
#define MB_USE_OPT1(c) 65535
#define MB_USE_OPT2(c) 65535
#endif
#if MB_OPT_LEVEL == 1
#define MB_USE_OPT1(c) c
#define MB_USE_OPT2(c) 65535
#endif
#if MB_OPT_LEVEL >= 2
#define MB_USE_OPT1(c) c
#define MB_USE_OPT2(c) c
#endif

//#define DEBUG

/* debug helper: emit code that prints "<text> <reg> <reg>" at runtime;
 * EAX/EDX/ECX are saved and restored around the printf call */
#define REAL_PRINT_REG(text,reg) \
	mono_assert (reg >= 0); \
	x86_push_reg (s->code, X86_EAX); \
	x86_push_reg (s->code, X86_EDX); \
	x86_push_reg (s->code, X86_ECX); \
	x86_push_reg (s->code, reg); \
	x86_push_imm (s->code, reg); \
	x86_push_imm (s->code, text " %d %p\n"); \
	x86_mov_reg_imm (s->code, X86_EAX, printf); \
	x86_call_reg (s->code, X86_EAX); \
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 3*4); \
	x86_pop_reg (s->code, X86_ECX); \
	x86_pop_reg (s->code, X86_EDX); \
	x86_pop_reg (s->code, X86_EAX);

void *
debug_memcopy (void *dest, const void *src, size_t n);

#ifdef DEBUG
#define MEMCOPY debug_memcopy
#define PRINT_REG(text,reg) REAL_PRINT_REG(text,reg)
#else
#define MEMCOPY memcpy
#define PRINT_REG(x,y)
#endif

/* The call instruction for virtual functions must have a known
 * size (used by x86_magic_trampoline)
 */
#define x86_call_virtual(inst,basereg,disp) \
	do { \
		*(inst)++ = (unsigned char)0xff; \
		x86_address_byte ((inst), 2, 2, (basereg)); \
		x86_imm_emit32 ((inst), (disp)); \
	} while (0)

/* emit an exception if condition is fail */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
	do { \
		gpointer t; \
		x86_branch8 (s->code, cond, 10, signed); \
		x86_push_imm (s->code, exc_name); \
		t = arch_get_throw_exception_by_name (); \
		mono_add_jump_info (s, s->code, \
				    MONO_JUMP_INFO_ABS, t); \
		x86_call_code (s->code, 0); \
	} while (0);

/* pad the outgoing argument area so the frame keeps its alignment */
#define X86_ARG_PAD(pad) do { \
	if (pad) { \
		if (pad == 4) \
			x86_push_reg (s->code, X86_EAX); \
		else \
			x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, pad); \
	} \
} while (0)

/* pop the argument area after a call returns */
#define X86_CALL_END do { \
	int size = tree->data.call_info.frame_size; \
	if (size) \
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, size); \
} while (0)

/* push `this' (with a NULL-pointer probe via CMP) and, for valuetype
 * returns, the address of the result slot */
#define X86_CALL_BEGIN do { \
	int pad = tree->data.call_info.pad; \
	X86_ARG_PAD (pad); \
	if (tree->left->op != MB_TERM_NOP) { \
		mono_assert (lreg >= 0); \
		x86_push_reg (s->code, lreg); \
		x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0); \
	} \
	if (tree->data.call_info.vtype_num) { \
		int offset = VARINFO (s, tree->data.call_info.vtype_num).offset; \
		x86_lea_membase (s->code, treg, X86_EBP, offset); \
		x86_push_reg (s->code, treg); \
	} \
} while (0)

/* we use this macro to move one lreg to another - source and
   destination may overlap, but the register allocator has to
   make sure that ((d1 < d2) && (s1 < s2))
*/
#define MOVE_LREG(d1,d2,s1,s2) \
	do { \
		g_assert ((d1 < d2) && (s1 < s2)); \
		if ((d1) <= (s1)) { \
			if ((d1) != (s1)) \
				x86_mov_reg_reg (s->code, d1, s1, 4); \
			if ((d2) != (s2)) \
				x86_mov_reg_reg (s->code, d2, s2, 4); \
		} else { \
			if ((d2) != (s2)) \
				x86_mov_reg_reg (s->code, d2, s2, 4); \
			if ((d1) != (s1)) \
				x86_mov_reg_reg (s->code, d1, s1, 4); \
		} \
	} while (0);

%%

#
# terminal definitions
#

# constants
%term CONST_I4 CONST_I8 CONST_R4 CONST_R8
%term LDIND_I1 LDIND_U1 LDIND_I2 LDIND_U2 LDIND_I4 LDIND_I8 LDIND_R4 LDIND_R8 LDIND_OBJ
%term STIND_I1 STIND_I2 STIND_I4 STIND_I8 STIND_R4 STIND_R8 STIND_OBJ
%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ CALL_I4 CALL_I8 CALL_R8 CALL_VOID
%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY ENDFILTER JMP
%term ADD ADD_OVF ADD_OVF_UN SUB SUB_OVF SUB_OVF_UN MUL MUL_OVF MUL_OVF_UN
%term DIV DIV_UN REM REM_UN AND OR
XOR SHL SHR SHR_UN NEG NOT CKFINITE
%term COMPARE CBRANCH BRTRUE BRFALSE CSET
%term CONV_I4 CONV_I1 CONV_I2 CONV_I8 CONV_U1 CONV_U2 CONV_U4 CONV_U8 CONV_R4 CONV_R8 CONV_R_UN
%term INTF_ADDR VFUNC_ADDR NOP NEWARR NEWARR_SPEC NEWOBJ NEWOBJ_SPEC
%term INITBLK CPBLK CPSRC POP INITOBJ LOCALLOC
%term ISINST CASTCLASS UNBOX
%term CONV_OVF_I1 CONV_OVF_U1 CONV_OVF_I2 CONV_OVF_U2 CONV_OVF_U4 CONV_OVF_U8 CONV_OVF_I4
%term CONV_OVF_I4_UN CONV_OVF_U1_UN CONV_OVF_U2_UN
%term CONV_OVF_I2_UN CONV_OVF_I8_UN CONV_OVF_I1_UN
%term EXCEPTION THROW RETHROW HANDLER CHECKTHIS RETHROW_ABORT
%term LDLEN LDELEMA LDFTN LDVIRTFTN LDSTR LDSFLDA
%term REMOTE_LDFLDA REMOTE_STIND_I1 REMOTE_STIND_I2 REMOTE_STIND_I4
%term REMOTE_STIND_I8 REMOTE_STIND_R4 REMOTE_STIND_R8 REMOTE_STIND_OBJ
%term SIN COS SQRT
%term FUNC1 PROC2 PROC3 FREE OBJADDR VTADDR

#
# we start at stmt
#
%start stmt

#
# tree definitions
#

#
# x86 adressing mode
#

# constant address: amode AMImmediate, offset holds the absolute value
acon: CONST_I4 {
	tree->data.ainfo.offset = tree->data.i;
	tree->data.ainfo.amode = AMImmediate;
}

acon: ADDR_G {
	tree->data.ainfo.offset = tree->data.i;
	tree->data.ainfo.amode = AMImmediate;
}

acon: ADD (ADDR_G, CONST_I4) {
	tree->data.ainfo.offset = (unsigned)tree->left->data.p + tree->right->data.i;
	tree->data.ainfo.amode = AMImmediate;
}

base: acon

base: reg {
	tree->data.ainfo.offset = 0;
	tree->data.ainfo.basereg = tree->reg1;
	tree->data.ainfo.amode = AMBase;
}

base: ADD (reg, CONST_I4) {
	tree->data.ainfo.offset = tree->right->data.i;
	tree->data.ainfo.basereg = tree->left->reg1;
	tree->data.ainfo.amode = AMBase;
}

# stack local without a register: address it off EBP
base: ADDR_L {
	tree->data.ainfo.offset = VARINFO (s, tree->data.i).offset;
	tree->data.ainfo.basereg = X86_EBP;
	tree->data.ainfo.amode = AMBase;
} cost {
	MBCOND (VARINFO (data, tree->data.i).reg < 0);
	return 0;
}

index: reg {
	tree->data.ainfo.offset = 0;
	tree->data.ainfo.indexreg = tree->reg1;
	tree->data.ainfo.shift = 0;
	tree->data.ainfo.amode = AMIndex;
}

# a shift by 0..3 maps directly onto the SIB scale field
index: SHL (reg, CONST_I4) {
	tree->data.ainfo.offset = 0;
	tree->data.ainfo.amode = AMIndex;
	tree->data.ainfo.indexreg = tree->left->reg1;
	tree->data.ainfo.shift = tree->right->data.i;
} cost {
	MBCOND (tree->right->data.i == 0 || tree->right->data.i == 1 ||
		tree->right->data.i == 2 || tree->right->data.i == 3);
	return 0;
}

# a multiply by 1/2/4/8 becomes an index scale (via the fast_log2 table)
index: MUL (reg, CONST_I4) {
	static int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

	tree->data.ainfo.offset = 0;
	tree->data.ainfo.amode = AMIndex;
	tree->data.ainfo.indexreg = tree->left->reg1;
	tree->data.ainfo.shift = fast_log2 [tree->right->data.i];
} cost {
	MBCOND (tree->right->data.i == 1 || tree->right->data.i == 2 ||
		tree->right->data.i == 4 || tree->right->data.i == 8);
	return 0;
}

addr: base

addr: index

# combine an index and a base; amode OR yields AMBaseIndex
addr: ADD (index, base) {
	tree->data.ainfo.offset = tree->right->data.ainfo.offset;
	tree->data.ainfo.basereg = tree->right->data.ainfo.basereg;
	tree->data.ainfo.amode = tree->left->data.ainfo.amode |
		tree->right->data.ainfo.amode;
	tree->data.ainfo.shift = tree->left->data.ainfo.shift;
	tree->data.ainfo.indexreg = tree->left->data.ainfo.indexreg;
}

# we pass exception in ECX to catch handler
reg: EXCEPTION {
	int offset = VARINFO (s, tree->data.i).offset;

	if (tree->reg1 != X86_ECX)
		x86_mov_reg_reg (s->code, tree->reg1, X86_ECX, 4);

	/* store it so that we can RETHROW it later */
	x86_mov_membase_reg (s->code, X86_EBP, offset, tree->reg1, 4);
}

stmt: THROW (reg) {
	gpointer target;

	x86_push_reg (s->code, tree->left->reg1);
	target = arch_get_throw_exception ();
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
	x86_call_code (s->code, target);
}

# re-raise the exception object saved by the EXCEPTION rule above
stmt: RETHROW {
	int offset = VARINFO (s, tree->data.i).offset;
	gpointer target;

	x86_push_membase (s->code, X86_EBP, offset);
	target = arch_get_throw_exception ();
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
	x86_call_code (s->code, target);
}

# raise the pending thread-abort exception, if any
stmt: RETHROW_ABORT {
	guint8 *br;
	gpointer target;

	target = mono_thread_current;
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
	x86_call_code (s->code, target);

	x86_mov_reg_membase (s->code, X86_EAX, X86_EAX,
		G_STRUCT_OFFSET (MonoThread, abort_exc), 4);
	x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0);

	/* check for NULL */
	br = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);

	x86_push_reg (s->code, X86_EAX);
	target = arch_get_throw_exception ();
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
	x86_call_code (s->code, target);

	x86_patch (br, s->code);
}

stmt: HANDLER {
	/* save ESP (used by ENDFINALLY) */
	x86_mov_membase_reg (s->code, X86_EBP, mono_exc_esp_offset, X86_ESP, 4);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
	x86_call_imm (s->code, 0);
}

stmt: ENDFINALLY {
	/* restore ESP - which can be modified when we allocate value types
	 * in the finally handler */
	x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
	x86_ret (s->code);
}

stmt: ENDFILTER (reg) {
	/* restore ESP - which can be modified when we allocate value types
	 * in the filter */
	x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
	/* the filter result is returned in EAX */
	if (tree->left->reg1 != X86_EAX)
		x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
	x86_ret (s->code);
}

# in-place increment of a local: V = V + C (cost demands same variable)
stmt: STIND_I4 (ADDR_L, ADD (LDIND_I4 (ADDR_L), CONST_I4)) {
	int vn = tree->left->data.i;
	int treg = VARINFO (s, vn).reg;
	int offset = VARINFO (s, vn).offset;
	int data = tree->right->right->data.i;

	if (data == 1) {
		if (treg >= 0)
			x86_inc_reg (s->code, treg);
		else
			x86_inc_membase (s->code, X86_EBP, offset);
	} else {
		if (treg >= 0)
			x86_alu_reg_imm (s->code, X86_ADD, treg, data);
		else
			x86_alu_membase_imm (s->code, X86_ADD, X86_EBP, offset, data);
	}
} cost {
	MBCOND (tree->right->left->left->data.i == tree->left->data.i);
	return 0;
}

# in-place decrement of a local: V = V - C
stmt: STIND_I4 (ADDR_L, SUB (LDIND_I4 (ADDR_L), CONST_I4)) {
	int vn = tree->left->data.i;
	int treg = VARINFO (s, vn).reg;
	int offset = VARINFO (s, vn).offset;
	int data = tree->right->right->data.i;

	if (data == 1) {
		if (treg >= 0)
			x86_dec_reg (s->code, treg);
		else
			x86_dec_membase
			(s->code, X86_EBP, offset);
	} else {
		if (treg >= 0)
			x86_alu_reg_imm (s->code, X86_SUB, treg, data);
		else
			x86_alu_membase_imm (s->code, X86_SUB, X86_EBP, offset, data);
	}
} cost {
	MBCOND (tree->right->left->left->data.i == tree->left->data.i);
	return 0;
}

# in-place add of a register into a local: V = V + r
stmt: STIND_I4 (ADDR_L, ADD (LDIND_I4 (ADDR_L), reg)) {
	int vn = tree->left->data.i;
	int treg = VARINFO (s, vn).reg;
	int sreg = tree->right->right->reg1;
	int offset = VARINFO (s, vn).offset;

	if (treg >= 0)
		x86_alu_reg_reg (s->code, X86_ADD, treg, sreg);
	else
		x86_alu_membase_reg (s->code, X86_ADD, X86_EBP, offset, sreg);
} cost {
	MBCOND (tree->right->left->left->data.i == tree->left->data.i);
	return 0;
}

# local-to-local copy; the cost requires at least one side in a register
stmt: STIND_I4 (ADDR_L, LDIND_I4 (ADDR_L)) {
	int treg1 = VARINFO (s, tree->left->data.i).reg;
	int treg2 = VARINFO (s, tree->right->left->data.i).reg;
	int offset1 = VARINFO (s, tree->left->data.i).offset;
	int offset2 = VARINFO (s, tree->right->left->data.i).offset;

	//{static int cx= 0; printf ("CX %5d\n", cx++);}

	if (treg1 >= 0 && treg2 >= 0) {
		x86_mov_reg_reg (s->code, treg1, treg2, 4);
		return;
	}
	if (treg1 >= 0 && treg2 < 0) {
		x86_mov_reg_membase (s->code, treg1, X86_EBP, offset2, 4);
		return;
	}
	if (treg1 < 0 && treg2 >= 0) {
		x86_mov_membase_reg (s->code, X86_EBP, offset1, treg2, 4);
		return;
	}
	/* both in memory is excluded by the cost function below */
	g_assert_not_reached ();
} cost {
	MBCOND (VARINFO (data, tree->left->data.i).reg >= 0 ||
		VARINFO (data, tree->right->left->data.i).reg >= 0);
	return 0;
}

# store a 32 bit constant through a decoded address
stmt: STIND_I4 (addr, CONST_I4) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_mov_mem_imm (s->code, tree->left->data.ainfo.offset,
				 tree->right->data.i, 4);
		break;
	case AMBase:
		x86_mov_membase_imm (s->code, tree->left->data.ainfo.basereg,
				     tree->left->data.ainfo.offset,
				     tree->right->data.i, 4);
		break;
	case AMIndex:
		x86_mov_memindex_imm (s->code, X86_NOBASEREG,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->data.i, 4);
		break;
	case AMBaseIndex:
		x86_mov_memindex_imm (s->code,
				      tree->left->data.ainfo.basereg,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->data.i, 4);
		break;
	}
}

# store a register through a decoded address
stmt: STIND_I4 (addr, reg) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset,
				 tree->right->reg1, 4);
		break;
	case AMBase:
		x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
				     tree->left->data.ainfo.offset,
				     tree->right->reg1, 4);
		break;
	case AMIndex:
		x86_mov_memindex_reg (s->code, X86_NOBASEREG,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 4);
		break;
	case AMBaseIndex:
		x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 4);
		break;
	}
}

# store to a field of a possibly-remote object: when the vtable is the
# transparent proxy class the store is routed to mono_store_remote_field,
# otherwise it is a plain membase store
stmt: REMOTE_STIND_I4 (reg, reg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int rreg = tree->right->reg1;
	int offset;

	/* pick a scratch register that collides with neither operand */
	if (lreg == treg)
		treg = X86_EDX;
	if (rreg == treg)
		treg = X86_ECX;

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */

	/* save value to stack */
	x86_push_reg (s->code, rreg);

	x86_push_reg (s->code, X86_ESP);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
	x86_call_code (s->code, 0);
	/* 4 args + the saved value = 20 bytes */
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);

	br [1] = s->code; x86_jump8 (s->code, 0);

	x86_patch (br [0], s->code);
	offset = tree->data.fi.klass->valuetype ?
		tree->data.fi.field->offset - sizeof (MonoObject) :
		tree->data.fi.field->offset;
	x86_mov_membase_reg (s->code, lreg, offset, rreg, 4);

	x86_patch (br [1], s->code);
}

# store the low byte of a register through a decoded address
stmt: STIND_I1 (addr, reg) {
	PRINT_REG ("STIND_I1", tree->right->reg1);

	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset,
				 tree->right->reg1, 1);
		break;
	case AMBase:
		x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
				     tree->left->data.ainfo.offset,
				     tree->right->reg1, 1);
		break;
	case AMIndex:
		x86_mov_memindex_reg (s->code, X86_NOBASEREG,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 1);
		break;
	case AMBaseIndex:
		x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 1);
		break;
	}
}

# byte variant of the REMOTE_STIND_I4 rule above
stmt: REMOTE_STIND_I1 (reg, reg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int rreg = tree->right->reg1;
	int offset;

	if (lreg == treg)
		treg = X86_EDX;
	if (rreg == treg)
		treg = X86_ECX;

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */

	/* save value to stack */
	x86_push_reg (s->code, rreg);

	x86_push_reg (s->code, X86_ESP);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);

	br [1] = s->code; x86_jump8 (s->code, 0);

	x86_patch (br [0], s->code);
	offset = tree->data.fi.klass->valuetype ?
		tree->data.fi.field->offset - sizeof (MonoObject) :
		tree->data.fi.field->offset;
	x86_mov_membase_reg (s->code, lreg, offset, rreg, 1);

	x86_patch (br [1], s->code);
}

# store the low 16 bits of a register through a decoded address
stmt: STIND_I2 (addr, reg) {
	PRINT_REG ("STIND_I2", tree->right->reg1);

	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset,
				 tree->right->reg1, 2);
		break;
	case AMBase:
		x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg,
				     tree->left->data.ainfo.offset,
				     tree->right->reg1, 2);
		break;
	case AMIndex:
		x86_mov_memindex_reg (s->code, X86_NOBASEREG,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 2);
		break;
	case AMBaseIndex:
		x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift,
				      tree->right->reg1, 2);
		break;
	}
}

# 16 bit variant of the REMOTE_STIND_I4 rule above
stmt: REMOTE_STIND_I2 (reg, reg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int rreg = tree->right->reg1;
	int offset;

	if (lreg == treg)
		treg = X86_EDX;
	if (rreg == treg)
		treg = X86_ECX;

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */

	/* save value to stack */
	x86_push_reg (s->code, rreg);

	x86_push_reg (s->code, X86_ESP);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);

	br [1] = s->code; x86_jump8 (s->code, 0);

	x86_patch (br [0], s->code);
	offset = tree->data.fi.klass->valuetype ?
		tree->data.fi.field->offset - sizeof (MonoObject) :
		tree->data.fi.field->offset;
	x86_mov_membase_reg (s->code, lreg, offset, rreg, 2);

	x86_patch (br [1], s->code);
}

# load of a register-allocated local: just a reg-reg move (or nothing)
reg: LDIND_I4 (ADDR_L) {
	int treg = VARINFO (s, tree->left->data.i).reg;

	if (treg != tree->reg1)
		x86_mov_reg_reg (s->code, tree->reg1, treg, 4);
} cost {
	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
	return 0;
}

# store a constant into a register-allocated local
stmt: STIND_I4 (ADDR_L, CONST_I4) {
	int treg = VARINFO (s, tree->left->data.i).reg;

	x86_mov_reg_imm (s->code, treg, tree->right->data.i);
} cost {
	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
	return 0;
}

# stack local -> register local copy
stmt: STIND_I4 (ADDR_L, LDIND_I4 (ADDR_L)) {
	int treg = VARINFO (s, tree->left->data.i).reg;
	int offset = VARINFO (s, tree->right->left->data.i).offset;

	x86_mov_reg_membase (s->code, treg, X86_EBP, offset, 4);
} cost {
	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
	MBCOND ((VARINFO (data, tree->right->left->data.i).reg < 0));
	return 0;
}

# register -> register-allocated local
stmt: STIND_I4 (ADDR_L, reg) {
	int treg = VARINFO (s, tree->left->data.i).reg;

	if (treg != tree->right->reg1)
		x86_mov_reg_reg (s->code, treg, tree->right->reg1, 4);
} cost {
	MBCOND ((VARINFO (data, tree->left->data.i).reg >= 0));
	return 0;
}

# 32 bit load through a decoded address
reg: LDIND_I4 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4);
		break;
	case AMBase:
		x86_mov_reg_membase (s->code, tree->reg1,
				     tree->left->data.ainfo.basereg,
				     tree->left->data.ainfo.offset, 4);
		break;
	case AMIndex:
		x86_mov_reg_memindex (s->code, tree->reg1, X86_NOBASEREG,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift, 4);
		break;
	case AMBaseIndex:
		x86_mov_reg_memindex (s->code, tree->reg1,
				      tree->left->data.ainfo.basereg,
				      tree->left->data.ainfo.offset,
				      tree->left->data.ainfo.indexreg,
				      tree->left->data.ainfo.shift, 4);
		break;
	}

	PRINT_REG ("LDIND_I4", tree->reg1);
}

# sign-extending byte load through a decoded address
reg: LDIND_I1 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_widen_mem
		(s->code, tree->reg1, tree->left->data.ainfo.offset, TRUE, FALSE);
		break;
	case AMBase:
		x86_widen_membase (s->code, tree->reg1,
				   tree->left->data.ainfo.basereg,
				   tree->left->data.ainfo.offset, TRUE, FALSE);
		break;
	case AMIndex:
		x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, TRUE, FALSE);
		break;
	case AMBaseIndex:
		x86_widen_memindex (s->code, tree->reg1,
				    tree->left->data.ainfo.basereg,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, TRUE, FALSE);
		break;
	}

	PRINT_REG ("LDIND_I1", tree->reg1);
}

# zero-extending byte load through a decoded address
reg: LDIND_U1 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_widen_mem (s->code, tree->reg1,
			       tree->left->data.ainfo.offset, FALSE, FALSE);
		break;
	case AMBase:
		x86_widen_membase (s->code, tree->reg1,
				   tree->left->data.ainfo.basereg,
				   tree->left->data.ainfo.offset, FALSE, FALSE);
		break;
	case AMIndex:
		x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, FALSE, FALSE);
		break;
	case AMBaseIndex:
		x86_widen_memindex (s->code, tree->reg1,
				    tree->left->data.ainfo.basereg,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, FALSE, FALSE);
		break;
	}

	PRINT_REG ("LDIND_U1", tree->reg1);
}

# sign-extending 16 bit load through a decoded address
reg: LDIND_I2 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_widen_mem (s->code, tree->reg1,
			       tree->left->data.ainfo.offset, TRUE, TRUE);
		break;
	case AMBase:
		x86_widen_membase (s->code, tree->reg1,
				   tree->left->data.ainfo.basereg,
				   tree->left->data.ainfo.offset, TRUE, TRUE);
		break;
	case AMIndex:
		x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, TRUE, TRUE);
		break;
	case AMBaseIndex:
		x86_widen_memindex (s->code, tree->reg1,
				    tree->left->data.ainfo.basereg,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, TRUE, TRUE);
		break;
	}

	/* NOTE(review): the label says "LDIND_U2" but this is the LDIND_I2
	 * rule -- debug-only output, likely a copy/paste slip */
	PRINT_REG ("LDIND_U2", tree->reg1);
}

# zero-extending 16 bit load through a decoded address
reg: LDIND_U2 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_widen_mem (s->code, tree->reg1,
			       tree->left->data.ainfo.offset, FALSE, TRUE);
		break;
	case AMBase:
		x86_widen_membase (s->code, tree->reg1,
				   tree->left->data.ainfo.basereg,
				   tree->left->data.ainfo.offset, FALSE, TRUE);
		break;
	case AMIndex:
		x86_widen_memindex (s->code, tree->reg1, X86_NOBASEREG,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, FALSE, TRUE);
		break;
	case AMBaseIndex:
		x86_widen_memindex (s->code, tree->reg1,
				    tree->left->data.ainfo.basereg,
				    tree->left->data.ainfo.offset,
				    tree->left->data.ainfo.indexreg,
				    tree->left->data.ainfo.shift, FALSE, TRUE);
		break;
	}

	PRINT_REG ("LDIND_U2", tree->reg1);
}

# compute the address of a field of a possibly-remote object; for a
# transparent proxy the lookup goes through mono_load_remote_field
reg: REMOTE_LDFLDA (reg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;

	if (lreg == X86_EAX)
		treg = X86_EDX;

	/* keep the caller's value of the scratch register alive */
	if (tree->reg1 != treg)
		x86_push_reg (s->code, treg);

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */
	if (treg != X86_EAX)
		x86_push_reg (s->code, X86_EAX);
	if (treg != X86_EDX)
		x86_push_reg (s->code, X86_EDX);
	x86_push_reg (s->code, X86_ECX);

	x86_push_imm (s->code, 0);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_load_remote_field);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);

	if (treg != X86_EAX)
		x86_mov_reg_reg (s->code, treg, X86_EAX, 4);

	x86_pop_reg (s->code, X86_ECX);
	if (treg != X86_EDX)
		x86_pop_reg (s->code, X86_EDX);
	if (treg != X86_EAX)
		x86_pop_reg (s->code, X86_EAX);

	x86_mov_reg_reg (s->code, tree->reg1, treg, 4);

	br
[1] = s->code; x86_jump8 (s->code, 0); x86_patch (br [0], s->code); if (tree->data.fi.klass->valuetype) x86_lea_membase (s->code, tree->reg1, lreg, tree->data.fi.field->offset - sizeof (MonoObject)); else x86_lea_membase (s->code, tree->reg1, lreg, tree->data.fi.field->offset); x86_patch (br [1], s->code); if (tree->reg1 != treg) x86_pop_reg (s->code, treg); } reg: ADDR_L { int offset = VARINFO (s, tree->data.i).offset; x86_lea_membase (s->code, tree->reg1, X86_EBP, offset); PRINT_REG ("ADDR_L", tree->reg1); } cost { MBCOND (VARINFO (data, tree->data.i).reg < 0); return 5; } reg: ADDR_G 5 { x86_mov_reg_imm (s->code, tree->reg1, tree->data.p); } reg: CONV_I1 (reg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE); } reg: CONV_U1 (reg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE); } reg: CONV_I2 (reg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE); } reg: CONV_U2 (reg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE); } reg: CONST_I4 1 { x86_mov_reg_imm (s->code, tree->reg1, tree->data.i); } reg: CONV_I4 (reg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); PRINT_REG ("CONV_I4", tree->left->reg1); } reg: CONV_U4 (reg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); PRINT_REG ("CONV_U4", tree->left->reg1); } reg: CONV_OVF_I4 (reg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); PRINT_REG ("CONV_OVF_I4", tree->left->reg1); } reg: CONV_OVF_U4 (reg) { /* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */ x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_OVF_I4_UN (reg) { /* Keep in sync with CONV_OVF_U4 above, they are the same on 
32-bit machines */ x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_OVF_I1 (reg) { /* probe value to be within -128 to 127 */ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 127); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException"); x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -128); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, TRUE, "OverflowException"); x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE); } reg: CONV_OVF_I1_UN (reg) { /* probe values between 0 to 128 */ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff80); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE); } reg: CONV_OVF_U1 (reg) { /* Keep in sync with CONV_OVF_U1_UN routine below, they are the same on 32-bit machines */ /* probe value to be within 0 to 255 */ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE); } reg: CONV_OVF_U1_UN (reg) { /* Keep in sync with CONV_OVF_U1 routine above, they are the same on 32-bit machines */ /* probe value to be within 0 to 255 */ x86_test_reg_imm (s->code, tree->left->reg1, 0xffffff00); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE); } reg: CONV_OVF_I2 (reg) { /* Probe value to be within -32768 and 32767 */ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 32767); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, TRUE, "OverflowException"); x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, -32768); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GE, TRUE, "OverflowException"); x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE); } 
reg: CONV_OVF_U2 (reg) {
	/* Keep in sync with CONV_OVF_U2_UN below, they are the same on
	 * 32-bit machines */
	/* Probe value to be within 0 and 65535 */
	x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
	/* NOTE(review): the sync partner below passes FALSE; the signed
	 * flag is irrelevant for X86_CC_EQ, so behavior is identical */
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException");
	x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}

reg: CONV_OVF_U2_UN (reg) {
	/* Keep in sync with CONV_OVF_U2 above, they are the same on
	 * 32-bit machines */
	/* Probe value to be within 0 and 65535 */
	x86_test_reg_imm (s->code, tree->left->reg1, 0xffff0000);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
	x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}

reg: CONV_OVF_I2_UN (reg) {
	/* Convert uint value into short, value within 0 and 32767 */
	x86_test_reg_imm (s->code, tree->left->reg1, 0xffff8000);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
	x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}

# multiply by constant: a single shift for powers of two, LEA sequences
# for a few small multipliers, IMUL otherwise
reg: MUL (reg, CONST_I4) "MB_USE_OPT1(0)" {
	unsigned int i, j, k, v;

	/* scan for the lowest set bit: i = its index, k = mask above it */
	v = tree->right->data.i;
	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
		if (v & j)
			break;
	}

	/* not a power of two?
	 * NOTE(review): `v < 0' is always false (v is unsigned) -- a dead
	 * subexpression, kept as-is */
	if (v < 0 || i == 32 || v & k) {
		switch (v) {
		case 3:
			/* LEA r1, [r2 + r2*2] */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
			break;
		case 5:
			/* LEA r1, [r2 + r2*4] */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
			break;
		case 6:
			/* LEA r1, [r2 + r2*2] */
			/* ADD r1, r1 */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
			x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
			break;
		case 9:
			/* LEA r1, [r2 + r2*8] */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 3);
			break;
		case 10:
			/* LEA r1, [r2 + r2*4] */
			/* ADD r1, r1 */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
			x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1);
			break;
		case 12:
			/* LEA
			   r1, [r2 + r2*2] */
			/* SHL r1, 2 */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 1);
			x86_shift_reg_imm (s->code, X86_SHL, tree->reg1, 2);
			break;
		case 25:
			/* LEA r1, [r2 + r2*4] */
			/* LEA r1, [r1 + r1*4] */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
			x86_lea_memindex (s->code, tree->reg1, tree->reg1, 0, tree->reg1, 2);
			break;
		case 100:
			/* LEA r1, [r2 + r2*4] */
			/* SHL r1, 2 */
			/* LEA r1, [r1 + r1*4] */
			x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, 0, tree->left->reg1, 2);
			x86_shift_reg_imm (s->code, X86_SHL, tree->reg1, 2);
			x86_lea_memindex (s->code, tree->reg1, tree->reg1, 0, tree->reg1, 2);
			break;
		default:
			x86_imul_reg_reg_imm (s->code, tree->reg1, tree->left->reg1, tree->right->data.i);
			break;
		}
	} else {
		/* exact power of two: a left shift by the bit index */
		x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, i);
		if (tree->reg1 != tree->left->reg1)
			x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
	}
}

reg: MUL (reg, reg) {
	x86_imul_reg_reg (s->code, tree->left->reg1, tree->right->reg1);
	if (tree->reg1 != tree->left->reg1)
		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}

# signed multiply with overflow check (IMUL sets OF on overflow)
reg: MUL_OVF (reg, reg) {
	x86_imul_reg_reg (s->code, tree->left->reg1, tree->right->reg1);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
	if (tree->reg1 != tree->left->reg1)
		x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
}

# unsigned multiply with overflow check; MUL needs EAX/EDX
reg: MUL_OVF_UN (reg, reg) {
	mono_assert (tree->right->reg1 != X86_EAX);

	if (tree->left->reg1 != X86_EAX)
		x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);

	x86_mul_reg (s->code, tree->right->reg1, FALSE);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# signed divide by a power-of-two constant: an arithmetic shift
# (the cost function below restricts the matching constants)
reg: DIV (reg, CONST_I4) {
	unsigned int i, j, k, v;

	/* find the (single) set bit -- i is its index */
	v = tree->right->data.i;
	for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) {
		if (v & j)
			break;
	}

	x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg1, i);
	if (tree->reg1 !=
tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } cost { unsigned int i, j, k, v; if (v < 0) return MBMAXCOST; v = tree->right->data.i; for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) { if (v & j) break; } if (i == 32 || v & k) return MBMAXCOST; return 0; } reg: DIV (reg, reg) { mono_assert (tree->right->reg1 != X86_EAX); if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); x86_cdq (s->code); x86_div_reg (s->code, tree->right->reg1, TRUE); mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX); } reg: DIV_UN (reg, CONST_I4) { unsigned int i, j, k, v; double f, r; v = tree->right->data.i; for (i = 0, j = 1, k = 0xfffffffe; i < 32; i++, j = j << 1, k = k << 1) { if (v & j) break; } if (i == 32 || v & k) { for (i = 32, j = 0x80000000; --i >= 0; j >>= 1) { if (v & j) break; } /* k = 32 + number of significant bits in v - 1 */ k = 32 + i; f = 1.0f / v; for (i = 0; i < k; i++) f *= 2.0f; r = f - floor(f); if (r == 0) { x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, k - 32); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } else if (r < 0.5f) { if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); x86_mov_reg_imm (s->code, X86_EDX, (guint32) floor(f)); /* x86_inc_reg (s->code, X86_EAX); */ /* INC is faster but we have to check for overflow. 
*/ x86_alu_reg_imm (s->code, X86_ADD, X86_EAX, 1); x86_branch8(s->code, X86_CC_C, 2, FALSE); x86_mul_reg (s->code, X86_EDX, FALSE); x86_shift_reg_imm (s->code, X86_SHR, X86_EDX, k - 32); if (tree->reg1 != X86_EDX) x86_mov_reg_reg (s->code, tree->reg1, X86_EDX, 4); } else { if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); x86_mov_reg_imm (s->code, X86_EDX, (guint32) ceil(f)); x86_mul_reg (s->code, X86_EDX, FALSE); x86_shift_reg_imm (s->code, X86_SHR, X86_EDX, k - 32); if (tree->reg1 != X86_EDX) x86_mov_reg_reg (s->code, tree->reg1, X86_EDX, 4); } } else { x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } } reg: DIV_UN (reg, reg) { mono_assert (tree->right->reg1 != X86_EAX); if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); x86_mov_reg_imm (s->code, X86_EDX, 0); x86_div_reg (s->code, tree->right->reg1, FALSE); mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX); } reg: REM (reg, reg) { mono_assert (tree->right->reg1 != X86_EAX); mono_assert (tree->right->reg1 != X86_EDX); if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); /* sign extend to 64bit in EAX/EDX */ x86_cdq (s->code); x86_div_reg (s->code, tree->right->reg1, TRUE); x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4); mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX); } reg: REM_UN (reg, reg) { mono_assert (tree->right->reg1 != X86_EAX); mono_assert (tree->right->reg1 != X86_EDX); if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); /* zero extend to 64bit in EAX/EDX */ x86_mov_reg_imm (s->code, X86_EDX, 0); x86_div_reg (s->code, tree->right->reg1, FALSE); x86_mov_reg_reg (s->code, X86_EAX, X86_EDX, 4); mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX); } reg: ADD (reg, CONST_I4) "MB_USE_OPT1(0)" { if (tree->right->data.i 
== 1) x86_inc_reg (s->code, tree->left->reg1); else x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: ADD (reg, LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->right->left->data.i).reg; x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, treg); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } cost { MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0)); return 0; } reg: ADD (reg, reg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: ADD_OVF (reg, reg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: ADD_OVF_UN (reg, reg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SUB (reg, CONST_I4) "MB_USE_OPT1(0)" { if (tree->right->data.i == 1) x86_dec_reg (s->code, tree->left->reg1); else x86_alu_reg_imm (s->code, X86_SUB, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SUB (reg, LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->right->left->data.i).reg; x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, treg); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } cost { MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0)); return 0; } reg: SUB (reg, reg) { x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1); 
if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SUB_OVF (reg, reg) { x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SUB_OVF_UN (reg, reg) { x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CSET (cflags) { switch (tree->data.i) { case CEE_CEQ: x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE); break; case CEE_CGT: x86_set_reg (s->code, X86_CC_GT, tree->reg1, TRUE); break; case CEE_CGT_UN: x86_set_reg (s->code, X86_CC_GT, tree->reg1, FALSE); break; case CEE_CLT: x86_set_reg (s->code, X86_CC_LT, tree->reg1, TRUE); break; case CEE_CLT_UN: x86_set_reg (s->code, X86_CC_LT, tree->reg1, FALSE); break; default: g_assert_not_reached (); } x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE); } reg: AND (reg, CONST_I4) "MB_USE_OPT1(0)" { x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: AND (reg, reg) { x86_alu_reg_reg (s->code, X86_AND, tree->left->reg1, tree->right->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: OR (reg, CONST_I4) "MB_USE_OPT1(0)" { x86_alu_reg_imm (s->code, X86_OR, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: OR (reg, reg) { x86_alu_reg_reg (s->code, X86_OR, tree->left->reg1, tree->right->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: XOR (reg, CONST_I4) 
"MB_USE_OPT1(0)" { x86_alu_reg_imm (s->code, X86_XOR, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: XOR (reg, reg) { x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->right->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: NEG (reg) { x86_neg_reg (s->code, tree->left->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: NOT (reg) { x86_not_reg (s->code, tree->left->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SHL (reg, CONST_I4) { x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SHL (reg, reg) { if (tree->right->reg1 != X86_ECX) { x86_push_reg (s->code, X86_ECX); x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); } x86_shift_reg (s->code, X86_SHL, tree->left->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->right->reg1 != X86_ECX) x86_pop_reg (s->code, X86_ECX); mono_assert (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX); } reg: SHR (reg, CONST_I4) { x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SHR (reg, reg) { if (tree->right->reg1 != X86_ECX) { x86_push_reg (s->code, X86_ECX); x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); } x86_shift_reg (s->code, X86_SAR, tree->left->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->right->reg1 != X86_ECX) x86_pop_reg (s->code, X86_ECX); mono_assert (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX); } reg: SHR_UN (reg, CONST_I4) 
{ x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: SHR_UN (reg, reg) { if (tree->right->reg1 != X86_ECX) { x86_push_reg (s->code, X86_ECX); x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); } x86_shift_reg (s->code, X86_SHR, tree->left->reg1); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->right->reg1 != X86_ECX) x86_pop_reg (s->code, X86_ECX); mono_assert (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX); } reg: LDSFLDA (CONST_I4) { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->left->data.i); x86_push_imm (s->code, tree->data.klass); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldsflda); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } } # array support reg: LDLEN (reg) { x86_mov_reg_membase (s->code, tree->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length), 4); } reg: LDELEMA (reg, CONST_I4) { int ind; if (mono_jit_boundcheck){ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length), tree->right->data.i); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "IndexOutOfRangeException"); } ind = tree->data.i * tree->right->data.i + G_STRUCT_OFFSET (MonoArray, vector); x86_lea_membase (s->code, tree->reg1, tree->left->reg1, ind); } reg: LDELEMA (reg, reg) { if (mono_jit_boundcheck){ x86_alu_reg_membase (s->code, X86_CMP, tree->right->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length)); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, FALSE, "IndexOutOfRangeException"); } if (tree->data.i == 1 || 
tree->data.i == 2 || tree->data.i == 4 || tree->data.i == 8) { static int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 }; x86_lea_memindex (s->code, tree->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, vector), tree->right->reg1, fast_log2 [tree->data.i]); } else { x86_imul_reg_reg_imm (s->code, tree->right->reg1, tree->right->reg1, tree->data.i); x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->right->reg1); x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, G_STRUCT_OFFSET (MonoArray, vector)); } } reg: LDSTR { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->data.p); x86_push_imm (s->code, s->method->klass->image); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldstr_wrapper); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } PRINT_REG ("LDSTR", tree->reg1); } reg: NEWARR (reg) { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_reg (s->code, tree->left->reg1); x86_push_imm (s->code, tree->data.p); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_array_new_wrapper); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } PRINT_REG ("NEWARR", tree->reg1); } reg: NEWARR_SPEC (reg) { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_reg (s->code, tree->left->reg1); x86_push_imm (s->code, tree->data.p); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, 
mono_array_new_specific); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } PRINT_REG ("NEWARR_SPEC", tree->reg1); } reg: NEWOBJ { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->data.klass); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_wrapper); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } PRINT_REG ("NEWOBJ", tree->reg1); } reg: NEWOBJ_SPEC { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->data.p); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_specific); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } PRINT_REG ("NEWOBJ_SPEC", tree->reg1); } reg: OBJADDR (reg) { if (tree->left->reg1 != tree->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: VTADDR (ADDR_L) { int offset = VARINFO (s, tree->left->data.i).offset; x86_lea_membase (s->code, tree->reg1, X86_EBP, offset); } stmt: FREE (reg) { x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, g_free); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4); } stmt: PROC2 (reg, reg) { x86_push_reg (s->code, tree->right->reg1); x86_push_reg (s->code, tree->left->reg1); 
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); } stmt: PROC3 (reg, CPSRC (reg, reg)) { x86_push_reg (s->code, tree->right->right->reg1); x86_push_reg (s->code, tree->right->left->reg1); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } reg: FUNC1 (reg) { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer)); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } } reg: LOCALLOC (CONST_I4) { int size; int offset; size = (tree->left->data.i + (MONO_FRAME_ALIGNMENT - 1)) & ~(MONO_FRAME_ALIGNMENT - 1); // align to MONO_FRAME_ALIGNMENT boundary offset = 0; if (size) { mono_emit_stack_alloc_const (s, tree, size); if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI) { x86_push_reg (s->code, X86_EDI); offset += 4; } if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) { x86_push_reg (s->code, X86_EAX); offset += 4; } if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) { x86_push_reg (s->code, X86_ECX); offset += 4; } x86_mov_reg_imm (s->code, X86_ECX, size >> 2); x86_alu_reg_reg (s->code, X86_SUB, X86_EAX, X86_EAX); x86_lea_membase (s->code, X86_EDI, X86_ESP, offset); x86_cld (s->code); x86_prefix (s->code, X86_REP_PREFIX); x86_stosd (s->code); if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) x86_pop_reg (s->code, X86_EAX); if (tree->reg1 != 
X86_EDI && tree->left->reg1 != X86_EDI) x86_pop_reg (s->code, X86_EDI); } x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4); } reg: LOCALLOC (reg) { int offset = 0; /* size must be aligned to MONO_FRAME_ALIGNMENT bytes */ x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, MONO_FRAME_ALIGNMENT - 1); x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, ~(MONO_FRAME_ALIGNMENT - 1)); /* allocate space on stack */ mono_emit_stack_alloc (s, tree); if (tree->data.i) { /* initialize with zero */ if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) { x86_push_reg (s->code, X86_EAX); offset += 4; } if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) { x86_push_reg (s->code, X86_ECX); offset += 4; } if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI) { x86_push_reg (s->code, X86_EDI); offset += 4; } x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg1, 2); if (tree->left->reg1 != X86_ECX) x86_mov_reg_imm (s->code, X86_ECX, tree->left->reg1); x86_alu_reg_reg (s->code, X86_XOR, X86_EAX, X86_EAX); x86_lea_membase (s->code, X86_EDI, X86_ESP, offset); x86_cld (s->code); x86_prefix (s->code, X86_REP_PREFIX); x86_stosl (s->code); if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI) x86_pop_reg (s->code, X86_EDI); if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) x86_pop_reg (s->code, X86_EAX); } x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4); } reg: UNBOX (reg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); x86_push_reg (s->code, tree->reg1); x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4); x86_mov_reg_membase (s->code, tree->reg1, tree->reg1, 0, 4); x86_alu_membase_imm (s->code, X86_CMP, tree->reg1, G_STRUCT_OFFSET (MonoClass, element_class), ((int)(tree->data.klass->element_class))); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "InvalidCastException"); x86_pop_reg (s->code, tree->reg1); 
x86_alu_reg_imm (s->code, X86_ADD, tree->reg1, sizeof (MonoObject)); } reg: CASTCLASS (reg) { MonoClass *klass = tree->data.klass; guint8 *br [2]; int lreg = tree->left->reg1; x86_push_reg (s->code, lreg); x86_test_reg_reg (s->code, lreg, lreg); br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE); if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) { /* lreg = obj->vtable */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoVTable, max_interface_id), klass->interface_id); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GE, FALSE, "InvalidCastException"); /* lreg = obj->vtable->interface_offsets */ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4); x86_alu_membase_imm (s->code, X86_CMP, lreg, klass->interface_id << 2, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, FALSE, "InvalidCastException"); } else { /* lreg = obj->vtable */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); /* lreg = obj->vtable->klass */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); if (klass->rank) { x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "InvalidCastException"); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, cast_class), 4); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4); x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->cast_class->baseval); x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->cast_class->diffval); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "InvalidCastException"); } else { if (klass->marshalbyref) { /* check for transparent_proxy */ x86_alu_reg_imm (s->code, X86_CMP, lreg, (int)mono_defaults.transparent_proxy_class); br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); /* lreg = obj */ x86_mov_reg_membase (s->code, lreg, X86_ESP, 0, 4); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoTransparentProxy, 
klass), 4); x86_patch (br [1], s->code); } x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4); x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->baseval); x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->diffval); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "InvalidCastException"); } } x86_patch (br [0], s->code); x86_pop_reg (s->code, tree->reg1); } reg: ISINST (reg) { MonoClass *klass = tree->data.klass; guint8 *br [3]; int lreg = tree->left->reg1; x86_push_reg (s->code, lreg); x86_test_reg_reg (s->code, lreg, lreg); br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE); if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) { /* lreg = obj->vtable */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoVTable, max_interface_id), klass->interface_id); br [1] = s->code; x86_branch8 (s->code, X86_CC_LT, 0, FALSE); /* lreg = obj->vtable->interface_offsets */ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4); x86_alu_membase_imm (s->code, X86_CMP, lreg, klass->interface_id << 2, 0); br [2] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); x86_patch (br [1], s->code); x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4); x86_patch (br [2], s->code); } else { /* lreg = obj->vtable */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); /* lreg = obj->vtable->klass */ x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); if (klass->rank) { x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank); br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, cast_class), 4); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4); x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->cast_class->baseval); x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->cast_class->diffval); br [2] = s->code; x86_branch8 (s->code, 
X86_CC_LE, 0, FALSE); x86_patch (br [1], s->code); x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4); x86_patch (br [2], s->code); } else { if (klass->marshalbyref) { /* check for transparent_proxy */ x86_alu_reg_imm (s->code, X86_CMP, lreg, (int)mono_defaults.transparent_proxy_class); br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); /* lreg = obj */ x86_mov_reg_membase (s->code, lreg, X86_ESP, 0, 4); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoTransparentProxy, klass), 4); x86_patch (br [1], s->code); } x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4); x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->baseval); x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->diffval); br [2] = s->code; x86_branch8 (s->code, X86_CC_LE, 0, FALSE); x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4); x86_patch (br [2], s->code); } } x86_patch (br [0], s->code); x86_pop_reg (s->code, tree->reg1); } stmt: INITOBJ (reg) { int i, j; if (!(i = tree->data.i)) return; if (i == 1 || i == 2 || i == 4) { x86_mov_membase_imm (s->code, tree->left->reg1, 0, 0, i); return; } i = tree->data.i / 4; j = tree->data.i % 4; if (tree->left->reg1 != X86_EDI) { x86_push_reg (s->code, X86_EDI); x86_mov_reg_reg (s->code, X86_EDI, tree->left->reg1, 4); } if (i) { x86_alu_reg_reg (s->code, X86_XOR, X86_EAX, X86_EAX); x86_mov_reg_imm (s->code, X86_ECX, i); x86_cld (s->code); x86_prefix (s->code, X86_REP_PREFIX); x86_stosl (s->code); for (i = 0; i < j; i++) x86_stosb (s->code); } else { g_assert (j == 3); x86_mov_membase_imm (s->code, X86_EDI, 0, 0, 2); x86_mov_membase_imm (s->code, X86_EDI, 2, 0, 1); } if (tree->left->reg1 != X86_EDI) x86_pop_reg (s->code, X86_EDI); } stmt: CPBLK (reg, CPSRC (reg, CONST_I4)) { int dest_reg = tree->left->reg1; int source_reg = tree->right->left->reg1; int count = tree->right->right->data.i; int sreg = dest_reg != X86_EAX ? 
X86_EAX : X86_EDX; int spill_pos = 0, dest_offset = 0, source_offset = 0; int save_esi = FALSE, save_edi = FALSE; // TODO: handle unaligned. prefix switch (count) { case 0: break; case 1: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 1); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 1); break; case 2: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 2); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 2); break; case 3: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 2); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 2); x86_mov_reg_membase (s->code, sreg, source_reg, 2, 1); x86_mov_membase_reg (s->code, dest_reg, 2, sreg, 1); break; case 4: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4); break; case 5: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 4, 1); x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 1); break; case 6: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 4, 2); x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 2); break; case 7: x86_mov_reg_membase (s->code, sreg, source_reg, 0, 4); x86_mov_membase_reg (s->code, dest_reg, 0, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 4, 2); x86_mov_membase_reg (s->code, dest_reg, 4, sreg, 2); x86_mov_reg_membase (s->code, sreg, source_reg, 6, 1); x86_mov_membase_reg (s->code, dest_reg, 6, sreg, 1); break; case 8: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); break; case 9: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 1); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 1); break; case 10: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase 
(s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 2); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 2); break; case 11: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 2); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 2); x86_mov_reg_membase (s->code, sreg, source_reg, 10, 1); x86_mov_membase_reg (s->code, dest_reg, 10, sreg, 1); break; case 12: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4); break; case 13: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 12, 1); x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 1); break; case 14: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 12, 2); x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 2); break; case 15: x86_fild_membase (s->code, source_reg, 0, TRUE); x86_fist_pop_membase (s->code, dest_reg, 0, TRUE); x86_mov_reg_membase (s->code, sreg, source_reg, 8, 4); x86_mov_membase_reg (s->code, dest_reg, 8, sreg, 4); x86_mov_reg_membase (s->code, sreg, source_reg, 12, 2); x86_mov_membase_reg (s->code, dest_reg, 12, sreg, 2); x86_mov_reg_membase (s->code, sreg, source_reg, 14, 1); x86_mov_membase_reg (s->code, dest_reg, 14, sreg, 1); break; default: g_assert (count > 15); if (dest_reg != X86_ESI && source_reg != X86_ESI && mono_regset_reg_used (s->rs, X86_ESI)) save_esi = TRUE; if (dest_reg != X86_EDI && source_reg != 
X86_EDI && mono_regset_reg_used (s->rs, X86_EDI)) save_edi = TRUE; if (save_esi) x86_push_reg (s->code, X86_ESI); if (save_edi) x86_push_reg (s->code, X86_EDI); if (dest_reg == X86_ESI) { dest_offset = ++spill_pos; } if (source_reg == X86_EDI) { source_offset = ++spill_pos; } if (source_offset) x86_push_reg (s->code, source_reg); if (dest_offset) x86_push_reg (s->code, dest_reg); if (source_reg != X86_ESI) { if (source_offset) x86_mov_reg_membase (s->code, X86_ESI, X86_ESP, (source_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_ESI, source_reg, 4); } if (dest_reg != X86_EDI) { if (dest_offset) x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4); } x86_mov_reg_imm (s->code, X86_ECX, count >> 2); x86_cld (s->code); x86_prefix (s->code, X86_REP_PREFIX); x86_movsd (s->code); switch (count & 3) { case 1: x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 1); x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 1); break; case 2: x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 2); x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 2); break; case 3: x86_mov_reg_membase (s->code, sreg, X86_ESI, 0, 2); x86_mov_membase_reg (s->code, X86_EDI, 0, sreg, 2); x86_mov_reg_membase (s->code, sreg, X86_ESI, 2, 1); x86_mov_membase_reg (s->code, X86_EDI, 2, sreg, 1); break; default: break; } x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2); if (save_edi) x86_pop_reg (s->code, X86_EDI); if (save_esi) x86_pop_reg (s->code, X86_ESI); break; } } cost { MBCOND (mono_inline_memcpy); return 0; } stmt: CPBLK (reg, CPSRC (reg, reg)) { int dest_reg = tree->left->reg1; int source_reg = tree->right->left->reg1; int size_reg = tree->right->right->reg1; int spill_pos = 0, size_offset = 0, dest_offset = 0, source_offset = 0; int save_esi = FALSE, save_edi = FALSE; if (!mono_inline_memcpy) { x86_push_reg (s->code, size_reg); x86_push_reg (s->code, source_reg); x86_push_reg (s->code, dest_reg); mono_add_jump_info (s, s->code, 
MONO_JUMP_INFO_ABS, memmove); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } else { if (dest_reg != X86_ESI && source_reg != X86_ESI && size_reg != X86_ESI && mono_regset_reg_used (s->rs, X86_ESI)) save_esi = TRUE; if (dest_reg != X86_EDI && source_reg != X86_EDI && size_reg != X86_EDI && mono_regset_reg_used (s->rs, X86_EDI)) save_edi = TRUE; if (save_esi) x86_push_reg (s->code, X86_ESI); if (save_edi) x86_push_reg (s->code, X86_EDI); if (size_reg == X86_EDI || size_reg == X86_ESI) { size_offset = ++spill_pos; } if (dest_reg == X86_ECX || dest_reg == X86_ESI) { dest_offset = ++spill_pos; } if (source_reg == X86_ECX || source_reg == X86_EDI) { source_offset = ++spill_pos; } if (source_offset) x86_push_reg (s->code, source_reg); if (dest_offset) x86_push_reg (s->code, dest_reg); if (size_offset) x86_push_reg (s->code, size_reg); if (source_reg != X86_ESI) { if (source_offset) x86_mov_reg_membase (s->code, X86_ESI, X86_ESP, (source_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_ESI, source_reg, 4); } if (dest_reg != X86_EDI) { if (dest_offset) x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4); } if (size_reg != X86_ECX) { if (size_offset) x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, (size_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_ECX, size_reg, 4); } x86_push_reg (s->code, X86_ECX); x86_shift_reg_imm (s->code, X86_SHR, X86_ECX, 2); x86_cld (s->code); // move whole dwords first x86_prefix (s->code, X86_REP_PREFIX); x86_movsd (s->code); x86_pop_reg (s->code, X86_ECX); x86_alu_reg_imm (s->code, X86_AND, X86_ECX, 3); // move remaining bytes (if any) x86_prefix (s->code, X86_REP_PREFIX); x86_movsb (s->code); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2); if (save_edi) x86_pop_reg (s->code, X86_EDI); if (save_esi) x86_pop_reg (s->code, X86_ESI); } } stmt: INITBLK (reg, CPSRC (reg, CONST_I4)) { int dest_reg = tree->left->reg1; int 
value_reg = tree->right->left->reg1; int size = tree->right->right->data.i; int spill_pos = 0, dest_offset = 0, value_offset = 0; int save_edi = FALSE; int i, j; i = size / 4; j = size % 4; if (mono_inline_memcpy) { if (dest_reg != X86_EDI && value_reg != X86_EDI && mono_regset_reg_used (s->rs, X86_EDI)) { save_edi = TRUE; x86_push_reg (s->code, X86_EDI); } if (dest_reg == X86_ECX || dest_reg == X86_EAX) { dest_offset = ++spill_pos; } if (value_reg == X86_ECX || value_reg == X86_EDI) { value_offset = ++spill_pos; } if (value_offset) x86_push_reg (s->code, value_reg); if (dest_offset) x86_push_reg (s->code, dest_reg); if (value_reg != X86_EAX) { if (value_offset) x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (value_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_EAX, value_reg, 4); } if (dest_reg != X86_EDI) { if (dest_offset) x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4); else x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4); } x86_widen_reg (s->code, X86_EAX, X86_EAX, FALSE, FALSE); x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4); x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 8); x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX); x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4); x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 16); x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX); if (i) { x86_mov_reg_imm (s->code, X86_ECX, i); x86_cld (s->code); x86_prefix (s->code, X86_REP_PREFIX); x86_stosd (s->code); } for (i = 0; i < j; i++) x86_stosb (s->code); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2); if (save_edi) x86_pop_reg (s->code, X86_EDI); } else { x86_push_imm (s->code, size); x86_push_reg (s->code, value_reg); x86_push_reg (s->code, dest_reg); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, memset); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } } cost { MBCOND (mono_inline_memcpy); return 0; } stmt: INITBLK (reg, CPSRC (reg, reg)) { int dest_reg = tree->left->reg1; int value_reg = 
tree->right->left->reg1;
	int size_reg = tree->right->right->reg1;
	int spill_pos = 0, size_offset = 0, dest_offset = 0, value_offset = 0;
	int save_edi = FALSE;

	if (mono_inline_memcpy) {
		/* EDI is clobbered by the inline stos loop (it receives the destination).
		 * Preserve it when it is a live allocated register and none of the three
		 * operands already lives in it.  FIX: the guard used to test size_reg
		 * twice and never tested value_reg (copy/paste slip vs. the CPBLK rule
		 * above), causing a redundant save/restore when the value operand was
		 * allocated to EDI. */
		if (dest_reg != X86_EDI && value_reg != X86_EDI && size_reg != X86_EDI && mono_regset_reg_used (s->rs, X86_EDI)) {
			save_edi = TRUE;
			x86_push_reg (s->code, X86_EDI);
		}

		/* operands whose home register is overwritten before it is consumed
		 * (value -> EAX, dest -> EDI, size -> ECX, loaded in that order) are
		 * spilled to the stack and reloaded from their spill slot below */
		if (size_reg == X86_EDI || size_reg == X86_EAX) {
			size_offset = ++spill_pos;
		}
		if (dest_reg == X86_ECX || dest_reg == X86_EAX) {
			dest_offset = ++spill_pos;
		}
		if (value_reg == X86_ECX || value_reg == X86_EDI) {
			value_offset = ++spill_pos;
		}

		if (value_offset)
			x86_push_reg (s->code, value_reg);
		if (dest_offset)
			x86_push_reg (s->code, dest_reg);
		if (size_offset)
			x86_push_reg (s->code, size_reg);

		if (value_reg != X86_EAX) {
			if (value_offset)
				x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (value_offset-1)<<2, 4);
			else
				x86_mov_reg_reg (s->code, X86_EAX, value_reg, 4);
		}
		if (dest_reg != X86_EDI) {
			if (dest_offset)
				x86_mov_reg_membase (s->code, X86_EDI, X86_ESP, (dest_offset-1)<<2, 4);
			else
				x86_mov_reg_reg (s->code, X86_EDI, dest_reg, 4);
		}
		if (size_reg != X86_ECX) {
			if (size_offset)
				x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, (size_offset-1)<<2, 4);
			else
				x86_mov_reg_reg (s->code, X86_ECX, size_reg, 4);
		}

		/* replicate the low byte of the fill value into all four bytes of EAX
		 * (EDX is used as scratch) */
		x86_widen_reg (s->code, X86_EAX, X86_EAX, FALSE, FALSE);
		x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
		x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 8);
		x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);
		x86_mov_reg_reg (s->code, X86_EDX, X86_EAX, 4);
		x86_shift_reg_imm (s->code, X86_SHL, X86_EAX, 16);
		x86_alu_reg_reg (s->code, X86_OR, X86_EAX, X86_EDX);

		/* save the byte count, then store ECX/4 dwords */
		x86_push_reg (s->code, X86_ECX);
		x86_shift_reg_imm (s->code, X86_SHR, X86_ECX, 2);
		x86_cld (s->code);

		// init whole dwords first
		x86_prefix (s->code, X86_REP_PREFIX);
		x86_stosd (s->code);

		/* then the remaining count & 3 bytes */
		x86_pop_reg (s->code, X86_ECX);
		x86_alu_reg_imm (s->code, X86_AND, X86_ECX, 3);

		// init remaining bytes (if any)
		x86_prefix (s->code, X86_REP_PREFIX);
		x86_stosb (s->code);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, spill_pos<<2); if (save_edi) x86_pop_reg (s->code, X86_EDI); } else { x86_push_reg (s->code, size_reg); x86_push_reg (s->code, value_reg); x86_push_reg (s->code, dest_reg); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, memset); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } } stmt: NOP stmt: POP (reg) stmt: BR { mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb); x86_jump32 (s->code, 0); } cflags: COMPARE (reg, LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->right->left->data.i).reg; x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, treg); } cost { MBCOND ((VARINFO (data, tree->right->left->data.i).reg >= 0)); return 0; } cflags: COMPARE (LDIND_I4 (ADDR_L), CONST_I4) { int treg = VARINFO (s, tree->left->left->data.i).reg; x86_alu_reg_imm (s->code, X86_CMP, treg, tree->right->data.i); } cost { MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0)); return 0; } cflags: COMPARE (LDIND_I4 (ADDR_L), reg) { int treg = VARINFO (s, tree->left->left->data.i).reg; x86_alu_reg_reg (s->code, X86_CMP, treg, tree->right->reg1); } cost { MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0)); return 0; } cflags: COMPARE (LDIND_I4 (ADDR_L), CONST_I4) { int offset = VARINFO (s, tree->left->left->data.i).offset; x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, tree->right->data.i); } cost { MBCOND ((VARINFO (data, tree->left->left->data.i).reg < 0)); return 0; } cflags: COMPARE (reg, CONST_I4) { x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, tree->right->data.i); } cflags: COMPARE (reg, reg) { x86_alu_reg_reg (s->code, X86_CMP, tree->left->reg1, tree->right->reg1); } stmt: CBRANCH (cflags) { mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target); switch (tree->data.bi.cond) { case CEE_BLT: x86_branch32 (s->code, X86_CC_LT, 0, TRUE); break; case CEE_BLT_UN: x86_branch32 (s->code, X86_CC_LT, 0, FALSE); break; case CEE_BGT: x86_branch32 
(s->code, X86_CC_GT, 0, TRUE); break; case CEE_BGT_UN: x86_branch32 (s->code, X86_CC_GT, 0, FALSE); break; case CEE_BEQ: x86_branch32 (s->code, X86_CC_EQ, 0, TRUE); break; case CEE_BNE_UN: x86_branch32 (s->code, X86_CC_NE, 0, FALSE); break; case CEE_BGE: x86_branch32 (s->code, X86_CC_GE, 0, TRUE); break; case CEE_BGE_UN: x86_branch32 (s->code, X86_CC_GE, 0, FALSE); break; case CEE_BLE: x86_branch32 (s->code, X86_CC_LE, 0, TRUE); break; case CEE_BLE_UN: x86_branch32 (s->code, X86_CC_LE, 0, FALSE); break; default: g_assert_not_reached (); } } stmt: BRTRUE (LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->left->left->data.i).reg; int offset = VARINFO (s, tree->left->left->data.i).offset; if (treg >= 0) x86_test_reg_reg (s->code, treg, treg); else x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, 0); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb); x86_branch32 (s->code, X86_CC_NE, 0, TRUE); } stmt: BRTRUE (reg) { x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb); x86_branch32 (s->code, X86_CC_NE, 0, TRUE); } stmt: BRFALSE (LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->left->left->data.i).reg; int offset = VARINFO (s, tree->left->left->data.i).offset; if (treg >= 0) x86_test_reg_reg (s->code, treg, treg); else x86_alu_membase_imm (s->code, X86_CMP, X86_EBP, offset, 0); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb); x86_branch32 (s->code, X86_CC_EQ, 0, TRUE); //{static int cx= 0; printf ("CX1 %5d\n", cx++);} } stmt: BRFALSE (reg) { x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb); x86_branch32 (s->code, X86_CC_EQ, 0, TRUE); } stmt: BREAK { x86_breakpoint (s->code); } stmt: RET (reg) { if (tree->left->reg1 != X86_EAX) x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4); if (!tree->last_instr) { mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL); 
x86_jump32 (s->code, 0); } } stmt: RET_VOID { if (!tree->last_instr) { mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL); x86_jump32 (s->code, 0); } } stmt: ARG_I4 (LDIND_I4 (addr)) { MBTree *at = tree->left->left; int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); switch (at->data.ainfo.amode) { case AMImmediate: x86_push_mem (s->code, at->data.ainfo.offset); break; case AMBase: x86_push_membase (s->code, at->data.ainfo.basereg, at->data.ainfo.offset); break; case AMIndex: x86_push_memindex (s->code, X86_NOBASEREG, at->data.ainfo.offset, at->data.ainfo.indexreg, at->data.ainfo.shift); break; case AMBaseIndex: x86_push_memindex (s->code, at->data.ainfo.basereg, at->data.ainfo.offset, at->data.ainfo.indexreg, at->data.ainfo.shift); break; } } stmt: ARG_I4 (LDIND_I4 (ADDR_L)) { int treg = VARINFO (s, tree->left->left->data.i).reg; int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); x86_push_reg (s->code, treg); } cost { MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0)); return 0; } stmt: ARG_I4 (reg) { int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); x86_push_reg (s->code, tree->left->reg1); } stmt: ARG_I4 (ADDR_G) { int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); x86_push_imm (s->code, tree->left->data.p); } stmt: ARG_I4 (CONST_I4) "MB_USE_OPT1(0)" { int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); x86_push_imm (s->code, tree->left->data.i); } this: reg { PRINT_REG ("THIS", tree->reg1); } reg: CHECKTHIS (reg) { /* try to access the vtable - this will raise an exception * if the object is NULL */ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, 0, 0); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } stmt: CHECKTHIS (reg) { x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, 0, 0); } stmt: JMP { int pos = -4; /* restore callee saved registers */ if (mono_regset_reg_used (s->rs, X86_EBX)) { x86_mov_reg_membase (s->code, X86_EBX, X86_EBP, pos, 4); pos -= 4; } if 
(mono_regset_reg_used (s->rs, X86_EDI)) { x86_mov_reg_membase (s->code, X86_EDI, X86_EBP, pos, 4); pos -= 4; } if (mono_regset_reg_used (s->rs, X86_ESI)) { x86_mov_reg_membase (s->code, X86_ESI, X86_EBP, pos, 4); pos -= 4; } /* restore ESP/EBP */ x86_leave (s->code); /* jump to the method */ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p); x86_jump32 (s->code, 0); } this: NOP reg: CALL_I4 (this, reg) { int treg = X86_EAX; int lreg = tree->left->reg1; int rreg = tree->right->reg1; if (lreg == treg || rreg == treg) treg = X86_EDX; if (lreg == treg || rreg == treg) treg = X86_ECX; if (lreg == treg || rreg == treg) mono_assert_not_reached (); X86_CALL_BEGIN; x86_call_reg (s->code, rreg); X86_CALL_END; mono_assert (tree->reg1 == X86_EAX); } reg: CALL_I4 (this, ADDR_G) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p); x86_call_code (s->code, 0); X86_CALL_END; mono_assert (tree->reg1 == X86_EAX); } reg: LDVIRTFTN (reg, INTF_ADDR) { /* we cant return the value in the vtable, because it can be * a magic trampoline, and we cant pass that to the outside world */ if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->right->data.m->klass->interface_id); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldintftn); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } } reg: CALL_I4 (this, INTF_ADDR) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_mov_reg_membase (s->code, lreg, lreg, 
G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4); x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4); x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2); X86_CALL_END; mono_assert (tree->reg1 == X86_EAX); } reg: LDVIRTFTN (reg, VFUNC_ADDR) { /* we cant return the value in the vtable, because it can be * a magic trampoline, and we cant pass that to the outside world */ if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->right->data.m->slot); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldvirtftn); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } } reg: LDFTN { if (tree->reg1 != X86_EAX) x86_push_reg (s->code, X86_EAX); x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, X86_EDX); x86_push_imm (s->code, tree->data.m); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_ldftn); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer)); x86_pop_reg (s->code, X86_EDX); x86_pop_reg (s->code, X86_ECX); if (tree->reg1 != X86_EAX) { x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4); x86_pop_reg (s->code, X86_EAX); } } reg: CALL_I4 (this, VFUNC_ADDR) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_call_virtual (s->code, lreg, G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2)); X86_CALL_END; mono_assert (tree->reg1 == X86_EAX); } stmt: CALL_VOID (this, ADDR_G) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; mono_add_jump_info (s, s->code, 
MONO_JUMP_INFO_ABS, tree->right->data.p); x86_call_code (s->code, 0); X86_CALL_END; } stmt: CALL_VOID (this, reg) { int treg = X86_EAX; int lreg = tree->left->reg1; int rreg = tree->right->reg1; if (lreg == treg || rreg == treg) treg = X86_EDX; if (lreg == treg || rreg == treg) treg = X86_ECX; if (lreg == treg || rreg == treg) mono_assert_not_reached (); X86_CALL_BEGIN; x86_call_reg (s->code, tree->right->reg1); X86_CALL_END; } stmt: CALL_VOID (this, INTF_ADDR) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4); x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4); x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2); X86_CALL_END; } stmt: CALL_VOID (this, VFUNC_ADDR) { int lreg = tree->left->reg1; int treg = X86_EAX; if (lreg == treg) treg = X86_EDX; X86_CALL_BEGIN; x86_mov_reg_membase (s->code, lreg, lreg, 0, 4); x86_call_virtual (s->code, lreg, G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2)); X86_CALL_END; } stmt: SWITCH (reg) { guint32 offset; guint32 *jt = (guint32 *)tree->data.p; x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, jt [0]); offset = 6 + (guint32)s->code; x86_branch32 (s->code, X86_CC_GE, jt [jt [0] + 1] - offset, FALSE); x86_mov_reg_memindex (s->code, X86_EAX, X86_NOBASEREG, tree->data.i + 4, tree->left->reg1, 2, 4); x86_jump_reg (s->code, X86_EAX); } # # 64 bit integers # reg: CONV_I1 (lreg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, FALSE); } reg: CONV_U1 (lreg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, FALSE); } reg: CONV_I2 (lreg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, TRUE, TRUE); } reg: CONV_U2 (lreg) { x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE); } reg: CONV_I4 (lreg) { if (tree->reg1 != 
tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_U4 (lreg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_OVF_I4 (lreg){ guint8 *start = s->code; guchar* o1, *o2, *o3, *o4, *o5; int i; /* * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000 */ for (i = 0; i < 2; i++) { s->code = start; x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1); /* If the low word top bit is set, see if we are negative */ x86_branch8 (s->code, X86_CC_LT, o3 - o1, TRUE); o1 = s->code; /* We are not negative (no top bit set, check for our top word to be zero */ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2); x86_branch8 (s->code, X86_CC_EQ, o4 - o2, TRUE); o2 = s->code; /* throw exception */ x86_push_imm (s->code, "OverflowException"); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception_by_name ()); x86_call_code (s->code, 0); o3 = s->code; /* our top bit is set, check that top word is 0xfffffff */ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg2, 0xffffffff); o4 = s->code; /* nope, emit exception */ x86_branch8 (s->code, X86_CC_NE, o2 - o5, TRUE); o5 = s->code; if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } } reg: CONV_OVF_I4 (lreg){ guint8 *br [3], *label [1]; /* * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000 */ x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1); /* If the low word top bit is set, see if we are negative */ br [0] = s->code; x86_branch8 (s->code, X86_CC_LT, 0, TRUE); /* We are not negative (no top bit set, check for our top word to be zero */ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2); br [1] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, TRUE); label [0] = s->code; /* throw exception */ x86_push_imm (s->code, "OverflowException"); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception_by_name ()); 
x86_call_code (s->code, 0); x86_patch (br [0], s->code); /* our top bit is set, check that top word is 0xfffffff */ x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg2, 0xffffffff); x86_patch (br [1], s->code); /* nope, emit exception */ br [2] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE); x86_patch (br [2], label [0]); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_OVF_U4 (lreg) { /* Keep in sync with CONV_OVF_I4_UN below, they are the same on 32-bit machines */ /* top word must be 0 */ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } reg: CONV_OVF_I4_UN (lreg) { /* Keep in sync with CONV_OVF_U4 above, they are the same on 32-bit machines */ /* top word must be 0 */ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } stmt: POP (lreg) lreg: CONST_I8 1 { x86_mov_reg_imm (s->code, tree->reg1, *((gint32 *)&tree->data.p)); x86_mov_reg_imm (s->code, tree->reg2, *((gint32 *)&tree->data.p + 1)); } lreg: CONV_I8 (CONST_I4) { x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i); if (tree->left->data.i >= 0) x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); else x86_mov_reg_imm (s->code, tree->reg2, -1); } lreg: CONV_I8 (reg) { guint8 *i1; if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); x86_alu_reg_imm (s->code, X86_CMP, tree->left->reg1, 0); x86_branch8 (s->code, X86_CC_GE, 5, TRUE); i1 = s->code; x86_mov_reg_imm (s->code, tree->reg2, -1); mono_assert ((s->code - i1) == 5); } lreg: CONV_U8 (CONST_I4) 1 { x86_mov_reg_imm (s->code, 
tree->reg1, tree->left->data.i); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } lreg: CONV_U8 (reg) { if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } lreg: CONV_OVF_U8 (CONST_I4) { if (tree->left->data.i < 0){ x86_push_imm (s->code, "OverflowException"); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception_by_name ()); x86_call_code (s->code, 0); } else { x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } } lreg: CONV_OVF_I8_UN (CONST_I4) { x86_mov_reg_imm (s->code, tree->reg1, tree->left->data.i); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } lreg: CONV_OVF_U8 (reg) { x86_test_reg_imm (s->code, tree->left->reg1, 0x8000000); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "OverflowException"); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } lreg: CONV_OVF_I8_UN (reg) { /* Convert uint value into int64, we pass everything */ if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); x86_alu_reg_reg (s->code, X86_XOR, tree->reg2, tree->reg2); } stmt: STIND_I8 (addr, lreg) { switch (tree->left->data.ainfo.amode) { case AMImmediate: x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset, tree->right->reg1, 4); x86_mov_mem_reg (s->code, tree->left->data.ainfo.offset + 4, tree->right->reg2, 4); break; case AMBase: x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, tree->right->reg1, 4); x86_mov_membase_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset + 4, tree->right->reg2, 4); break; case AMIndex: x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg, 
tree->left->data.ainfo.shift, tree->right->reg1, 4); x86_mov_memindex_reg (s->code, X86_NOBASEREG, tree->left->data.ainfo.offset + 4, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, tree->right->reg2, 4); break; case AMBaseIndex: x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, tree->right->reg1, 4); x86_mov_memindex_reg (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset + 4, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift, tree->right->reg2, 4); break; } } stmt: REMOTE_STIND_I8 (reg, lreg) { guint8 *br[2]; int offset; x86_push_reg (s->code, tree->right->reg1); x86_mov_reg_membase (s->code, tree->right->reg1, tree->left->reg1, 0, 4); x86_alu_membase_imm (s->code, X86_CMP, tree->right->reg1, 0, ((int)mono_defaults.transparent_proxy_class)); x86_pop_reg (s->code, tree->right->reg1); br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); /* this is a transparent proxy - remote the call */ /* save value to stack */ x86_push_reg (s->code, tree->right->reg2); x86_push_reg (s->code, tree->right->reg1); x86_push_reg (s->code, X86_ESP); x86_push_imm (s->code, tree->data.fi.field); x86_push_imm (s->code, tree->data.fi.klass); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 24); br [1] = s->code; x86_jump8 (s->code, 0); x86_patch (br [0], s->code); offset = tree->data.fi.klass->valuetype ? tree->data.fi.field->offset - sizeof (MonoObject) : tree->data.fi.field->offset; x86_mov_membase_reg (s->code, tree->left->reg1, offset, tree->right->reg1, 4); x86_mov_membase_reg (s->code, tree->left->reg1, offset + 4, tree->right->reg2, 4); x86_patch (br [1], s->code); } # an addr can use two address register (base and index register). 
The must take care # that we do not override them (thus the use of x86_lea) lreg: LDIND_I8 (addr) { switch (tree->left->data.ainfo.amode) { case AMImmediate: x86_mov_reg_mem (s->code, tree->reg1, tree->left->data.ainfo.offset, 4); x86_mov_reg_mem (s->code, tree->reg2, tree->left->data.ainfo.offset + 4, 4); break; case AMBase: x86_lea_membase (s->code, tree->reg2, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset); x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4); x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4); break; case AMIndex: x86_lea_memindex (s->code, tree->reg2, X86_NOBASEREG, tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift); x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4); x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4); break; case AMBaseIndex: x86_lea_memindex (s->code, tree->reg2, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.shift); x86_mov_reg_membase (s->code, tree->reg1, tree->reg2, 0, 4); x86_mov_reg_membase (s->code, tree->reg2, tree->reg2, 4, 4); break; } PRINT_REG ("LDIND_I8_0", tree->reg1); PRINT_REG ("LDIND_I8_1", tree->reg2); } lreg: SHR (lreg, CONST_I4) { if (tree->right->data.i < 32) { x86_shrd_reg_imm (s->code, tree->left->reg1, tree->left->reg2, tree->right->data.i); x86_shift_reg_imm (s->code, X86_SAR, tree->left->reg2, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->reg2 != tree->left->reg2) x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4); } else if (tree->right->data.i < 64) { if (tree->reg1 != tree->left->reg2) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4); if (tree->reg2 != tree->left->reg2) x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4); x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31); x86_shift_reg_imm (s->code, X86_SAR, tree->reg1, 
(tree->right->data.i - 32)); } /* else unspecified result */ } lreg: SHR_UN (lreg, CONST_I4) { if (tree->right->data.i < 32) { x86_shrd_reg_imm (s->code, tree->left->reg1, tree->left->reg2, tree->right->data.i); x86_shift_reg_imm (s->code, X86_SHR, tree->left->reg2, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->reg2 != tree->left->reg2) x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4); } else if (tree->right->data.i < 64) { x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg2, 4); x86_shift_reg_imm (s->code, X86_SHR, tree->reg1, (tree->right->data.i - 32)); x86_mov_reg_imm (s->code, tree->reg2, 0); } /* else unspecified result */ } lreg: SHR (lreg, reg) { guint8 *br [1]; if (tree->right->reg1 != X86_ECX) x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2); x86_shift_reg (s->code, X86_SAR, tree->left->reg2); x86_test_reg_imm (s->code, X86_ECX, 32); br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE); x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4); x86_shift_reg_imm (s->code, X86_SAR, tree->reg2, 31); x86_patch (br [0], s->code); MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2); } lreg: SHR_UN (lreg, reg) { guint8 *br [1]; if (tree->right->reg1 != X86_ECX) x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); x86_shrd_reg (s->code, tree->left->reg1, tree->left->reg2); x86_shift_reg (s->code, X86_SHR, tree->left->reg2); x86_test_reg_imm (s->code, X86_ECX, 32); br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE); x86_mov_reg_reg (s->code, tree->left->reg1, tree->left->reg2, 4); x86_shift_reg_imm (s->code, X86_SHR, tree->reg2, 31); x86_patch (br [0], s->code); MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2); } lreg: SHL (lreg, CONST_I4) { if (tree->right->data.i < 32) { x86_shld_reg_imm (s->code, tree->left->reg2, 
tree->left->reg1, tree->right->data.i); x86_shift_reg_imm (s->code, X86_SHL, tree->left->reg1, tree->right->data.i); if (tree->reg1 != tree->left->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); if (tree->reg2 != tree->left->reg2) x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg2, 4); } else if (tree->right->data.i < 64) { x86_mov_reg_reg (s->code, tree->reg2, tree->left->reg1, 4); x86_shift_reg_imm (s->code, X86_SHL, tree->reg2, (tree->right->data.i - 32)); x86_alu_reg_reg (s->code, X86_XOR, tree->reg1, tree->reg1); } /* else unspecified result */ } lreg: SHL (lreg, reg) { guint8 *br [1]; if (tree->right->reg1 != X86_ECX) x86_mov_reg_reg (s->code, X86_ECX, tree->right->reg1, 4); x86_shld_reg (s->code, tree->left->reg2, tree->left->reg1); x86_shift_reg (s->code, X86_SHL, tree->left->reg1); x86_test_reg_imm (s->code, X86_ECX, 32); br [0] = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE); x86_mov_reg_reg (s->code, tree->left->reg2, tree->left->reg1, 4); x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->left->reg1); x86_patch (br [0], s->code); MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2); } lreg: ADD (lreg, lreg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2); MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2); } lreg: ADD_OVF (lreg, lreg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException"); MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2); } lreg: ADD_OVF_UN (lreg, lreg) { x86_alu_reg_reg (s->code, X86_ADD, tree->left->reg1, tree->right->reg1); x86_alu_reg_reg (s->code, X86_ADC, tree->left->reg2, tree->right->reg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException"); MOVE_LREG (tree->reg1, 
tree->reg2, tree->left->reg1, tree->left->reg2);
}

# 64-bit subtract: SUB on the low words, SBB on the high words (borrow chain)
lreg: SUB (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

# signed 64-bit subtract: OF after the high-word SBB signals overflow,
# so throw unless the no-overflow condition (NO) holds
lreg: SUB_OVF (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NO, TRUE, "OverflowException");
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

# unsigned 64-bit subtract: CF after the high-word SBB signals underflow,
# so throw unless the no-carry condition (NC) holds
lreg: SUB_OVF_UN (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_SUB, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_SBB, tree->left->reg2, tree->right->reg2);
	EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NC, FALSE, "OverflowException");
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

# bitwise ops act on each 32-bit half independently - no carry to propagate
lreg: AND (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_AND, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_AND, tree->left->reg2, tree->right->reg2);
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

lreg: OR (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_OR, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_OR, tree->left->reg2, tree->right->reg2);
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

lreg: XOR (lreg, lreg) {
	x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg1, tree->right->reg1);
	x86_alu_reg_reg (s->code, X86_XOR, tree->left->reg2, tree->right->reg2);
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
}

# 64-bit negate: NEG the low word (sets CF when it was non-zero), propagate
# the borrow into the high word with ADC, then NEG the high word
lreg: NEG (lreg) {
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
	x86_neg_reg (s->code, tree->reg1);
	x86_alu_reg_imm (s->code, X86_ADC, tree->reg2, 0);
	x86_neg_reg (s->code, tree->reg2);
}

# 64-bit bitwise NOT: complement both halves
lreg: NOT (lreg) {
	MOVE_LREG (tree->reg1, tree->reg2, tree->left->reg1, tree->left->reg2);
	x86_not_reg (s->code,
tree->reg1); x86_not_reg (s->code, tree->reg2); } lreg: MUL (lreg, lreg) { if (mono_regset_reg_used (s->rs, X86_ECX)) x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, tree->right->reg2); x86_push_reg (s->code, tree->right->reg1); x86_push_reg (s->code, tree->left->reg2); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16); if (mono_regset_reg_used (s->rs, X86_ECX)) x86_pop_reg (s->code, X86_ECX); mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX); } lreg: MUL_OVF (lreg, lreg) { if (mono_regset_reg_used (s->rs, X86_ECX)) x86_push_reg (s->code, X86_ECX); x86_push_reg (s->code, tree->right->reg2); x86_push_reg (s->code, tree->right->reg1); x86_push_reg (s->code, tree->left->reg2); x86_push_reg (s->code, tree->left->reg1); /* pass a pointer to store the resulting exception - * ugly, but it works */ x86_push_reg (s->code, X86_ESP); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult_ovf); x86_call_code (s->code, 0); x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20); x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0); /* cond. 
emit exception */
	/* skip the throw sequence (push + 5 byte call = 7 bytes) when no
	 * exception was stored by the helper */
	x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
	x86_push_reg (s->code, X86_ECX);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception ());
	x86_call_code (s->code, 0);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Unsigned variant: identical to MUL_OVF but calls mono_llmult_ovf_un.
lreg: MUL_OVF_UN (lreg, lreg) {
	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_push_reg (s->code, X86_ECX);

	x86_push_reg (s->code, tree->right->reg2);
	x86_push_reg (s->code, tree->right->reg1);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	/* pass a pointer to store the resulting exception -
	 * ugly, but it works */
	x86_push_reg (s->code, X86_ESP);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llmult_ovf_un);
	x86_call_code (s->code, 0);
	x86_mov_reg_membase (s->code, X86_ECX, X86_ESP, 4, 4);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);
	x86_alu_reg_imm (s->code, X86_CMP, X86_ECX, 0);
	/* cond. emit exception */
	x86_branch8 (s->code, X86_CC_EQ, 7, FALSE);
	x86_push_reg (s->code, X86_ECX);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, arch_get_throw_exception ());
	x86_call_code (s->code, 0);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Signed 64 bit division, out of line in mono_lldiv; result in EDX:EAX.
lreg: DIV (lreg, lreg) {
	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_push_reg (s->code, X86_ECX);

	x86_push_reg (s->code, tree->right->reg2);
	x86_push_reg (s->code, tree->right->reg1);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_lldiv);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Signed 64 bit remainder, out of line in mono_llrem.
lreg: REM (lreg, lreg) {
	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_push_reg (s->code, X86_ECX);
x86_push_reg (s->code, tree->right->reg2);
	x86_push_reg (s->code, tree->right->reg1);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llrem);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Unsigned 64 bit division via mono_lldiv_un; same calling pattern.
lreg: DIV_UN (lreg, lreg) {
	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_push_reg (s->code, X86_ECX);

	x86_push_reg (s->code, tree->right->reg2);
	x86_push_reg (s->code, tree->right->reg1);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_lldiv_un);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Unsigned 64 bit remainder via mono_llrem_un.
lreg: REM_UN (lreg, lreg) {
	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_push_reg (s->code, X86_ECX);

	x86_push_reg (s->code, tree->right->reg2);
	x86_push_reg (s->code, tree->right->reg1);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_llrem_un);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16);

	if (mono_regset_reg_used (s->rs, X86_ECX))
		x86_pop_reg (s->code, X86_ECX);

	mono_assert (tree->reg1 == X86_EAX && tree->reg2 == X86_EDX);
}

# Indirect call returning a 64 bit value.  treg is picked so it clashes
# with neither the 'this' register nor the function-pointer register;
# with only EAX/EDX/ECX as candidates, two conflicts at most can occur.
lreg: CALL_I8 (this, reg) {
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int rreg = tree->right->reg1;

	if (lreg == treg || rreg == treg)
		treg = X86_EDX;
	if (lreg == treg || rreg == treg)
		treg = X86_ECX;
	if (lreg == treg || rreg == treg)
		mono_assert_not_reached ();

	X86_CALL_BEGIN;
	x86_call_reg (s->code, rreg);
	X86_CALL_END;

	mono_assert (tree->reg1 == X86_EAX);
	mono_assert (tree->reg2 == X86_EDX);
}

lreg: CALL_I8 (this,
ADDR_G) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	/* direct call to an absolute address recorded as patch info */
	X86_CALL_BEGIN;
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
	x86_call_code (s->code, 0);
	X86_CALL_END;

	mono_assert (tree->reg1 == X86_EAX);
	mono_assert (tree->reg2 == X86_EDX);
}

# Virtual call returning 64 bits: load the vtable from the object, then
# call through the slot.  x86_call_virtual has a fixed encoding size
# (see the macro in the prologue) so the magic trampoline can decode it.
lreg: CALL_I8 (this, VFUNC_ADDR) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	X86_CALL_BEGIN;
	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
	x86_call_virtual (s->code, lreg, G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
	X86_CALL_END;

	mono_assert (tree->reg1 == X86_EAX);
	mono_assert (tree->reg2 == X86_EDX);
}

# Interface call returning 64 bits: vtable -> interface_offsets ->
# per-interface table, then call through the method slot.
lreg: CALL_I8 (this, INTF_ADDR) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	X86_CALL_BEGIN;
	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
	x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
	x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
	x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
	X86_CALL_END;

	mono_assert (tree->reg1 == X86_EAX);
	mono_assert (tree->reg2 == X86_EDX);
}

# Return a 64 bit value in EDX:EAX.  The shuffle is careful about the
# case where the source high word already sits in EAX (it would be
# clobbered by the low-word move), bouncing it through ECX.
stmt: RET (lreg) {
	if (tree->left->reg1 != X86_EAX) {
		if (tree->left->reg2 != X86_EAX) {
			x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
			if (tree->left->reg2 != X86_EDX)
				x86_mov_reg_reg (s->code, X86_EDX, tree->left->reg2, 4);
		} else {
			x86_mov_reg_reg (s->code, X86_ECX, tree->left->reg2, 4);
			x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
			x86_mov_reg_reg (s->code, X86_EDX, X86_ECX, 4);
		}
	} else if (tree->left->reg2 != X86_EDX) {
		x86_mov_reg_reg (s->code, X86_EDX, tree->left->reg2, 4);
	}

	if (!tree->last_instr) {
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
		x86_jump32 (s->code, 0);
	}
}

# Push a 64 bit argument: high word first so the low word ends up at the
# lower address.
stmt: ARG_I8 (lreg) {
	int pad = tree->data.arg_info.pad;

	X86_ARG_PAD (pad);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
}

reg:
CSET (COMPARE (lreg, lreg)) {
	/* Materialize a CIL comparison (ceq/cgt/clt and unsigned variants)
	 * of two 64 bit values as 0/1 in reg1.  The two-word compare first
	 * tests the high words; only when they are equal does the low-word
	 * compare decide.  br[0..2] are forward branches patched below. */
	guint8 *br [4];
	int lreg1, lreg2, rreg1, rreg2;

	lreg1 = tree->left->left->reg1;
	lreg2 = tree->left->left->reg2;
	rreg1 = tree->left->right->reg1;
	rreg2 = tree->left->right->reg2;

	if (tree->data.i == CEE_CEQ) {
		/* equal iff both word pairs compare equal */
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		x86_patch (br [0], s->code);
		x86_set_reg (s->code, X86_CC_EQ, tree->reg1, FALSE);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
		return;
	}

	switch (tree->data.i) {
	case CEE_CGT:
		/* note the reversed operand order: CMP r, l */
		x86_alu_reg_reg (s->code, X86_CMP, rreg2, lreg2);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, TRUE);
		br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, rreg1, lreg1);
		br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
		break;
	case CEE_CGT_UN:
		x86_alu_reg_reg (s->code, X86_CMP, rreg2, lreg2);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, FALSE);
		br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, rreg1, lreg1);
		br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
		break;
	case CEE_CLT:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, TRUE);
		br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
		break;
	case CEE_CLT_UN:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_GT, 0, FALSE);
		br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		br [2] = s->code; x86_branch8 (s->code, X86_CC_GE, 0, FALSE);
		break;
	default:
		g_assert_not_reached ();
	}

	/* set result to 1 */
	x86_patch (br [1], s->code);
	x86_mov_reg_imm (s->code, tree->reg1, 1);
	br [3] = s->code; x86_jump8 (s->code, 0);

	/* set result to 0
*/
	x86_patch (br [0], s->code);
	x86_patch (br [2], s->code);
	x86_mov_reg_imm (s->code, tree->reg1, 0);

	x86_patch (br [3], s->code);
}

# Conditional branch on a 64 bit compare.  Pattern per condition: decide
# on the high words first (branch32 to the target, short-circuit via a
# patched branch8 when the high words differ), then decide on the low
# words with the unsigned condition.
stmt: CBRANCH (COMPARE (lreg, lreg)) {
	guint8 *br [1];
	int lreg1, lreg2, rreg1, rreg2;

	lreg1 = tree->left->left->reg1;
	lreg2 = tree->left->left->reg2;
	rreg1 = tree->left->right->reg1;
	rreg2 = tree->left->right->reg2;

	switch (tree->data.bi.cond) {
	case CEE_BLT:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LT, 0, TRUE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BLT_UN:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BGT:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BGT_UN:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg
(s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BEQ:
		/* equal iff both word pairs are equal */
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BNE_UN:
		/* unequal if either word pair differs */
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	case CEE_BGE:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, TRUE);
		/* NOTE(review): this second CMP of the same operands looks
		 * redundant - the flags from the first CMP are still live
		 * after the branch; the other cases do not repeat it. */
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BGE_UN:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GT, 0, FALSE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_GE, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BLE:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code,
X86_CC_LT, 0, TRUE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, TRUE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	case CEE_BLE_UN:
		x86_alu_reg_reg (s->code, X86_CMP, lreg2, rreg2);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LT, 0, FALSE);
		br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
		x86_alu_reg_reg (s->code, X86_CMP, lreg1, rreg1);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_LE, 0, FALSE);
		x86_patch (br [0], s->code);
		break;
	default:
		g_assert_not_reached ();
	}
}

#
# floating point
#stmt: STLOC (CONV_I4 (freg)) {
#	// fixme: set CW
#	x86_fist_pop_membase (s->code, X86_EBP, tree->data.i, FALSE);
#}

# float -> int8.  The slow path saves the FPU control word, ORs in both
# rounding-control bits (0xc00) to select round-toward-zero, converts
# with fistp through a stack slot, sign-extends the byte, and restores
# the original control word.
reg: CONV_I1 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, FALSE);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, FALSE);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> uint8: same control-word dance, zero-extends instead.
reg: CONV_U1 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1,
0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, FALSE);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> int16: truncating fistp, then sign-extend the 16 bit result.
reg: CONV_I2 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, TRUE);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, TRUE, TRUE);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> uint16: truncating fistp, then zero-extend the 16 bit result.
reg: CONV_U2 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, TRUE);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_widen_reg (s->code, tree->reg1, tree->reg1, FALSE, TRUE);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> int32: truncating fistp into a stack slot, no widening needed.
reg: CONV_I4 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB,
X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> uint32: identical emission to CONV_I4 (32 bit fistp).
reg: CONV_U4 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv(s, tree);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_push_reg (s->code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (s->code, X86_ESP, 0, FALSE);
		x86_pop_reg (s->code, tree->reg1);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> int64: 64 bit fistp into an 8 byte stack slot; the two pops
# pick up the low (reg1) and high (reg2) words.
lreg: CONV_I8 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv_i8(s, tree);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0, 2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
		x86_fist_pop_membase (s->code, X86_ESP, 0, TRUE);
		x86_pop_reg (s->code, tree->reg1);
		x86_pop_reg (s->code, tree->reg2);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# float -> uint64: identical emission to CONV_I8.
lreg: CONV_U8 (freg) {
	if (mono_use_fast_iconv) {
		mono_emit_fast_iconv_i8(s, tree);
	} else {
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
		x86_fnstcw_membase(s->code, X86_ESP, 0);
		x86_mov_reg_membase (s->code, tree->reg1, X86_ESP, 0,
2);
		x86_alu_reg_imm (s->code, X86_OR, tree->reg1, 0xc00);
		x86_mov_membase_reg (s->code, X86_ESP, 2, tree->reg1, 2);
		x86_fldcw_membase (s->code, X86_ESP, 2);
		x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
		x86_fist_pop_membase (s->code, X86_ESP, 0, TRUE);
		x86_pop_reg (s->code, tree->reg1);
		x86_pop_reg (s->code, tree->reg2);
		x86_fldcw_membase (s->code, X86_ESP, 0);
		x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
	}
}

# FP compare-and-set: fcompp pops both operands, fnstsw latches the x87
# condition bits C0/C2/C3 into AX, and the 0x4500 mask keeps exactly
# those bits.  EAX is preserved when it is not the result register
# (fnstsw clobbers AX).
reg: CSET (COMPARE (freg, freg)) {
	int treg = tree->reg1;

	if (treg != X86_EAX)
		x86_push_reg (s->code, X86_EAX);

	x86_fcompp (s->code);
	x86_fnstsw (s->code);
	x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);

	switch (tree->data.i) {
	case CEE_CEQ:
		/* C3 (0x4000) alone set => equal */
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
		x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
		x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
		break;
	case CEE_CGT:
		/* C0 (0x0100) alone set */
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
		x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
		break;
	case CEE_CGT_UN:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
		x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
		break;
	case CEE_CLT:
		/* relies on the ZF left by the AND above (all bits clear) */
		x86_set_reg (s->code, X86_CC_EQ, treg, TRUE);
		x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
		break;
	case CEE_CLT_UN:
		/* tree->reg1 here is the same register as treg */
		x86_set_reg (s->code, X86_CC_EQ, tree->reg1, TRUE);
		x86_widen_reg (s->code, treg, treg, FALSE, FALSE);
		break;
	default:
		g_assert_not_reached ();
	}

	if (treg != X86_EAX)
		x86_pop_reg (s->code, X86_EAX);
}

freg: CONV_R8 (freg) {
	/* nothing to do */
}

freg: CONV_R4 (freg) {
	/* fixme: nothing to do ??*/
}

# Load a 32 bit integer global directly onto the FP stack.
freg: CONV_R8 (LDIND_I4 (ADDR_G)) {
	x86_fild (s->code, tree->left->left->data.p, FALSE);
}

# int32 -> float: bounce through a stack slot so fild can load it.
freg: CONV_R4 (reg) {
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, FALSE);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
}

freg: CONV_R8 (reg) {
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, FALSE);
	x86_alu_reg_imm (s->code, X86_ADD,
X86_ESP, 4);
}

# uint32 -> float: push a zero high word so fild (64 bit) reads the
# value as a non-negative 64 bit integer.
freg: CONV_R_UN (reg) {
	x86_push_imm (s->code, 0);
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, TRUE);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
}

# uint64 -> float: fild treats the value as signed, so when the high
# word is negative the constant mn (2^64 as an 80 bit extended double)
# is added to correct the result.
freg: CONV_R_UN (lreg) {
	static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
	guint8 *br [1];

	/* load 64bit integer to FP stack */
	x86_push_imm (s->code, 0);
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, TRUE);
	/* store as 80bit FP value */
	x86_fst80_membase (s->code, X86_ESP, 0);

	/* test if lreg is negative */
	x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
	br [0] = s->code; x86_branch8 (s->code, X86_CC_GEZ, 0, TRUE);

	/* add correction constant mn */
	x86_fld80_mem (s->code, mn);
	x86_fld80_membase (s->code, X86_ESP, 0);
	x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
	x86_fst80_membase (s->code, X86_ESP, 0);
	//x86_breakpoint (s->code);
	x86_patch (br [0], s->code);

	x86_fld80_membase (s->code, X86_ESP, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
}

# int64 -> float/double: 64 bit fild through the stack.
freg: CONV_R4 (lreg) {
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, TRUE);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
}

freg: CONV_R8 (lreg) {
	x86_push_reg (s->code, tree->left->reg2);
	x86_push_reg (s->code, tree->left->reg1);
	x86_fild_membase (s->code, X86_ESP, 0, TRUE);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
}

# FP constants: use the one-byte fldz/fld1 encodings for 0.0 and 1.0,
# otherwise load from the constant's memory location.
freg: CONST_R4 {
	float f = *(float *)tree->data.p;

	if (f == 0.0)
		x86_fldz (s->code);
	else if (f == 1.0)
		x86_fld1(s->code);
	else
		x86_fld (s->code, tree->data.p, FALSE);
}

freg: CONST_R8 {
	double d = *(double *)tree->data.p;

	if (d == 0.0)
		x86_fldz (s->code);
	else if (d == 1.0)
		x86_fld1(s->code);
	else
		x86_fld (s->code, tree->data.p, TRUE);
}

# Load float through the four address modes; the index modes compute the
# effective address with lea first (the index register is reused).
freg: LDIND_R4 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_fld (s->code, tree->left->data.ainfo.offset, FALSE);
		break;
	case AMBase:
x86_fld_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, FALSE);
		break;
	case AMIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE);
		break;
	case AMBaseIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE);
		break;
	}
}

# Same as LDIND_R4 but loading a 64 bit double (is_double = TRUE).
freg: LDIND_R8 (addr) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_fld (s->code, tree->left->data.ainfo.offset, TRUE);
		break;
	case AMBase:
		x86_fld_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, TRUE);
		break;
	case AMIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE);
		break;
	case AMBaseIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fld_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE);
		break;
	}
}

# FP arithmetic: both operands are already on the x87 stack; the op
# combines ST(0) and ST(1) and pops.
freg: ADD (freg, freg) {
	x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
}

freg: SUB (freg, freg) {
	x86_fp_op_reg (s->code, X86_FSUB, 1, TRUE);
}

freg: MUL (freg, freg) {
	x86_fp_op_reg (s->code, X86_FMUL, 1, TRUE);
}

freg: DIV (freg, freg) {
	x86_fp_op_reg (s->code, X86_FDIV, 1, TRUE);
}

# ckfinite: fxam classifies ST(0); mask 0x4100 keeps C3|C0, and the
# 0x0100 pattern (throwing on mismatch below) rejects NaN/infinity.
freg: CKFINITE (freg) {
	x86_push_reg (s->code, X86_EAX);
	x86_fxam (s->code);
	x86_fnstsw (s->code);
	x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4100);
	x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
	x86_pop_reg (s->code, X86_EAX);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_NE, FALSE, "ArithmeticException");
}

freg: REM (freg, freg) {
	guint8 *l1, *l2;

	/* we need to exchange ST(0) with ST(1) */
	x86_fxch (s->code, 1);

	/* this requires a loop, because fprem sometimes
	 * returns a partial remainder; C2 (0x0400) signals
	 * an incomplete reduction */
	l1 = s->code;
	x86_fprem (s->code);
	x86_fnstsw (s->code);
	x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x0400);

	/* l2 points past the 2 byte branch so the backwards
	 * displacement is computed correctly */
	l2 = s->code + 2;
	x86_branch8 (s->code, X86_CC_NE, l1 - l2, FALSE);

	/* pop result */
	x86_fstp (s->code, 1);
}

freg: NEG (freg) {
	x86_fchs (s->code);
}

# Discard an unused FP result from the x87 stack.
stmt: POP (freg) {
	x86_fstp (s->code, 0);
}

# Store float through the four address modes (mirrors LDIND_R4).
stmt: STIND_R4 (addr, freg) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_fst (s->code, tree->left->data.ainfo.offset, FALSE, TRUE);
		break;
	case AMBase:
		x86_fst_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, FALSE, TRUE);
		break;
	case AMIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE, TRUE);
		break;
	case AMBaseIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, FALSE, TRUE);
		break;
	}
}

# Store double through the four address modes (is_double = TRUE).
stmt: STIND_R8 (addr, freg) {
	switch (tree->left->data.ainfo.amode) {
	case AMImmediate:
		x86_fst (s->code, tree->left->data.ainfo.offset, TRUE, TRUE);
		break;
	case AMBase:
		x86_fst_membase (s->code, tree->left->data.ainfo.basereg, tree->left->data.ainfo.offset, TRUE, TRUE);
		break;
	case AMIndex:
		x86_lea_memindex (s->code, tree->left->data.ainfo.indexreg, X86_NOBASEREG,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE, TRUE);
		break;
	case AMBaseIndex:
		x86_lea_memindex (s->code,
tree->left->data.ainfo.indexreg, tree->left->data.ainfo.basereg,
				  tree->left->data.ainfo.offset, tree->left->data.ainfo.indexreg,
				  tree->left->data.ainfo.shift);
		x86_fst_membase (s->code, tree->left->data.ainfo.indexreg, 0, TRUE, TRUE);
		break;
	}
}

# Store a float into an object field.  The object's first word is tested
# against the transparent proxy vtable: proxies route the store through
# mono_store_remote_field, plain objects store directly at the field
# offset (adjusted by sizeof (MonoObject) for valuetype classes).
stmt: REMOTE_STIND_R4 (reg, freg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int offset;

	if (lreg == treg)
		treg = X86_EDX;

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */

	/* save value to stack */
	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
	x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);

	x86_push_reg (s->code, X86_ESP);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 20);

	br [1] = s->code; x86_jump8 (s->code, 0);

	x86_patch (br [0], s->code);
	offset = tree->data.fi.klass->valuetype ?
tree->data.fi.field->offset - sizeof (MonoObject) : tree->data.fi.field->offset;
	x86_fst_membase (s->code, lreg, offset, FALSE, TRUE);

	x86_patch (br [1], s->code);
}

# Same as REMOTE_STIND_R4 for doubles: the stack slot is 8 bytes and the
# post-call stack cleanup is 24 instead of 20.
stmt: REMOTE_STIND_R8 (reg, freg) {
	guint8 *br[2];
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int offset;

	if (lreg == treg)
		treg = X86_EDX;

	x86_mov_reg_membase (s->code, treg, lreg, 0, 4);
	x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class));
	br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);

	/* this is a transparent proxy - remote the call */

	/* save value to stack */
	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
	x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);

	x86_push_reg (s->code, X86_ESP);
	x86_push_imm (s->code, tree->data.fi.field);
	x86_push_imm (s->code, tree->data.fi.klass);
	x86_push_reg (s->code, lreg);
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field);
	x86_call_code (s->code, 0);
	x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 24);

	br [1] = s->code; x86_jump8 (s->code, 0);

	x86_patch (br [0], s->code);
	offset = tree->data.fi.klass->valuetype ?
tree->data.fi.field->offset - sizeof (MonoObject) : tree->data.fi.field->offset;
	x86_fst_membase (s->code, lreg, offset, TRUE, TRUE);

	x86_patch (br [1], s->code);
}

# Push a float argument: reserve 4 bytes (plus alignment pad) and store
# the value from the x87 stack.
stmt: ARG_R4 (freg) {
	int pad = tree->data.arg_info.pad;

	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4 + pad);
	x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);
}

stmt: ARG_R8 (freg) {
	int pad = tree->data.arg_info.pad;

	x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8 + pad);
	x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);
}

# fixme: we need to implement unordered and ordered compares

# Conditional branch on an FP compare: fcompp pops both operands,
# fnstsw latches C0/C2/C3 into AX, and the masked bit patterns
# (0x0100 = C0, 0x4000 = C3) select the branch condition.
stmt: CBRANCH (COMPARE (freg, freg)) {
	x86_fcompp (s->code);
	x86_fnstsw (s->code);
	x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x4500);

	switch (tree->data.bi.cond) {
	case CEE_BLT:
		/* all condition bits clear (ZF from the AND) */
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
		break;
	case CEE_BLT_UN:
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
		break;
	case CEE_BGT:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
		break;
	case CEE_BGT_UN:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, FALSE);
		break;
	case CEE_BEQ:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_EQ, 0, TRUE);
		break;
	case CEE_BNE_UN:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x4000);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	case CEE_BGE:
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	case CEE_BGE_UN:
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB,
tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	case CEE_BLE:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	case CEE_BLE_UN:
		x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0x0100);
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bi.target);
		x86_branch32 (s->code, X86_CC_NE, 0, FALSE);
		break;
	default:
		g_assert_not_reached ();
	}
}

# Calls returning a floating point value (result stays on the x87
# stack); register/target selection mirrors the CALL_I8 rules above.
freg: CALL_R8 (this, reg) {
	int treg = X86_EAX;
	int lreg = tree->left->reg1;
	int rreg = tree->right->reg1;

	if (lreg == treg || rreg == treg)
		treg = X86_EDX;
	if (lreg == treg || rreg == treg)
		treg = X86_ECX;
	if (lreg == treg || rreg == treg)
		mono_assert_not_reached ();

	X86_CALL_BEGIN;
	x86_call_reg (s->code, rreg);
	X86_CALL_END;
}

freg: CALL_R8 (this, ADDR_G) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	X86_CALL_BEGIN;
	mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
	x86_call_code (s->code, 0);
	X86_CALL_END;
}

freg: CALL_R8 (this, INTF_ADDR) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	X86_CALL_BEGIN;
	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
	x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoVTable, interface_offsets), 4);
	x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
	x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
	X86_CALL_END;
}

freg: CALL_R8 (this, VFUNC_ADDR) {
	int lreg = tree->left->reg1;
	int treg = X86_EAX;

	if (lreg == treg)
		treg = X86_EDX;

	X86_CALL_BEGIN;
	x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
	x86_call_virtual (s->code, lreg, G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
	X86_CALL_END;
}

# FP return value is already in ST(0); just jump to the epilog.
stmt: RET (freg) {
	if (!tree->last_instr) {
		mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL);
		x86_jump32 (s->code, 0);
	}
}

freg: SIN (freg) {
x86_fsin (s->code); } freg: COS (freg) { x86_fcos (s->code); } freg: SQRT (freg) { x86_fsqrt (s->code); } # support for value types reg: LDIND_OBJ (reg) { if (tree->left->reg1 != tree->reg1) x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4); } stmt: STIND_OBJ (reg, reg) { mono_assert (tree->data.i > 0); x86_push_imm (s->code, tree->data.i); x86_push_reg (s->code, tree->right->reg1); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } stmt: REMOTE_STIND_OBJ (reg, reg) { guint8 *br[2]; int treg = X86_EAX; int lreg = tree->left->reg1; int rreg = tree->right->reg1; int size, offset; if (lreg == treg) treg = X86_EDX; if (rreg == treg) treg = X86_ECX; x86_mov_reg_membase (s->code, treg, lreg, 0, 4); x86_alu_membase_imm (s->code, X86_CMP, treg, 0, ((int)mono_defaults.transparent_proxy_class)); br [0] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE); /* this is a transparent proxy - remote the call */ x86_push_reg (s->code, rreg); x86_push_imm (s->code, tree->data.fi.field); x86_push_imm (s->code, tree->data.fi.klass); x86_push_reg (s->code, lreg); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_store_remote_field); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 16); br [1] = s->code; x86_jump8 (s->code, 0); x86_patch (br [0], s->code); offset = tree->data.fi.klass->valuetype ? 
tree->data.fi.field->offset - sizeof (MonoObject) : tree->data.fi.field->offset; size = mono_class_value_size (tree->data.fi.field->type->data.klass, NULL); x86_push_imm (s->code, size); x86_push_reg (s->code, tree->right->reg1); x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, offset); x86_push_reg (s->code, tree->left->reg1); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); x86_patch (br [1], s->code); } stmt: ARG_OBJ (CONST_I4) { int pad = tree->data.arg_info.pad; X86_ARG_PAD (pad); x86_push_imm (s->code, tree->left->data.i); } stmt: ARG_OBJ (reg) { int size = tree->data.arg_info.size; int pad = tree->data.arg_info.pad; int sa; if (!size) return; sa = size + pad; /* reserve space for the argument */ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa); x86_push_imm (s->code, size); x86_push_reg (s->code, tree->left->reg1); x86_lea_membase (s->code, X86_EAX, X86_ESP, 2*4); x86_push_reg (s->code, X86_EAX); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); } stmt: RET_OBJ (reg) { int size = tree->data.i; x86_push_imm (s->code, size); x86_push_reg (s->code, tree->left->reg1); x86_push_membase (s->code, X86_EBP, 8); mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY); x86_call_code (s->code, 0); x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12); if (!tree->last_instr) { mono_add_jump_info (s, s->code, MONO_JUMP_INFO_EPILOG, NULL); x86_jump32 (s->code, 0); } } %% #include "jit.h" gint64 mono_llmult (gint64 a, gint64 b) { return a * b; } guint64 mono_llmult_ovf_un (gpointer *exc, guint32 al, guint32 ah, guint32 bl, guint32 bh) { guint64 res, t1; // fixme: this is incredible slow if (ah && bh) goto raise_exception; res = (guint64)al * (guint64)bl; t1 = (guint64)ah * (guint64)bl + (guint64)al * (guint64)bh; if (t1 > 0xffffffff) goto raise_exception; res += ((guint64)t1) << 32; *exc = NULL; 
return res; raise_exception: *exc = mono_get_exception_overflow (); return 0; } guint64 mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh) { /* Use Karatsuba algorithm where: a*b is: AhBh(R^2+R)+(Ah-Al)(Bl-Bh)R+AlBl(R+1) where Ah is the "high half" (most significant 32 bits) of a and where Al is the "low half" (least significant 32 bits) of a and where Bh is the "high half" of b and Bl is the "low half" and where R is the Radix or "size of the half" (in our case 32 bits) Note, for the product of two 64 bit numbers to fit into a 64 result, ah and/or bh must be 0. This will save us from doing the AhBh term at all. Also note that we refactor so that we don't overflow 64 bits with intermediate results. So we use [(Ah-Al)(Bl-Bh)+AlBl]R+AlBl */ gint64 res, t1; gint32 sign; /* need to work with absoulte values, so find out what the resulting sign will be and convert any negative numbers from two's complement */ sign = ah ^ bh; if (ah < 0) { /* flip the bits and add 1 */ ah ^= ~0; if (al == 0) ah += 1; else { al ^= ~0; al +=1; } } if (bh < 0) { /* flip the bits and add 1 */ bh ^= ~0; if (bl == 0) bh += 1; else { bl ^= ~0; bl +=1; } } /* we overflow for sure if both upper halves are greater than zero because we would need to shift their product 64 bits to the left and that will not fit in a 64 bit result */ if (ah && bh) goto raise_exception; /* do the AlBl term first */ t1 = (gint64)al * (gint64)bl; res = t1; /* now do the [(Ah-Al)(Bl-Bh)+AlBl]R term */ t1 += (gint64)(ah - al) * (gint64)(bl - bh); t1 <<= 32; /* check for overflow */ if (t1 > (0x7FFFFFFFFFFFFFFF - res)) goto raise_exception; res += t1; *exc = NULL; if (sign < 0) return -res; else return res; raise_exception: *exc = mono_get_exception_overflow (); return 0; } gint64 mono_lldiv (gint64 a, gint64 b) { return a / b; } gint64 mono_llrem (gint64 a, gint64 b) { return a % b; } guint64 mono_lldiv_un (guint64 a, guint64 b) { return a / b; } guint64 mono_llrem_un (guint64 a, guint64 b) { 
return a % b; } MonoArray* mono_array_new_wrapper (MonoClass *eclass, guint32 n) { MonoDomain *domain = mono_domain_get (); return mono_array_new (domain, eclass, n); } MonoObject * mono_object_new_wrapper (MonoClass *klass) { MonoDomain *domain = mono_domain_get (); return mono_object_new (domain, klass); } MonoString* mono_ldstr_wrapper (MonoImage *image, guint32 ind) { MonoDomain *domain = mono_domain_get (); return mono_ldstr (domain, image, ind); } gpointer mono_ldsflda (MonoClass *klass, int offset) { MonoDomain *domain = mono_domain_get (); MonoVTable *vt; gpointer addr; vt = mono_class_vtable (domain, klass); addr = (char*)(vt->data) + offset; return addr; } void * debug_memcopy (void *dest, const void *src, size_t n) { int i, l = n; printf ("MEMCPY(%p to %p [%d]) ", src, dest, n); for (i = 0; i < l; i++) printf ("%02x ", *((guint8 *)src + i)); printf ("\n"); return memcpy (dest, src, n); } void mono_emit_fast_iconv (MBCGEN_TYPE* s, MBTREE_TYPE* tree) { guint8* br [3]; x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 12); x86_fist_membase (s->code, X86_ESP, 8, TRUE); // rounded value x86_fst_membase (s->code, X86_ESP, 0, FALSE, FALSE); // float value x86_fp_int_op_membase (s->code, X86_FSUB, X86_ESP, 8, TRUE); x86_fst_membase (s->code, X86_ESP, 4, FALSE, TRUE); // diff x86_pop_reg (s->code, tree->reg1); // float value x86_test_reg_reg (s->code, tree->reg1, tree->reg1); br[0] = s->code; x86_branch8 (s->code, X86_CC_S, 0, TRUE); x86_pop_reg (s->code, tree->reg1); // diff x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1); x86_pop_reg (s->code, tree->reg1); // rounded value x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, 0); br[1] = s->code; x86_jump8 (s->code, 0); // freg is negative x86_patch (br[0], s->code); x86_pop_reg (s->code, tree->reg1); // diff x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1); x86_pop_reg (s->code, tree->reg1); // rounded value br[2] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE); x86_alu_reg_imm (s->code, X86_SBB, 
tree->reg1, -1); x86_patch (br[1], s->code); x86_patch (br[2], s->code); } void mono_emit_fast_iconv_i8 (MBCGEN_TYPE* s, MBTREE_TYPE* tree) { guint8* br [3]; x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 16); x86_fld_reg (s->code, 0); x86_fist_pop_membase (s->code, X86_ESP, 8, TRUE); // rounded value (qword) x86_fst_membase (s->code, X86_ESP, 0, FALSE, FALSE); // float value x86_fild_membase (s->code, X86_ESP, 8, TRUE); x86_fp_op_reg (s->code, X86_FSUB, 1, TRUE); // diff x86_fst_membase (s->code, X86_ESP, 4, FALSE, TRUE); // diff x86_pop_reg (s->code, tree->reg1); // float value x86_test_reg_reg (s->code, tree->reg1, tree->reg1); br[0] = s->code; x86_branch8 (s->code, X86_CC_S, 0, TRUE); x86_pop_reg (s->code, tree->reg1); // diff x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1); x86_pop_reg (s->code, tree->reg1); // rounded value x86_pop_reg (s->code, tree->reg2); x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, 0); x86_alu_reg_imm (s->code, X86_SBB, tree->reg2, 0); br[1] = s->code; x86_jump8 (s->code, 0); // freg is negative x86_patch (br[0], s->code); x86_pop_reg (s->code, tree->reg1); // diff x86_alu_reg_reg (s->code, X86_ADD, tree->reg1, tree->reg1); x86_pop_reg (s->code, tree->reg1); // rounded value x86_pop_reg (s->code, tree->reg2); br[2] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE); x86_alu_reg_imm (s->code, X86_SBB, tree->reg1, -1); x86_alu_reg_imm (s->code, X86_SBB, tree->reg2, -1); x86_patch (br[1], s->code); x86_patch (br[2], s->code); } void mono_emit_stack_alloc (MBCGEN_TYPE* s, MBTREE_TYPE* tree) { #ifdef PLATFORM_WIN32 guint8* br[5]; int sreg; /* * Under Windows: * If requested stack size is larger than one page, * perform stack-touch operation * (see comments in mono_emit_stack_alloc_const below). 
*/ x86_test_reg_imm (s->code, tree->left->reg1, ~0xFFF); br[0] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE); sreg = tree->left->reg1; br[2] = s->code; /* loop */ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP); x86_alu_reg_imm (s->code, X86_SUB, sreg, 0x1000); x86_alu_reg_imm (s->code, X86_CMP, sreg, 0x1000); br[3] = s->code; x86_branch8 (s->code, X86_CC_AE, 0, FALSE); x86_patch (br[3], br[2]); x86_test_reg_reg (s->code, sreg, sreg); br[4] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE); x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, sreg); br[1] = s->code; x86_jump8 (s->code, 0); x86_patch (br[0], s->code); x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1); x86_patch (br[1], s->code); x86_patch (br[4], s->code); #else /* PLATFORM_WIN32 */ x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1); #endif } void mono_emit_stack_alloc_const (MBCGEN_TYPE* s, MBTREE_TYPE* tree, int size) { #ifdef PLATFORM_WIN32 int i, npages; guint8* br[2]; if (size > 0xFFE) { /* * Generate stack probe code. * Under Windows, it is necessary to allocate one page at a time, * "touching" stack after each successful sub-allocation. This is * because of the way stack growth is implemented - there is a * guard page before the lowest stack page that is currently commited. * Stack normally grows sequentially so OS traps access to the * guard page and commits more pages when needed. 
*/ npages = ((unsigned) size) >> 12; if (npages > 4) { if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) { x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP); x86_mov_membase_reg (s->code, X86_ESP, 0x1000 - 4, X86_EAX, 4); /* save EAX */ x86_mov_reg_imm (s->code, X86_EAX, npages - 1); } else { x86_mov_reg_imm (s->code, X86_EAX, npages); } br[0] = s->code; /* loop */ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP); x86_dec_reg (s->code, X86_EAX); br[1] = s->code; x86_branch8 (s->code, X86_CC_NZ, 0, TRUE); x86_patch (br[1], br[0]); if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (npages * 0x1000) - 4, 4); /* restore EAX */ } else { /* generate unrolled code for relatively small allocs */ for (i = npages; --i >= 0;) { x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP); } } } if (size & 0xFFF) x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, -(size & 0xFFF)); #else /* PLATFORM_WIN32 */ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, size); #endif } gpointer mono_ldvirtftn (MonoObject *this, int slot) { MonoClass *class; MonoMethod *m; gpointer addr; gboolean is_proxy = FALSE; g_assert (this); if ((class = this->vtable->klass) == mono_defaults.transparent_proxy_class) { class = ((MonoTransparentProxy *)this)->klass; is_proxy = TRUE; } g_assert (slot <= class->vtable_size); m = class->vtable [slot]; if (is_proxy) { return mono_jit_create_remoting_trampoline (m); } else { EnterCriticalSection (metadata_section); addr = mono_compile_method (m); LeaveCriticalSection (metadata_section); return addr; } } gpointer mono_ldintftn (MonoObject *this, int slot) { MonoClass *class; MonoMethod *m; gpointer addr; gboolean is_proxy = FALSE; g_assert (this); if ((class = this->vtable->klass) == mono_defaults.transparent_proxy_class) { class = ((MonoTransparentProxy 
*)this)->klass; is_proxy = TRUE; } g_assert (slot < class->interface_count); slot = class->interface_offsets [slot]; m = class->vtable [slot]; if (is_proxy) { return mono_jit_create_remoting_trampoline (m); } else { EnterCriticalSection (metadata_section); addr = mono_compile_method (m); LeaveCriticalSection (metadata_section); return addr; } } gpointer mono_ldftn (MonoMethod *method) { gpointer addr; EnterCriticalSection (metadata_section); addr = mono_compile_method (method); LeaveCriticalSection (metadata_section); return addr; }