#include <mono/metadata/object.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/appdomain.h>
+#include <mono/metadata/marshal.h>
+#include <mono/metadata/threads.h>
#include <mono/arch/x86/x86-codegen.h>
#include "regset.h"
};
#undef OPDEF
+/* alignment of activation frames */
+#define MONO_FRAME_ALIGNMENT 4
+
void print_lmf (void);
#define MBTREE_TYPE MBTree
MonoClass *klass;
MonoClassField *field;
X86AddressInfo ainfo;
- MonoJitCallInfo ci;
MonoJitFieldInfo fi;
MonoJitBranchInfo bi;
+ MonoJitCallInfo call_info;
+ MonoJitArgumentInfo arg_info;
} data;
};
gpointer mono_ldintftn (MonoObject *this, int slot);
gpointer mono_ldftn (MonoMethod *method);
-void mono_emit_fast_iconv(MBCGEN_TYPE* s, MBTREE_TYPE* tree);
-void mono_emit_fast_iconv_i8(MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+void mono_emit_fast_iconv (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+void mono_emit_fast_iconv_i8 (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+void mono_emit_stack_alloc (MBCGEN_TYPE* s, MBTREE_TYPE* tree);
+void mono_emit_stack_alloc_const (MBCGEN_TYPE* s, MBTREE_TYPE* tree, int size);
MonoArray*
mono_array_new_wrapper (MonoClass *eclass, guint32 n);
debug_memcopy (void *dest, const void *src, size_t n);
#ifdef DEBUG
-#define MEMCOPY debug_memcpy
+#define MEMCOPY debug_memcopy
#define PRINT_REG(text,reg) REAL_PRINT_REG(text,reg)
#else
x86_call_code (s->code, 0); \
} while (0);
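+/* emit `pad' bytes of padding before pushing an argument (stack alignment):
+ * a 4 byte pad is done with a dummy push, any other size by adjusting ESP */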
+#define X86_ARG_PAD(pad) do { \
+ if (pad) { \
+ if (pad == 4) \
+ x86_push_reg (s->code, X86_EAX); \
+ else \
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, pad); \
+ } \
+} while (0)
+
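+/* X86_CALL_END: pop the whole outgoing argument area (call_info.frame_size
+ * bytes) off the stack once the call has returned */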
+#define X86_CALL_END do { \
+ int size = tree->data.call_info.frame_size; \
+ if (size) \
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, size); \
+} while (0)
+
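+/* X86_CALL_BEGIN: pad the argument area, push the implicit `this' pointer
+ * (the cmp against [this] forces a fault on a null reference before the call),
+ * and push the address of the valuetype return buffer when vtype_num is set */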
+#define X86_CALL_BEGIN do { \
+ int pad = tree->data.call_info.pad; \
+ X86_ARG_PAD (pad); \
+ if (tree->left->op != MB_TERM_NOP) { \
+ mono_assert (lreg >= 0); \
+ x86_push_reg (s->code, lreg); \
+ x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0); \
+ } \
+ if (tree->data.call_info.vtype_num) { \
+ int offset = VARINFO (s, tree->data.call_info.vtype_num).offset; \
+ x86_lea_membase (s->code, treg, X86_EBP, offset); \
+ x86_push_reg (s->code, treg); \
+ } \
+} while (0)
+
/* we use this macro to move one lreg to another - source and
destination may overlap, but the register allocator has to
make sure that ((d1 < d2) && (s1 < s2))
%term CONST_I4 CONST_I8 CONST_R4 CONST_R8
%term LDIND_I1 LDIND_U1 LDIND_I2 LDIND_U2 LDIND_I4 LDIND_I8 LDIND_R4 LDIND_R8 LDIND_OBJ
%term STIND_I1 STIND_I2 STIND_I4 STIND_I8 STIND_R4 STIND_R8 STIND_OBJ
-%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ ARG_STRING CALL_I4 CALL_I8 CALL_R8 CALL_VOID
-%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY JMP
+%term ADDR_L ADDR_G ARG_I4 ARG_I8 ARG_R4 ARG_R8 ARG_OBJ CALL_I4 CALL_I8 CALL_R8 CALL_VOID
+%term BREAK SWITCH BR RET_VOID RET RET_OBJ ENDFINALLY ENDFILTER JMP
%term ADD ADD_OVF ADD_OVF_UN SUB SUB_OVF SUB_OVF_UN MUL MUL_OVF MUL_OVF_UN
%term DIV DIV_UN REM REM_UN AND OR XOR SHL SHR SHR_UN NEG NOT CKFINITE
%term COMPARE CBRANCH BRTRUE BRFALSE CSET
%term CONV_OVF_I1 CONV_OVF_U1 CONV_OVF_I2 CONV_OVF_U2 CONV_OVF_U4 CONV_OVF_U8 CONV_OVF_I4
%term CONV_OVF_I4_UN CONV_OVF_U1_UN CONV_OVF_U2_UN
%term CONV_OVF_I2_UN CONV_OVF_I8_UN CONV_OVF_I1_UN
-%term EXCEPTION THROW RETHROW HANDLER CHECKTHIS
+%term EXCEPTION THROW RETHROW HANDLER CHECKTHIS RETHROW_ABORT
%term LDLEN LDELEMA LDFTN LDVIRTFTN LDSTR LDSFLDA
%term REMOTE_LDFLDA REMOTE_STIND_I1 REMOTE_STIND_I2 REMOTE_STIND_I4
%term REMOTE_STIND_I8 REMOTE_STIND_R4 REMOTE_STIND_R8 REMOTE_STIND_OBJ
%term SIN COS SQRT
+
+%term FUNC1 PROC2 PROC3 FREE OBJADDR VTADDR
+
#
# we start at stmt
#
# we pass exception in ECX to catch handler
reg: EXCEPTION {
int offset = VARINFO (s, tree->data.i).offset;
- int reg = VARINFO (s, tree->data.i).reg;
if (tree->reg1 != X86_ECX)
x86_mov_reg_reg (s->code, tree->reg1, X86_ECX, 4);
/* store it so that we can RETHROW it later */
- if (reg < 0)
- x86_mov_membase_reg (s->code, X86_EBP, offset, tree->reg1, 4);
- else
- x86_mov_reg_reg (s->code, reg, tree->reg1, 4);
+ x86_mov_membase_reg (s->code, X86_EBP, offset, tree->reg1, 4);
}
stmt: THROW (reg) {
}
stmt: RETHROW {
- int off = VARINFO (s, tree->data.i).offset;
- int reg = VARINFO (s, tree->data.i).reg;
+ int offset = VARINFO (s, tree->data.i).offset;
gpointer target;
- if (reg < 0)
- x86_push_membase (s->code, X86_EBP, off);
- else
- x86_push_reg (s->code, reg);
+ x86_push_membase (s->code, X86_EBP, offset);
+
+ target = arch_get_throw_exception ();
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
+ x86_call_code (s->code, target);
+}
+
+stmt: RETHROW_ABORT {
+ guint8 *br;
+ gpointer target;
+
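+ /* rethrow a pending thread abort: fetch the current MonoThread and, if its
+  * abort_exc field is set, hand it to the throw trampoline */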
+ target = mono_thread_current;
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
+ x86_call_code (s->code, target);
+
+ x86_mov_reg_membase (s->code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoThread, abort_exc), 4);
+ x86_alu_reg_imm (s->code, X86_CMP, X86_EAX, 0);
+ /* check for NULL */
+ br = s->code; x86_branch8 (s->code, X86_CC_EQ, 0, FALSE);
+
+ x86_push_reg (s->code, X86_EAX);
target = arch_get_throw_exception ();
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, target);
x86_call_code (s->code, target);
+
+ x86_patch (br, s->code);
}
stmt: HANDLER {
/* save ESP (used by ENDFINALLY) */
- x86_mov_membase_reg (s->code, X86_EBP, -16, X86_ESP, 4);
+ x86_mov_membase_reg (s->code, X86_EBP, mono_exc_esp_offset, X86_ESP, 4);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_BB, tree->data.bb);
x86_call_imm (s->code, 0);
}
stmt: ENDFINALLY {
/* restore ESP - which can be modified when we allocate value types
* in the finally handler */
- x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, -16, 4);
+ x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
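+ /* the call emitted by HANDLER pushed its return address just below the
+  * saved ESP, so step down one slot before the ret */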
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ x86_ret (s->code);
+}
+
+stmt: ENDFILTER (reg) {
+ /* restore ESP - which can be modified when we allocate value types
+ * in the filter */
+ x86_mov_reg_membase (s->code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ if (tree->left->reg1 != X86_EAX)
+ x86_mov_reg_reg (s->code, X86_EAX, tree->left->reg1, 4);
x86_ret (s->code);
}
x86_push_reg (s->code, X86_EDX);
x86_push_reg (s->code, X86_ECX);
- x86_push_reg (s->code, X86_ESP);
+ x86_push_imm (s->code, 0);
x86_push_imm (s->code, tree->data.fi.field);
x86_push_imm (s->code, tree->data.fi.klass);
x86_push_reg (s->code, lreg);
x86_widen_reg (s->code, tree->reg1, tree->left->reg1, FALSE, TRUE);
}
-# warning: this chain rule requires a register
reg: CONST_I4 1 {
x86_mov_reg_imm (s->code, tree->reg1, tree->data.i);
}
reg: LDELEMA (reg, CONST_I4) {
int ind;
- x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length), tree->right->data.i);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "IndexOutOfRangeException");
+ if (mono_jit_boundcheck) {
+ x86_alu_membase_imm (s->code, X86_CMP, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length), tree->right->data.i);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "IndexOutOfRangeException");
+ }
ind = tree->data.i * tree->right->data.i + G_STRUCT_OFFSET (MonoArray, vector);
- x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, ind);
-
- if (tree->reg1 != tree->left->reg1)
- x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
-
+ x86_lea_membase (s->code, tree->reg1, tree->left->reg1, ind);
}
reg: LDELEMA (reg, reg) {
- x86_alu_reg_membase (s->code, X86_CMP, tree->right->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length));
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, FALSE, "IndexOutOfRangeException");
+ if (mono_jit_boundcheck) {
+ x86_alu_reg_membase (s->code, X86_CMP, tree->right->reg1, tree->left->reg1, G_STRUCT_OFFSET (MonoArray, max_length));
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, FALSE, "IndexOutOfRangeException");
+ }
if (tree->data.i == 1 || tree->data.i == 2 ||
tree->data.i == 4 || tree->data.i == 8) {
x86_push_imm (s->code, tree->data.p);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_array_new_wrapper);
x86_call_code (s->code, 0);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer) + 4);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
x86_push_imm (s->code, tree->data.p);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_array_new_specific);
x86_call_code (s->code, 0);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer) + 4);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
x86_push_imm (s->code, tree->data.klass);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_wrapper);
x86_call_code (s->code, 0);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
x86_push_imm (s->code, tree->data.p);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_object_new_specific);
x86_call_code (s->code, 0);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
x86_pop_reg (s->code, X86_EDX);
x86_pop_reg (s->code, X86_ECX);
PRINT_REG ("NEWOBJ_SPEC", tree->reg1);
}
+reg: OBJADDR (reg) {
+ if (tree->left->reg1 != tree->reg1)
+ x86_mov_reg_reg (s->code, tree->reg1, tree->left->reg1, 4);
+}
+
+reg: VTADDR (ADDR_L) {
+ int offset = VARINFO (s, tree->left->data.i).offset;
+
+ x86_lea_membase (s->code, tree->reg1, X86_EBP, offset);
+}
+
+stmt: FREE (reg) {
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, g_free);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
+}
+
+stmt: PROC2 (reg, reg) {
+ x86_push_reg (s->code, tree->right->reg1);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 8);
+}
+
+stmt: PROC3 (reg, CPSRC (reg, reg)) {
+ x86_push_reg (s->code, tree->right->right->reg1);
+ x86_push_reg (s->code, tree->right->left->reg1);
+ x86_push_reg (s->code, tree->left->reg1);
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
+}
+
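+# FUNC1: call a one argument C helper (tree->data.p); ECX and EDX are saved
+# around the call, and EAX too unless it already holds the result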
+reg: FUNC1 (reg) {
+ if (tree->reg1 != X86_EAX)
+ x86_push_reg (s->code, X86_EAX);
+ x86_push_reg (s->code, X86_ECX);
+ x86_push_reg (s->code, X86_EDX);
+
+ x86_push_reg (s->code, tree->left->reg1);
+
+ mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->data.p);
+ x86_call_code (s->code, 0);
+ x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, sizeof (gpointer));
+
+ x86_pop_reg (s->code, X86_EDX);
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX) {
+ x86_mov_reg_reg (s->code, tree->reg1, X86_EAX, 4);
+ x86_pop_reg (s->code, X86_EAX);
+ }
+}
+
+reg: LOCALLOC (CONST_I4) {
+ int size;
+ int offset;
+
+ size = (tree->left->data.i + (MONO_FRAME_ALIGNMENT - 1)) & ~(MONO_FRAME_ALIGNMENT - 1); // align to MONO_FRAME_ALIGNMENT boundary
+ offset = 0;
+
+ if (size) {
+ mono_emit_stack_alloc_const (s, tree, size);
+
+ if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI) {
+ x86_push_reg (s->code, X86_EDI);
+ offset += 4;
+ }
+ if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) {
+ x86_push_reg (s->code, X86_EAX);
+ offset += 4;
+ }
+ if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX) {
+ x86_push_reg (s->code, X86_ECX);
+ offset += 4;
+ }
+
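+ /* zero the freshly allocated block: ECX = dword count, EAX = 0, EDI = start
+  * of the block (just above the registers saved here), then rep stosd */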
+ x86_mov_reg_imm (s->code, X86_ECX, size >> 2);
+ x86_alu_reg_reg (s->code, X86_SUB, X86_EAX, X86_EAX);
+
+ x86_lea_membase (s->code, X86_EDI, X86_ESP, offset);
+ x86_cld (s->code);
+ x86_prefix (s->code, X86_REP_PREFIX);
+ x86_stosd (s->code);
+
+ if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX)
+ x86_pop_reg (s->code, X86_ECX);
+ if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX)
+ x86_pop_reg (s->code, X86_EAX);
+ if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI)
+ x86_pop_reg (s->code, X86_EDI);
+ }
+
+ x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4);
+}
+
reg: LOCALLOC (reg) {
int offset = 0;
- /* size must be aligned to 4 bytes */
- x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, 3);
- x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, ~3);
+ /* size must be aligned to MONO_FRAME_ALIGNMENT bytes */
+ x86_alu_reg_imm (s->code, X86_ADD, tree->left->reg1, MONO_FRAME_ALIGNMENT - 1);
+ x86_alu_reg_imm (s->code, X86_AND, tree->left->reg1, ~(MONO_FRAME_ALIGNMENT - 1));
/* allocate space on stack */
- x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1);
+ mono_emit_stack_alloc (s, tree);
if (tree->data.i) {
/* initialize with zero */
x86_prefix (s->code, X86_REP_PREFIX);
x86_stosl (s->code);
+ if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI)
+ x86_pop_reg (s->code, X86_EDI);
if (tree->reg1 != X86_ECX && tree->left->reg1 != X86_ECX)
x86_pop_reg (s->code, X86_ECX);
if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX)
x86_pop_reg (s->code, X86_EAX);
- if (tree->reg1 != X86_EDI && tree->left->reg1 != X86_EDI)
- x86_pop_reg (s->code, X86_EDI);
}
x86_mov_reg_reg (s->code, tree->reg1, X86_ESP, 4);
x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "InvalidCastException");
- x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, element_class), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, cast_class), 4);
x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
- x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->element_class->baseval);
- x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->element_class->diffval);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->cast_class->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->cast_class->diffval);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "InvalidCastException");
} else {
x86_alu_membase_imm (s->code, X86_CMP, lreg, G_STRUCT_OFFSET (MonoClass, rank), klass->rank);
br [1] = s->code; x86_branch8 (s->code, X86_CC_NE, 0, FALSE);
- x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, element_class), 4);
+ x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, cast_class), 4);
x86_mov_reg_membase (s->code, lreg, lreg, G_STRUCT_OFFSET (MonoClass, baseval), 4);
- x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->element_class->baseval);
- x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->element_class->diffval);
+ x86_alu_reg_mem (s->code, X86_SUB, lreg, &klass->cast_class->baseval);
+ x86_alu_reg_mem (s->code, X86_CMP, lreg, &klass->cast_class->diffval);
br [2] = s->code; x86_branch8 (s->code, X86_CC_LE, 0, FALSE);
x86_patch (br [1], s->code);
x86_mov_membase_imm (s->code, X86_ESP, 0, 0, 4);
stmt: INITOBJ (reg) {
int i, j;
- i = tree->data.i;
+ if (!(i = tree->data.i))
+ return;
if (i == 1 || i == 2 || i == 4) {
x86_mov_membase_imm (s->code, tree->left->reg1, 0, 0, i);
}
}
-
stmt: ARG_I4 (LDIND_I4 (addr)) {
MBTree *at = tree->left->left;
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
switch (at->data.ainfo.amode) {
stmt: ARG_I4 (LDIND_I4 (ADDR_L)) {
int treg = VARINFO (s, tree->left->left->data.i).reg;
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_reg (s->code, treg);
} cost {
MBCOND ((VARINFO (data, tree->left->left->data.i).reg >= 0));
}
stmt: ARG_I4 (reg) {
- x86_push_reg (s->code, tree->left->reg1);
- PRINT_REG ("ARG_I4", tree->left->reg1);
-}
-
-# fixme: we must free the allocated strings somewhere
-stmt: ARG_STRING (reg) {
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
- x86_push_reg (s->code, X86_EAX);
- x86_push_reg (s->code, X86_ECX);
- x86_push_reg (s->code, X86_EDX);
+ int pad = tree->data.arg_info.pad;
+ X86_ARG_PAD (pad);
x86_push_reg (s->code, tree->left->reg1);
- mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, mono_string_to_utf8);
- x86_call_code (s->code, 0);
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 4);
-
- x86_mov_membase_reg (s->code, X86_ESP, 12, X86_EAX, 4);
-
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_ECX);
- x86_pop_reg (s->code, X86_EAX);
}
stmt: ARG_I4 (ADDR_G) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.p);
}
stmt: ARG_I4 (CONST_I4) "MB_USE_OPT1(0)" {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.i);
}
if (lreg == treg || rreg == treg)
mono_assert_not_reached ();
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_call_reg (s->code, rreg);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL_I4", tree->reg1);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
}
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
x86_call_code (s->code, 0);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL_I4", tree->reg1);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
}
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL_I4(INTERFACE)", tree->reg1);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
}
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL_I4(VIRTUAL)", tree->reg1);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
}
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
x86_call_code (s->code, 0);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
stmt: CALL_VOID (this, reg) {
- int lreg = tree->left->reg1;
int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
- if (lreg == treg)
+ if (lreg == treg || rreg == treg)
treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_call_reg (s->code, tree->right->reg1);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
stmt: CALL_VOID (this, INTF_ADDR) {
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
stmt: CALL_VOID (this, VFUNC_ADDR) {
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
stmt: SWITCH (reg) {
tree->reg2 == X86_EDX);
}
+lreg: CALL_I8 (this, reg) {
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
+
+ X86_CALL_BEGIN;
+
+ x86_call_reg (s->code, rreg);
+
+ X86_CALL_END;
+
+ mono_assert (tree->reg1 == X86_EAX);
+ mono_assert (tree->reg2 == X86_EDX);
+}
+
lreg: CALL_I8 (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
x86_call_code (s->code, 0);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
mono_assert (tree->reg2 == X86_EDX);
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL0_I8(VIRTUAL)", tree->reg1);
- PRINT_REG ("CALL1_I8(VIRTUAL)", tree->reg2);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
mono_assert (tree->reg2 == X86_EDX);
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
-
- PRINT_REG ("CALL_I8(INTERFACE)", tree->reg1);
+ X86_CALL_END;
mono_assert (tree->reg1 == X86_EAX);
mono_assert (tree->reg2 == X86_EDX);
stmt: ARG_I8 (lreg) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_reg (s->code, tree->left->reg2);
x86_push_reg (s->code, tree->left->reg1);
}
x86_fst80_membase (s->code, X86_ESP, 0);
/* test if lreg is negative */
- x86_test_reg_reg (s->code, tree->left->reg1, tree->left->reg1);
+ x86_test_reg_reg (s->code, tree->left->reg2, tree->left->reg2);
br [0] = s->code; x86_branch8 (s->code, X86_CC_GEZ, 0, TRUE);
/* add correction constant mn */
x86_fld80_membase (s->code, X86_ESP, 0);
x86_fp_op_reg (s->code, X86_FADD, 1, TRUE);
x86_fst80_membase (s->code, X86_ESP, 0);
-
+ //x86_breakpoint (s->code);
x86_patch (br [0], s->code);
x86_fld80_membase (s->code, X86_ESP, 0);
/* this requires a loop, because fprem sometimes
* returns a partial remainder */
l1 = s->code;
- x86_fprem1 (s->code);
+ x86_fprem (s->code);
x86_fnstsw (s->code);
x86_alu_reg_imm (s->code, X86_AND, X86_EAX, 0x0400);
l2 = s->code + 2;
x86_fchs (s->code);
}
-stmt: POP (freg)
+stmt: POP (freg) {
+ x86_fstp (s->code, 0);
+}
stmt: STIND_R4 (addr, freg) {
}
stmt: ARG_R4 (freg) {
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4);
+ int pad = tree->data.arg_info.pad;
+
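+ /* reserve the 4 byte argument slot plus any alignment padding in a single ESP adjustment */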
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 4 + pad);
x86_fst_membase (s->code, X86_ESP, 0, FALSE, TRUE);
}
stmt: ARG_R8 (freg) {
- x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8);
+ int pad = tree->data.arg_info.pad;
+
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 8 + pad);
x86_fst_membase (s->code, X86_ESP, 0, TRUE, TRUE);
}
}
}
+freg: CALL_R8 (this, reg) {
+ int treg = X86_EAX;
+ int lreg = tree->left->reg1;
+ int rreg = tree->right->reg1;
+
+ if (lreg == treg || rreg == treg)
+ treg = X86_EDX;
+ if (lreg == treg || rreg == treg)
+ treg = X86_ECX;
+ if (lreg == treg || rreg == treg)
+ mono_assert_not_reached ();
+
+ X86_CALL_BEGIN;
+
+ x86_call_reg (s->code, rreg);
+
+ X86_CALL_END;
+}
+
freg: CALL_R8 (this, ADDR_G) {
int lreg = tree->left->reg1;
int treg = X86_EAX;
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- x86_alu_membase_imm (s->code, X86_CMP, lreg, 0, 0);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, tree->right->data.p);
x86_call_code (s->code, 0);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
freg: CALL_R8 (this, INTF_ADDR) {
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_mov_reg_membase (s->code, lreg, lreg,
x86_mov_reg_membase (s->code, lreg, lreg, tree->right->data.m->klass->interface_id << 2, 4);
x86_call_virtual (s->code, lreg, tree->right->data.m->slot << 2);
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
freg: CALL_R8 (this, VFUNC_ADDR) {
if (lreg == treg)
treg = X86_EDX;
- if (tree->left->op != MB_TERM_NOP) {
- mono_assert (lreg >= 0);
- x86_push_reg (s->code, lreg);
- }
-
- if (tree->data.ci.vtype_num) {
- int offset = VARINFO (s, tree->data.ci.vtype_num).offset;
- x86_lea_membase (s->code, treg, X86_EBP, offset);
- x86_push_reg (s->code, treg);
- }
+ X86_CALL_BEGIN;
x86_mov_reg_membase (s->code, lreg, lreg, 0, 4);
x86_call_virtual (s->code, lreg,
G_STRUCT_OFFSET (MonoVTable, vtable) + (tree->right->data.m->slot << 2));
- if (tree->data.ci.args_size)
- x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, tree->data.ci.args_size);
+ X86_CALL_END;
}
stmt: RET (freg) {
}
stmt: ARG_OBJ (CONST_I4) {
+ int pad = tree->data.arg_info.pad;
+
+ X86_ARG_PAD (pad);
x86_push_imm (s->code, tree->left->data.i);
}
stmt: ARG_OBJ (reg) {
- int size = tree->data.i;
+ int size = tree->data.arg_info.size;
+ int pad = tree->data.arg_info.pad;
int sa;
- mono_assert (size > 0);
+ if (!size)
+ return;
- sa = size + 3;
- sa &= ~3;
+ sa = size + pad;
/* reserve space for the argument */
x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, sa);
- x86_push_reg (s->code, X86_EAX);
- x86_push_reg (s->code, X86_EDX);
- x86_push_reg (s->code, X86_ECX);
-
x86_push_imm (s->code, size);
x86_push_reg (s->code, tree->left->reg1);
- x86_lea_membase (s->code, X86_EAX, X86_ESP, 5*4);
+ x86_lea_membase (s->code, X86_EAX, X86_ESP, 2*4);
x86_push_reg (s->code, X86_EAX);
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
x86_call_code (s->code, 0);
x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, 12);
-
- x86_pop_reg (s->code, X86_ECX);
- x86_pop_reg (s->code, X86_EDX);
- x86_pop_reg (s->code, X86_EAX);
}
stmt: RET_OBJ (reg) {
x86_push_reg (s->code, tree->left->reg1);
x86_push_membase (s->code, X86_EBP, 8);
-
mono_add_jump_info (s, s->code, MONO_JUMP_INFO_ABS, MEMCOPY);
x86_call_code (s->code, 0);
return 0;
}
+
guint64
mono_llmult_ovf (gpointer *exc, guint32 al, gint32 ah, guint32 bl, gint32 bh)
{
+ /*
+ Use Karatsuba algorithm where:
+ a*b is: AhBh(R^2+R)+(Ah-Al)(Bl-Bh)R+AlBl(R+1)
+ where Ah is the "high half" (most significant 32 bits) of a and
+ where Al is the "low half" (least significant 32 bits) of a and
+ where Bh is the "high half" of b and Bl is the "low half" and
+ where R is the Radix or "size of the half" (in our case 32 bits)
+
+ Note, for the product of two 64 bit numbers to fit into a 64 bit
+ result, ah and/or bh must be 0. This will save us from doing
+ the AhBh term at all.
+
+ Also note that we refactor so that we don't overflow 64 bits with
+ intermediate results. So we use [(Ah-Al)(Bl-Bh)+AlBl]R+AlBl
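+
+ The refactoring is plain algebra: (Ah-Al)(Bl-Bh) = AhBl - AhBh - AlBl + AlBh,
+ and since AhBh must be zero for the result to fit, adding AlBl back in
+ leaves exactly the middle term AhBl + AlBh.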
+ */
+
gint64 res, t1;
+ gint32 sign;
+
+ /* need to work with absolute values, so find out what the
+ resulting sign will be and convert any negative numbers
+ from two's complement
+ */
+ sign = ah ^ bh;
+ if (ah < 0) {
+ /* flip the bits and add 1 */
+ ah ^= ~0;
+ if (al == 0)
+ ah += 1;
+ else {
+ al ^= ~0;
+ al +=1;
+ }
+ }
+
+ if (bh < 0) {
+ /* flip the bits and add 1 */
+ bh ^= ~0;
+ if (bl == 0)
+ bh += 1;
+ else {
+ bl ^= ~0;
+ bl +=1;
+ }
+ }
+
+ /* we overflow for sure if both upper halves are greater
+ than zero because we would need to shift their
+ product 64 bits to the left and that will not fit
+ in a 64 bit result */
+ if (ah && bh)
+ goto raise_exception;
- // fixme: check for overflow
+ /* do the AlBl term first */
+ t1 = (gint64)al * (gint64)bl;
- res = (gint64)al * (gint64)bl;
+ res = t1;
- t1 = (gint64)ah * bl + al * (gint64)bh;
+ /* now do the [(Ah-Al)(Bl-Bh)+AlBl]R term */
+ t1 += (gint64)(ah - al) * (gint64)(bl - bh);
+ t1 <<= 32;
+ /* check for overflow */
+ if (t1 > (0x7FFFFFFFFFFFFFFF - res))
+ goto raise_exception;
- res += ((gint64)t1) << 32;
+ res += t1;
*exc = NULL;
- return res;
-/*
+ if (sign < 0)
+ return -res;
+ else
+ return res;
+
raise_exception:
*exc = mono_get_exception_overflow ();
return 0;
-*/
}
gint64
x86_patch (br[2], s->code);
}
+void mono_emit_stack_alloc (MBCGEN_TYPE* s, MBTREE_TYPE* tree)
+{
+#ifdef PLATFORM_WIN32
+ guint8* br[5];
+ int sreg;
+
+ /*
+ * Under Windows:
+ * If requested stack size is larger than one page,
+ * perform stack-touch operation
+ * (see comments in mono_emit_stack_alloc_const below).
+ */
+ x86_test_reg_imm (s->code, tree->left->reg1, ~0xFFF);
+ br[0] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE);
+
+ sreg = tree->left->reg1;
+
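+ /* allocate and touch the stack one page (0x1000 bytes) at a time while at
+  * least a whole page remains, then allocate whatever is left */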
+ br[2] = s->code; /* loop */
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP);
+ x86_alu_reg_imm (s->code, X86_SUB, sreg, 0x1000);
+ x86_alu_reg_imm (s->code, X86_CMP, sreg, 0x1000);
+ br[3] = s->code; x86_branch8 (s->code, X86_CC_AE, 0, FALSE);
+ x86_patch (br[3], br[2]);
+ x86_test_reg_reg (s->code, sreg, sreg);
+ br[4] = s->code; x86_branch8 (s->code, X86_CC_Z, 0, FALSE);
+ x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, sreg);
+
+ br[1] = s->code; x86_jump8 (s->code, 0);
+
+ x86_patch (br[0], s->code);
+ x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1);
+ x86_patch (br[1], s->code);
+ x86_patch (br[4], s->code);
+#else /* PLATFORM_WIN32 */
+ x86_alu_reg_reg (s->code, X86_SUB, X86_ESP, tree->left->reg1);
+#endif
+}
+
+void mono_emit_stack_alloc_const (MBCGEN_TYPE* s, MBTREE_TYPE* tree, int size)
+{
+#ifdef PLATFORM_WIN32
+ int i, npages;
+ guint8* br[2];
+
+ if (size > 0xFFE) {
+ /*
+ * Generate stack probe code.
+ * Under Windows, it is necessary to allocate one page at a time,
+ * "touching" stack after each successful sub-allocation. This is
+ * because of the way stack growth is implemented - there is a
+ * guard page before the lowest stack page that is currently committed.
+ * Stack normally grows sequentially so OS traps access to the
+ * guard page and commits more pages when needed.
+ */
+ npages = ((unsigned) size) >> 12;
+ if (npages > 4) {
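+ /* EAX is the page loop counter; when it is neither the result nor the size
+  * register it may hold a live value, so park it in the first committed page
+  * and restore it after the loop */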
+ if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP);
+ x86_mov_membase_reg (s->code, X86_ESP, 0x1000 - 4, X86_EAX, 4); /* save EAX */
+ x86_mov_reg_imm (s->code, X86_EAX, npages - 1);
+ } else {
+ x86_mov_reg_imm (s->code, X86_EAX, npages);
+ }
+ br[0] = s->code; /* loop */
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP);
+ x86_dec_reg (s->code, X86_EAX);
+ br[1] = s->code; x86_branch8 (s->code, X86_CC_NZ, 0, TRUE);
+ x86_patch (br[1], br[0]);
+ if (tree->reg1 != X86_EAX && tree->left->reg1 != X86_EAX)
+ x86_mov_reg_membase (s->code, X86_EAX, X86_ESP, (npages * 0x1000) - 4, 4); /* restore EAX */
+ } else {
+ /* generate unrolled code for relatively small allocs */
+ for (i = npages; --i >= 0;) {
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (s->code, X86_ESP, 0, X86_ESP);
+ }
+ }
+ }
+
+ if (size & 0xFFF) x86_alu_reg_imm (s->code, X86_ADD, X86_ESP, -(size & 0xFFF));
+#else /* PLATFORM_WIN32 */
+ x86_alu_reg_imm (s->code, X86_SUB, X86_ESP, size);
+#endif
+}
+
gpointer
mono_ldvirtftn (MonoObject *this, int slot)
{
m = class->vtable [slot];
if (is_proxy) {
- return arch_create_remoting_trampoline (m);
+ return mono_jit_create_remoting_trampoline (m);
} else {
EnterCriticalSection (metadata_section);
addr = mono_compile_method (m);
m = class->vtable [slot];
if (is_proxy) {
- return arch_create_remoting_trampoline (m);
+ return mono_jit_create_remoting_trampoline (m);
} else {
EnterCriticalSection (metadata_section);
addr = mono_compile_method (m);