2 * mini-x86.c: x86 backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/threads.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/mono-debug.h>
23 #include <mono/utils/mono-math.h>
24 #include <mono/utils/mono-counters.h>
31 /* On windows, these hold the key returned by TlsAlloc () */
32 static gint lmf_tls_offset = -1;
33 static gint lmf_addr_tls_offset = -1;
34 static gint appdomain_tls_offset = -1;
35 static gint thread_tls_offset = -1;
38 static gboolean optimize_for_xen = TRUE;
40 #define optimize_for_xen 0
44 static gboolean is_win32 = TRUE;
46 static gboolean is_win32 = FALSE;
49 /* This mutex protects architecture specific caches */
50 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
51 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
52 static CRITICAL_SECTION mini_arch_mutex;
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
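/* Rounds val up to the next multiple of align (align must be a power of two),
 * e.g. ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16. */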
59 /* Under windows, the default pinvoke calling convention is stdcall */
60 #define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
62 #define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
66 mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
69 mono_arch_regname (int reg)
72 case X86_EAX: return "%eax";
73 case X86_EBX: return "%ebx";
74 case X86_ECX: return "%ecx";
75 case X86_EDX: return "%edx";
76 case X86_ESP: return "%esp";
77 case X86_EBP: return "%ebp";
78 case X86_EDI: return "%edi";
79 case X86_ESI: return "%esi";
85 mono_arch_fregname (int reg)
110 mono_arch_xregname (int reg)
151 /* Only if storage == ArgValuetypeInReg */
152 ArgStorage pair_storage [2];
161 gboolean need_stack_align;
162 guint32 stack_align_amount;
170 #define FLOAT_PARAM_REGS 0
172 static X86_Reg_No param_regs [] = { 0 };
174 #if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
175 #define SMALL_STRUCTS_IN_REGS
176 static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
180 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
182 ainfo->offset = *stack_size;
184 if (*gr >= PARAM_REGS) {
185 ainfo->storage = ArgOnStack;
186 (*stack_size) += sizeof (gpointer);
189 ainfo->storage = ArgInIReg;
190 ainfo->reg = param_regs [*gr];
196 add_general_pair (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
198 ainfo->offset = *stack_size;
200 g_assert (PARAM_REGS == 0);
202 ainfo->storage = ArgOnStack;
203 (*stack_size) += sizeof (gpointer) * 2;
207 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
209 ainfo->offset = *stack_size;
211 if (*gr >= FLOAT_PARAM_REGS) {
212 ainfo->storage = ArgOnStack;
213 (*stack_size) += is_double ? 8 : 4;
216 /* A double register */
218 ainfo->storage = ArgInDoubleSSEReg;
220 ainfo->storage = ArgInFloatSSEReg;
228 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
230 guint32 *gr, guint32 *fr, guint32 *stack_size)
235 klass = mono_class_from_mono_type (type);
236 size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
238 #ifdef SMALL_STRUCTS_IN_REGS
239 if (sig->pinvoke && is_return) {
240 MonoMarshalType *info;
243 * the exact rules are not very well documented, the code below seems to work with the
244 * code generated by gcc 3.3.3 -mno-cygwin.
246 info = mono_marshal_load_type_info (klass);
249 ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
251 /* Special case structs with only a float member */
252 if ((info->native_size == 8) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R8)) {
253 ainfo->storage = ArgValuetypeInReg;
254 ainfo->pair_storage [0] = ArgOnDoubleFpStack;
257 if ((info->native_size == 4) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R4)) {
258 ainfo->storage = ArgValuetypeInReg;
259 ainfo->pair_storage [0] = ArgOnFloatFpStack;
262 if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
263 ainfo->storage = ArgValuetypeInReg;
264 ainfo->pair_storage [0] = ArgInIReg;
265 ainfo->pair_regs [0] = return_regs [0];
266 if (info->native_size > 4) {
267 ainfo->pair_storage [1] = ArgInIReg;
268 ainfo->pair_regs [1] = return_regs [1];
275 ainfo->offset = *stack_size;
276 ainfo->storage = ArgOnStack;
277 *stack_size += ALIGN_TO (size, sizeof (gpointer));
283 * Obtain information about a call according to the calling convention.
284 * For x86 ELF, see the "System V Application Binary Interface Intel386
285 * Architecture Processor Supplement, Fourth Edition" document for more
287 * For x86 win32, see ???.
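 *
 * Illustrative example (not taken from the ABI document): for a cdecl call such as
 * int f (int a, double b), both arguments are passed on the stack; a gets offset 0,
 * b gets offset 4, and stack_usage is 12 bytes before any frame alignment padding.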
290 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
294 int n = sig->hasthis + sig->param_count;
295 guint32 stack_size = 0;
299 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
301 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
308 ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
309 switch (ret_type->type) {
310 case MONO_TYPE_BOOLEAN:
321 case MONO_TYPE_FNPTR:
322 case MONO_TYPE_CLASS:
323 case MONO_TYPE_OBJECT:
324 case MONO_TYPE_SZARRAY:
325 case MONO_TYPE_ARRAY:
326 case MONO_TYPE_STRING:
327 cinfo->ret.storage = ArgInIReg;
328 cinfo->ret.reg = X86_EAX;
332 cinfo->ret.storage = ArgInIReg;
333 cinfo->ret.reg = X86_EAX;
336 cinfo->ret.storage = ArgOnFloatFpStack;
339 cinfo->ret.storage = ArgOnDoubleFpStack;
341 case MONO_TYPE_GENERICINST:
342 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
343 cinfo->ret.storage = ArgInIReg;
344 cinfo->ret.reg = X86_EAX;
348 case MONO_TYPE_VALUETYPE: {
349 guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
351 add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
352 if (cinfo->ret.storage == ArgOnStack)
353 /* The caller passes the address where the value is stored */
354 add_general (&gr, &stack_size, &cinfo->ret);
357 case MONO_TYPE_TYPEDBYREF:
358 /* Same as a valuetype with size 12 (the size of MonoTypedRef on x86) */
359 add_general (&gr, &stack_size, &cinfo->ret);
363 cinfo->ret.storage = ArgNone;
366 g_error ("Can't handle as return value 0x%x", sig->ret->type);
372 add_general (&gr, &stack_size, cinfo->args + 0);
374 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
376 fr = FLOAT_PARAM_REGS;
378 /* Emit the signature cookie just before the implicit arguments */
379 add_general (&gr, &stack_size, &cinfo->sig_cookie);
382 for (i = 0; i < sig->param_count; ++i) {
383 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
386 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
387 /* We always pass the sig cookie on the stack for simplicity */
389 * Prevent implicit arguments + the sig cookie from being passed
393 fr = FLOAT_PARAM_REGS;
395 /* Emit the signature cookie just before the implicit arguments */
396 add_general (&gr, &stack_size, &cinfo->sig_cookie);
399 if (sig->params [i]->byref) {
400 add_general (&gr, &stack_size, ainfo);
403 ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
404 switch (ptype->type) {
405 case MONO_TYPE_BOOLEAN:
408 add_general (&gr, &stack_size, ainfo);
413 add_general (&gr, &stack_size, ainfo);
417 add_general (&gr, &stack_size, ainfo);
422 case MONO_TYPE_FNPTR:
423 case MONO_TYPE_CLASS:
424 case MONO_TYPE_OBJECT:
425 case MONO_TYPE_STRING:
426 case MONO_TYPE_SZARRAY:
427 case MONO_TYPE_ARRAY:
428 add_general (&gr, &stack_size, ainfo);
430 case MONO_TYPE_GENERICINST:
431 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
432 add_general (&gr, &stack_size, ainfo);
436 case MONO_TYPE_VALUETYPE:
437 add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
439 case MONO_TYPE_TYPEDBYREF:
440 stack_size += sizeof (MonoTypedRef);
441 ainfo->storage = ArgOnStack;
445 add_general_pair (&gr, &stack_size, ainfo);
448 add_float (&fr, &stack_size, ainfo, FALSE);
451 add_float (&fr, &stack_size, ainfo, TRUE);
454 g_error ("unexpected type 0x%x", ptype->type);
455 g_assert_not_reached ();
459 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
461 fr = FLOAT_PARAM_REGS;
463 /* Emit the signature cookie just before the implicit arguments */
464 add_general (&gr, &stack_size, &cinfo->sig_cookie);
467 if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) {
468 cinfo->need_stack_align = TRUE;
469 cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT);
470 stack_size += cinfo->stack_align_amount;
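		/* e.g. if MONO_ARCH_FRAME_ALIGNMENT is 16 and stack_size is 20, the call site
		 * pads by 12 bytes so the outgoing argument area stays 16-byte aligned. */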
473 cinfo->stack_usage = stack_size;
474 cinfo->reg_usage = gr;
475 cinfo->freg_usage = fr;
480 * mono_arch_get_argument_info:
481 * @csig: a method signature
482 * @param_count: the number of parameters to consider
483 * @arg_info: an array to store the result infos
485 * Gathers information on parameters such as size, alignment and
486 * padding. arg_info should be large enough to hold param_count + 1 entries.
488 * Returns the size of the argument area on the stack.
491 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
493 int k, args_size = 0;
499 cinfo = get_call_info (NULL, NULL, csig, FALSE);
501 if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
502 args_size += sizeof (gpointer);
506 arg_info [0].offset = offset;
509 args_size += sizeof (gpointer);
513 arg_info [0].size = args_size;
515 for (k = 0; k < param_count; k++) {
516 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
518 /* ignore alignment for now */
521 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
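		/* pad is the distance to the next multiple of align (align must be a power of two),
		 * e.g. args_size == 6 with align == 4 gives pad == 2. */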
522 arg_info [k].pad = pad;
524 arg_info [k + 1].pad = 0;
525 arg_info [k + 1].size = size;
527 arg_info [k + 1].offset = offset;
531 if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL (csig))
532 align = MONO_ARCH_FRAME_ALIGNMENT;
535 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
536 arg_info [k].pad = pad;
543 static const guchar cpuid_impl [] = {
544 0x55, /* push %ebp */
545 0x89, 0xe5, /* mov %esp,%ebp */
546 0x53, /* push %ebx */
547 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
548 0x0f, 0xa2, /* cpuid */
549 0x50, /* push %eax */
550 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
551 0x89, 0x18, /* mov %ebx,(%eax) */
552 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
553 0x89, 0x08, /* mov %ecx,(%eax) */
554 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
555 0x89, 0x10, /* mov %edx,(%eax) */
557 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
558 0x89, 0x02, /* mov %eax,(%edx) */
564 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
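/* On the inline-assembly path below, CPUID support is detected by trying to toggle
 * bit 21 (the ID flag, mask 0x200000) of EFLAGS: if the bit can be flipped, the
 * CPUID instruction is available and is then executed directly. */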
567 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
571 __asm__ __volatile__ (
574 "movl %%eax, %%edx\n"
575 "xorl $0x200000, %%eax\n"
580 "xorl %%edx, %%eax\n"
581 "andl $0x200000, %%eax\n"
603 /* Have to use the code manager to get around WinXP DEP */
604 static CpuidFunc func = NULL;
607 ptr = mono_global_codeman_reserve (sizeof (cpuid_impl));
608 memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
609 func = (CpuidFunc)ptr;
611 func (id, p_eax, p_ebx, p_ecx, p_edx);
614 * We use this approach because of issues with gcc and pic code, see:
615 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
616 __asm__ __volatile__ ("cpuid"
617 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
626 * Initialize the cpu to execute managed code.
629 mono_arch_cpu_init (void)
631 /* spec compliance requires running with double precision */
635 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
636 fpcw &= ~X86_FPCW_PRECC_MASK;
637 fpcw |= X86_FPCW_PREC_DOUBLE;
638 __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
639 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
641 _control87 (_PC_53, MCW_PC);
646 * Initialize architecture specific code.
649 mono_arch_init (void)
651 InitializeCriticalSection (&mini_arch_mutex);
655 * Cleanup architecture specific code.
658 mono_arch_cleanup (void)
660 DeleteCriticalSection (&mini_arch_mutex);
664 * This function returns the optimizations supported on this cpu.
667 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
669 int eax, ebx, ecx, edx;
673 /* Feature Flags function, flags returned in EDX. */
674 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
675 if (edx & (1 << 15)) {
676 opts |= MONO_OPT_CMOV;
678 opts |= MONO_OPT_FCMOV;
680 *exclude_mask |= MONO_OPT_FCMOV;
682 *exclude_mask |= MONO_OPT_CMOV;
684 opts |= MONO_OPT_SSE2;
686 *exclude_mask |= MONO_OPT_SSE2;
688 #ifdef MONO_ARCH_SIMD_INTRINSICS
689 /*SIMD intrinsics require at least SSE2.*/
690 if (!(opts & MONO_OPT_SSE2))
691 *exclude_mask |= MONO_OPT_SIMD;
698 * This function tests which SSE versions are supported.
700 * Returns a bitmask corresponding to all supported versions.
702 * TODO detect other versions like SSE4a.
705 mono_arch_cpu_enumerate_simd_versions (void)
707 int eax, ebx, ecx, edx;
708 guint32 sse_opts = 0;
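	/* CPUID function 1 feature bits checked below: EDX bit 25 = SSE, EDX bit 26 = SSE2,
	 * ECX bit 0 = SSE3, ECX bit 9 = SSSE3, ECX bit 19 = SSE4.1, ECX bit 20 = SSE4.2. */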
710 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
712 sse_opts |= 1 << SIMD_VERSION_SSE1;
714 sse_opts |= 1 << SIMD_VERSION_SSE2;
716 sse_opts |= 1 << SIMD_VERSION_SSE3;
718 sse_opts |= 1 << SIMD_VERSION_SSSE3;
720 sse_opts |= 1 << SIMD_VERSION_SSE41;
722 sse_opts |= 1 << SIMD_VERSION_SSE42;
728 * Determine whether the trap whose info is in SIGINFO is caused by an integer overflow.
732 mono_arch_is_int_overflow (void *sigctx, void *info)
737 mono_arch_sigctx_to_monoctx (sigctx, &ctx);
739 ip = (guint8*)ctx.eip;
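	/* Opcode 0xf7 with mod == 3 and reg == 7 in the modrm byte encodes "idiv r32";
	 * the rm field below selects which register holds the divisor. */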
741 if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) {
745 switch (x86_modrm_rm (ip [1])) {
765 g_assert_not_reached ();
777 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
782 for (i = 0; i < cfg->num_varinfo; i++) {
783 MonoInst *ins = cfg->varinfo [i];
784 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
787 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
790 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
791 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
794 /* we don't allocate I1 to registers because there is no simple way to sign extend
795 * 8-bit quantities in caller saved registers on x86 */
796 if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
797 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
798 g_assert (i == vmv->idx);
799 vars = g_list_prepend (vars, vmv);
803 vars = mono_varlist_sort (cfg, vars, 0);
809 mono_arch_get_global_int_regs (MonoCompile *cfg)
813 /* we can use 3 registers for global allocation */
814 regs = g_list_prepend (regs, (gpointer)X86_EBX);
815 regs = g_list_prepend (regs, (gpointer)X86_ESI);
816 regs = g_list_prepend (regs, (gpointer)X86_EDI);
822 * mono_arch_regalloc_cost:
824 * Return the cost, in number of memory references, of the action of
825 * allocating the variable VMV into a register during global register
829 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
831 MonoInst *ins = cfg->varinfo [vmv->idx];
833 if (cfg->method->save_lmf)
834 /* The register is already saved */
835 return (ins->opcode == OP_ARG) ? 1 : 0;
837 /* push+pop+possible load if it is an argument */
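	/* e.g. with no LMF, a local costs 2 memory references (push in the prolog, pop in the
	 * epilog), while an argument costs 3 because it is also loaded from its stack slot. */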
838 return (ins->opcode == OP_ARG) ? 3 : 2;
842 * Set var information according to the calling convention. X86 version.
843 * The locals var stuff should most likely be split in another method.
846 mono_arch_allocate_vars (MonoCompile *cfg)
848 MonoMethodSignature *sig;
849 MonoMethodHeader *header;
851 guint32 locals_stack_size, locals_stack_align;
856 header = mono_method_get_header (cfg->method);
857 sig = mono_method_signature (cfg->method);
859 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
861 cfg->frame_reg = X86_EBP;
864 /* Reserve space to save LMF and caller saved registers */
866 if (cfg->method->save_lmf) {
867 offset += sizeof (MonoLMF);
869 if (cfg->used_int_regs & (1 << X86_EBX)) {
873 if (cfg->used_int_regs & (1 << X86_EDI)) {
877 if (cfg->used_int_regs & (1 << X86_ESI)) {
882 switch (cinfo->ret.storage) {
883 case ArgValuetypeInReg:
884 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
886 cfg->ret->opcode = OP_REGOFFSET;
887 cfg->ret->inst_basereg = X86_EBP;
888 cfg->ret->inst_offset = - offset;
894 /* Allocate locals */
895 offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
896 if (locals_stack_align) {
897 offset += (locals_stack_align - 1);
898 offset &= ~(locals_stack_align - 1);
901 * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
902 * have locals larger than 8 bytes we need to make sure that
903 * they have the appropriate offset.
905 if (MONO_ARCH_FRAME_ALIGNMENT > 8 && locals_stack_align > 8)
906 offset += MONO_ARCH_FRAME_ALIGNMENT - sizeof (gpointer) * 2;
907 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
908 if (offsets [i] != -1) {
909 MonoInst *inst = cfg->varinfo [i];
910 inst->opcode = OP_REGOFFSET;
911 inst->inst_basereg = X86_EBP;
912 inst->inst_offset = - (offset + offsets [i]);
913 //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
916 offset += locals_stack_size;
920 * Allocate arguments+return value
923 switch (cinfo->ret.storage) {
925 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
927 * In the new IR, the cfg->vret_addr variable represents the
928 * vtype return value.
930 cfg->vret_addr->opcode = OP_REGOFFSET;
931 cfg->vret_addr->inst_basereg = cfg->frame_reg;
932 cfg->vret_addr->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
933 if (G_UNLIKELY (cfg->verbose_level > 1)) {
934 printf ("vret_addr =");
935 mono_print_ins (cfg->vret_addr);
938 cfg->ret->opcode = OP_REGOFFSET;
939 cfg->ret->inst_basereg = X86_EBP;
940 cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
943 case ArgValuetypeInReg:
946 cfg->ret->opcode = OP_REGVAR;
947 cfg->ret->inst_c0 = cinfo->ret.reg;
948 cfg->ret->dreg = cinfo->ret.reg;
951 case ArgOnFloatFpStack:
952 case ArgOnDoubleFpStack:
955 g_assert_not_reached ();
958 if (sig->call_convention == MONO_CALL_VARARG) {
959 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
960 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
963 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
964 ArgInfo *ainfo = &cinfo->args [i];
965 inst = cfg->args [i];
966 if (inst->opcode != OP_REGVAR) {
967 inst->opcode = OP_REGOFFSET;
968 inst->inst_basereg = X86_EBP;
970 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
973 offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
974 offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
976 cfg->stack_offset = offset;
980 mono_arch_create_vars (MonoCompile *cfg)
982 MonoMethodSignature *sig;
985 sig = mono_method_signature (cfg->method);
987 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
989 if (cinfo->ret.storage == ArgValuetypeInReg)
990 cfg->ret_var_is_local = TRUE;
991 if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig->ret)) {
992 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
997 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call)
1000 MonoMethodSignature *tmp_sig;
1003 /* FIXME: Add support for signature tokens to AOT */
1004 cfg->disable_aot = TRUE;
1005 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1008 * mono_ArgIterator_Setup assumes the signature cookie is
1009 * passed first and all the arguments which were before it are
1010 * passed on the stack after the signature. So compensate by
1011 * passing a different signature.
1013 tmp_sig = mono_metadata_signature_dup (call->signature);
1014 tmp_sig->param_count -= call->signature->sentinelpos;
1015 tmp_sig->sentinelpos = 0;
1016 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1018 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1019 sig_arg->inst_p0 = tmp_sig;
1021 arg->inst_left = sig_arg;
1022 arg->type = STACK_PTR;
1023 /* prepend, so they get reversed */
1024 arg->next = call->out_args;
1025 call->out_args = arg;
1029 * It is expensive to adjust esp for each individual fp argument pushed on the stack
1030 * so we try to do it just once when we have multiple fp arguments in a row.
1031 * We don't use this mechanism generally because for int arguments the generated code
1032 * is slightly bigger and new generation cpus optimize away the dependency chains
1033 * created by push instructions on the esp value.
1034 * fp_arg_setup is the first argument in the execution sequence where the esp register is adjusted.
1038 collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup)
1043 for (; start_arg < sig->param_count; ++start_arg) {
1044 t = mini_type_get_underlying_type (NULL, sig->params [start_arg]);
1045 if (!t->byref && t->type == MONO_TYPE_R8) {
1046 fp_space += sizeof (double);
1047 *fp_arg_setup = start_arg;
1056 * take the arguments and generate the arch-specific
1057 * instructions to properly call the function in call.
1058 * This includes pushing, moving arguments to the right register, etc.
1062 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1064 MonoMethodSignature *sig;
1067 int sentinelpos = 0;
1068 int fp_args_space = 0, fp_args_offset = 0, fp_arg_setup = -1;
1070 sig = call->signature;
1071 n = sig->param_count + sig->hasthis;
1073 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1075 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1076 sentinelpos = sig->sentinelpos + (is_virtual ? 1 : 0);
1078 for (i = 0; i < n; ++i) {
1079 ArgInfo *ainfo = cinfo->args + i;
1081 /* Emit the signature cookie just before the implicit arguments */
1082 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
1083 emit_sig_cookie (cfg, call);
1086 if (is_virtual && i == 0) {
1087 /* the argument will be attached to the call instruction */
1088 in = call->args [i];
1092 if (i >= sig->hasthis)
1093 t = sig->params [i - sig->hasthis];
1095 t = &mono_defaults.int_class->byval_arg;
1096 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1098 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1099 in = call->args [i];
1100 arg->cil_code = in->cil_code;
1101 arg->inst_left = in;
1102 arg->type = in->type;
1103 /* prepend, so they get reversed */
1104 arg->next = call->out_args;
1105 call->out_args = arg;
1107 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
1112 if (t->type == MONO_TYPE_TYPEDBYREF) {
1113 size = sizeof (MonoTypedRef);
1114 align = sizeof (gpointer);
1117 size = mini_type_stack_size_full (cfg->generic_sharing_context, &in->klass->byval_arg, &ialign, sig->pinvoke);
1119 arg->opcode = OP_OUTARG_VT;
1120 arg->klass = in->klass;
1121 arg->backend.is_pinvoke = sig->pinvoke;
1122 arg->inst_imm = size;
1125 switch (ainfo->storage) {
1127 arg->opcode = OP_OUTARG;
1129 if (t->type == MONO_TYPE_R4) {
1130 arg->opcode = OP_OUTARG_R4;
1131 } else if (t->type == MONO_TYPE_R8) {
1132 arg->opcode = OP_OUTARG_R8;
1133 /* we store in the upper bits of backend.arg_info the needed
1134 * esp adjustment and in the lower bits the offset from esp
1135 * where the arg needs to be stored
1137 if (!fp_args_space) {
1138 fp_args_space = collect_fp_stack_space (sig, i - sig->hasthis, &fp_arg_setup);
1139 fp_args_offset = fp_args_space;
1141 arg->backend.arg_info = fp_args_space - fp_args_offset;
1142 fp_args_offset -= sizeof (double);
1143 if (i - sig->hasthis == fp_arg_setup) {
1144 arg->backend.arg_info |= fp_args_space << 16;
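				/* Illustration (two consecutive R8 args, 16 bytes total): the first arg
				 * carries the full 16-byte esp adjustment in the upper bits with offset 0,
				 * the second carries only offset 8. */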
1146 if (fp_args_offset == 0) {
1147 /* the allocated esp stack is finished:
1148 * prepare for a possible second run of fp args
1156 g_assert_not_reached ();
1162 /* Handle the case where there are no implicit arguments */
1163 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
1164 emit_sig_cookie (cfg, call);
1167 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1168 if (cinfo->ret.storage == ArgValuetypeInReg) {
1169 MonoInst *zero_inst;
1171 * After the call, the struct is in registers, but needs to be saved to the memory pointed
1172 * to by vt_arg in this_vret_args. This means that vt_arg needs to be saved somewhere
1173 * before calling the function. So we add a dummy instruction to represent pushing the
1174 * struct return address to the stack. The return address will be saved to this stack slot
1175 * by the code emitted in this_vret_args.
1177 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1178 MONO_INST_NEW (cfg, zero_inst, OP_ICONST);
1179 zero_inst->inst_p0 = 0;
1180 arg->inst_left = zero_inst;
1181 arg->type = STACK_PTR;
1182 /* prepend, so they get reversed */
1183 arg->next = call->out_args;
1184 call->out_args = arg;
1187 /* if the function returns a struct, the called method already does a ret $0x4 */
1188 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
1189 cinfo->stack_usage -= 4;
1192 call->stack_usage = cinfo->stack_usage;
1194 if (cinfo->need_stack_align) {
1195 MONO_INST_NEW (cfg, arg, OP_X86_OUTARG_ALIGN_STACK);
1196 arg->inst_c0 = cinfo->stack_align_amount;
1197 arg->next = call->out_args;
1198 call->out_args = arg;
1205 emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1207 MonoMethodSignature *tmp_sig;
1209 /* FIXME: Add support for signature tokens to AOT */
1210 cfg->disable_aot = TRUE;
1213 * mono_ArgIterator_Setup assumes the signature cookie is
1214 * passed first and all the arguments which were before it are
1215 * passed on the stack after the signature. So compensate by
1216 * passing a different signature.
1218 tmp_sig = mono_metadata_signature_dup (call->signature);
1219 tmp_sig->param_count -= call->signature->sentinelpos;
1220 tmp_sig->sentinelpos = 0;
1221 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_X86_PUSH_IMM, -1, -1, tmp_sig);
1227 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1230 MonoMethodSignature *sig;
1233 int sentinelpos = 0;
1235 sig = call->signature;
1236 n = sig->param_count + sig->hasthis;
1238 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1240 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1241 sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
1243 if (cinfo->need_stack_align) {
1244 MONO_INST_NEW (cfg, arg, OP_SUB_IMM);
1245 arg->dreg = X86_ESP;
1246 arg->sreg1 = X86_ESP;
1247 arg->inst_imm = cinfo->stack_align_amount;
1248 MONO_ADD_INS (cfg->cbb, arg);
1251 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1254 if (cinfo->ret.storage == ArgValuetypeInReg) {
1255 if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
1257 * Tell the JIT to use a more efficient calling convention: call using
1258 * OP_CALL, compute the result location after the call, and save the
1261 call->vret_in_reg = TRUE;
1264 * The valuetype is in EAX:EDX after the call, needs to be copied to
1265 * the stack. Save the address here, so the call instruction can
1268 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1269 vtarg->sreg1 = call->vret_var->dreg;
1270 MONO_ADD_INS (cfg->cbb, vtarg);
1275 /* Handle the case where there are no implicit arguments */
1276 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
1277 emit_sig_cookie2 (cfg, call, cinfo);
1280 /* Arguments are pushed in the reverse order */
1281 for (i = n - 1; i >= 0; i --) {
1282 ArgInfo *ainfo = cinfo->args + i;
1285 if (i >= sig->hasthis)
1286 t = sig->params [i - sig->hasthis];
1288 t = &mono_defaults.int_class->byval_arg;
1289 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1291 MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
1293 in = call->args [i];
1294 arg->cil_code = in->cil_code;
1295 arg->sreg1 = in->dreg;
1296 arg->type = in->type;
1298 g_assert (in->dreg != -1);
1300 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
1304 g_assert (in->klass);
1306 if (t->type == MONO_TYPE_TYPEDBYREF) {
1307 size = sizeof (MonoTypedRef);
1308 align = sizeof (gpointer);
1311 size = mini_type_stack_size_full (cfg->generic_sharing_context, &in->klass->byval_arg, &align, sig->pinvoke);
1315 arg->opcode = OP_OUTARG_VT;
1316 arg->sreg1 = in->dreg;
1317 arg->klass = in->klass;
1318 arg->backend.size = size;
1320 MONO_ADD_INS (cfg->cbb, arg);
1324 switch (ainfo->storage) {
1326 arg->opcode = OP_X86_PUSH;
1328 if (t->type == MONO_TYPE_R4) {
1329 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 4);
1330 arg->opcode = OP_STORER4_MEMBASE_REG;
1331 arg->inst_destbasereg = X86_ESP;
1332 arg->inst_offset = 0;
1333 } else if (t->type == MONO_TYPE_R8) {
1334 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
1335 arg->opcode = OP_STORER8_MEMBASE_REG;
1336 arg->inst_destbasereg = X86_ESP;
1337 arg->inst_offset = 0;
1338 } else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) {
1340 MONO_EMIT_NEW_UNALU (cfg, OP_X86_PUSH, -1, in->dreg + 2);
1345 g_assert_not_reached ();
1348 MONO_ADD_INS (cfg->cbb, arg);
1351 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
1352 /* Emit the signature cookie just before the implicit arguments */
1353 emit_sig_cookie2 (cfg, call, cinfo);
1357 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1360 if (cinfo->ret.storage == ArgValuetypeInReg) {
1363 else if (cinfo->ret.storage == ArgInIReg) {
1365 /* The return address is passed in a register */
1366 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1367 vtarg->sreg1 = call->inst.dreg;
1368 vtarg->dreg = mono_regstate_next_int (cfg->rs);
1369 MONO_ADD_INS (cfg->cbb, vtarg);
1371 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1374 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1375 vtarg->type = STACK_MP;
1376 vtarg->sreg1 = call->vret_var->dreg;
1377 MONO_ADD_INS (cfg->cbb, vtarg);
1380 /* if the function returns a struct, the called method already does a ret $0x4 */
1381 cinfo->stack_usage -= 4;
1384 call->stack_usage = cinfo->stack_usage;
1388 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1391 int size = ins->backend.size;
1394 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
1395 arg->sreg1 = src->dreg;
1397 MONO_ADD_INS (cfg->cbb, arg);
1398 } else if (size <= 20) {
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 4));
1400 mini_emit_memcpy2 (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
1402 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
1403 arg->inst_basereg = src->dreg;
1404 arg->inst_offset = 0;
1405 arg->inst_imm = size;
1407 MONO_ADD_INS (cfg->cbb, arg);
1412 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1414 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1417 if (ret->type == MONO_TYPE_R4) {
1420 } else if (ret->type == MONO_TYPE_R8) {
1423 } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1424 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, val->dreg + 1);
1425 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, val->dreg + 2);
1430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1434 * Allow tracing to work with this interface (with an optional argument)
1437 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1441 g_assert (MONO_ARCH_FRAME_ALIGNMENT >= 8);
1442 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 8);
1444 /* if some args are passed in registers, we need to save them here */
1445 x86_push_reg (code, X86_EBP);
1447 if (cfg->compile_aot) {
1448 x86_push_imm (code, cfg->method);
1449 x86_mov_reg_imm (code, X86_EAX, func);
1450 x86_call_reg (code, X86_EAX);
1452 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
1453 x86_push_imm (code, cfg->method);
1454 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1455 x86_call_code (code, 0);
1457 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT);
1471 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1474 int arg_size = 0, save_mode = SAVE_NONE;
1475 MonoMethod *method = cfg->method;
1477 switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
1478 case MONO_TYPE_VOID:
1479 /* special case string .ctor icall */
1480 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1481 save_mode = SAVE_EAX;
1483 save_mode = SAVE_NONE;
1487 save_mode = SAVE_EAX_EDX;
1491 save_mode = SAVE_FP;
1493 case MONO_TYPE_GENERICINST:
1494 if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
1495 save_mode = SAVE_EAX;
1499 case MONO_TYPE_VALUETYPE:
1500 save_mode = SAVE_STRUCT;
1503 save_mode = SAVE_EAX;
1507 switch (save_mode) {
1509 x86_push_reg (code, X86_EDX);
1510 x86_push_reg (code, X86_EAX);
1511 if (enable_arguments) {
1512 x86_push_reg (code, X86_EDX);
1513 x86_push_reg (code, X86_EAX);
1518 x86_push_reg (code, X86_EAX);
1519 if (enable_arguments) {
1520 x86_push_reg (code, X86_EAX);
1525 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1526 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1527 if (enable_arguments) {
1528 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1529 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1534 if (enable_arguments) {
1535 x86_push_membase (code, X86_EBP, 8);
1544 if (cfg->compile_aot) {
1545 x86_push_imm (code, method);
1546 x86_mov_reg_imm (code, X86_EAX, func);
1547 x86_call_reg (code, X86_EAX);
1549 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
1550 x86_push_imm (code, method);
1551 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1552 x86_call_code (code, 0);
1554 x86_alu_reg_imm (code, X86_ADD, X86_ESP, arg_size + 4);
1556 switch (save_mode) {
1558 x86_pop_reg (code, X86_EAX);
1559 x86_pop_reg (code, X86_EDX);
1562 x86_pop_reg (code, X86_EAX);
1565 x86_fld_membase (code, X86_ESP, 0, TRUE);
1566 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1576 #define EMIT_COND_BRANCH(ins,cond,sign) \
1577 if (ins->flags & MONO_INST_BRLABEL) { \
1578 if (ins->inst_i0->inst_c0) { \
1579 x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
1581 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1582 if ((cfg->opt & MONO_OPT_BRANCH) && \
1583 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
1584 x86_branch8 (code, cond, 0, sign); \
1586 x86_branch32 (code, cond, 0, sign); \
1589 if (ins->inst_true_bb->native_offset) { \
1590 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1592 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1593 if ((cfg->opt & MONO_OPT_BRANCH) && \
1594 x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1595 x86_branch8 (code, cond, 0, sign); \
1597 x86_branch32 (code, cond, 0, sign); \
1602 * Emit an exception if the condition fails and,
1603 * if possible, branch directly to the target
1605 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
1607 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1608 if (tins == NULL) { \
1609 mono_add_patch_info (cfg, code - cfg->native_code, \
1610 MONO_PATCH_INFO_EXC, exc_name); \
1611 x86_branch32 (code, cond, 0, signed); \
1613 EMIT_COND_BRANCH (tins, cond, signed); \
1617 #define EMIT_FPCOMPARE(code) do { \
1618 x86_fcompp (code); \
1619 x86_fnstsw (code); \
1624 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1626 mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1627 x86_call_code (code, 0);
1632 #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
1635 * mono_peephole_pass_1:
1637 * Perform peephole opts which should/can be performed before local regalloc
1640 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1644 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1645 MonoInst *last_ins = ins->prev;
1647 switch (ins->opcode) {
1650 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1652 * X86_LEA is like ADD, but doesn't have the
1653 * sreg1==dreg restriction.
1655 ins->opcode = OP_X86_LEA_MEMBASE;
1656 ins->inst_basereg = ins->sreg1;
1657 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1658 ins->opcode = OP_X86_INC_REG;
1662 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1663 ins->opcode = OP_X86_LEA_MEMBASE;
1664 ins->inst_basereg = ins->sreg1;
1665 ins->inst_imm = -ins->inst_imm;
1666 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1667 ins->opcode = OP_X86_DEC_REG;
1669 case OP_COMPARE_IMM:
1670 case OP_ICOMPARE_IMM:
1671 /* OP_COMPARE_IMM (reg, 0)
1673 * OP_X86_TEST_NULL (reg)
1676 ins->opcode = OP_X86_TEST_NULL;
1678 case OP_X86_COMPARE_MEMBASE_IMM:
1680 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1681 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1683 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1684 * OP_COMPARE_IMM reg, imm
1686 * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1688 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1689 ins->inst_basereg == last_ins->inst_destbasereg &&
1690 ins->inst_offset == last_ins->inst_offset) {
1691 ins->opcode = OP_COMPARE_IMM;
1692 ins->sreg1 = last_ins->sreg1;
1694 /* check if we can remove cmp reg,0 with test null */
1696 ins->opcode = OP_X86_TEST_NULL;
1700 case OP_X86_PUSH_MEMBASE:
1701 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
1702 last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1703 ins->inst_basereg == last_ins->inst_destbasereg &&
1704 ins->inst_offset == last_ins->inst_offset) {
1705 ins->opcode = OP_X86_PUSH;
1706 ins->sreg1 = last_ins->sreg1;
1711 mono_peephole_ins (bb, ins);
1716 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1720 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1721 switch (ins->opcode) {
1723 /* reg = 0 -> XOR (reg, reg) */
1724 /* XOR sets cflags on x86, so we can't do it always */
1725 if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
1728 ins->opcode = OP_IXOR;
1729 ins->sreg1 = ins->dreg;
1730 ins->sreg2 = ins->dreg;
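				/* xor reg,reg encodes in 2 bytes versus 5 for mov reg,imm32, but it
				 * clobbers EFLAGS, hence the INST_IGNORES_CFLAGS check above. */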
1733 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
1734 * since it takes 3 bytes instead of 7.
1736 for (ins2 = ins->next; ins2; ins2 = ins2->next) {
1737 if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1738 ins2->opcode = OP_STORE_MEMBASE_REG;
1739 ins2->sreg1 = ins->dreg;
1741 else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1742 ins2->opcode = OP_STOREI4_MEMBASE_REG;
1743 ins2->sreg1 = ins->dreg;
1745 else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
1746 /* Continue iteration */
1755 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1756 ins->opcode = OP_X86_INC_REG;
1760 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1761 ins->opcode = OP_X86_DEC_REG;
1765 mono_peephole_ins (bb, ins);
1770 * mono_arch_lowering_pass:
1772 * Converts complex opcodes into simpler ones so that each IR instruction
1773 * corresponds to one machine instruction.
1776 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1778 MonoInst *ins, *next;
1780 if (bb->max_vreg > cfg->rs->next_vreg)
1781 cfg->rs->next_vreg = bb->max_vreg;
1784 * FIXME: Need to add more instructions, but the current machine
1785 * description can't model some parts of the composite instructions like cdq.
1788 MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
1789 switch (ins->opcode) {
1792 case OP_IDIV_UN_IMM:
1793 case OP_IREM_UN_IMM:
1795 * Keep the cases where we could generate optimized code; otherwise convert
1796 * to the non-imm variant.
1798 if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0)
1800 mono_decompose_op_imm (cfg, bb, ins);
1807 bb->max_vreg = cfg->rs->next_vreg;
1811 branch_cc_table [] = {
1812 X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1813 X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1814 X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1817 /* Maps CMP_... constants to X86_CC_... constants */
1820 X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
1821 X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
1825 cc_signed_table [] = {
1826 TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
1827 FALSE, FALSE, FALSE, FALSE
1830 static unsigned char*
1831 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
1833 #define XMM_TEMP_REG 0
1834 /* This SSE2 optimization must not be done with OPT_SIMD in place, as it clobbers xmm0. */
1835 /* The xmm pass decomposes OP_FCONV_ ops anyway. */
1836 if (cfg->opt & MONO_OPT_SSE2 && size < 8 && !(cfg->opt & MONO_OPT_SIMD)) {
1837 /* optimize by assigning a local var for this use so we avoid
1838 * the stack manipulations */
1839 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1840 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1841 x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
1842 x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
1843 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1845 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1847 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
1850 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
1851 x86_fnstcw_membase(code, X86_ESP, 0);
1852 x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
1853 x86_alu_reg_imm (code, X86_OR, dreg, 0xc00);
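		/* 0xc00 sets the rounding-control bits of the FPU control word to "truncate
		 * toward zero", which is what a float-to-int cast requires. */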
1854 x86_mov_membase_reg (code, X86_ESP, 2, dreg, 2);
1855 x86_fldcw_membase (code, X86_ESP, 2);
1857 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1858 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
1859 x86_pop_reg (code, dreg);
1860 /* FIXME: need the high register
1861 * x86_pop_reg (code, dreg_high);
1864 x86_push_reg (code, X86_EAX); // SP = SP - 4
1865 x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
1866 x86_pop_reg (code, dreg);
1868 x86_fldcw_membase (code, X86_ESP, 0);
1869 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
1872 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1874 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
1878 static unsigned char*
1879 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
1881 int sreg = tree->sreg1;
1882 int need_touch = FALSE;
1884 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
1893 * If requested stack size is larger than one page,
1894 * perform stack-touch operation
1897 * Generate stack probe code.
1898 * Under Windows, it is necessary to allocate one page at a time,
1899 * "touching" stack after each successful sub-allocation. This is
1900 * because of the way stack growth is implemented - there is a
1901 * guard page before the lowest stack page that is currently committed.
1902 * Stack normally grows sequentially so OS traps access to the
1903 * guard page and commits more pages when needed.
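 *
 * For example, an 8 KB localloc is performed as two 4 KB sub-allocations, touching
 * the newly committed page after each one so the guard page keeps moving down.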
1905 x86_test_reg_imm (code, sreg, ~0xFFF);
1906 br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1908 br[2] = code; /* loop */
1909 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
1910 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
1913 * By the end of the loop, sreg is smaller than 0x1000, so the init routine
1914 * that follows only initializes the last part of the area.
1916 /* Same as the init code below with size==0x1000 */
1917 if (tree->flags & MONO_INST_INIT) {
1918 x86_push_reg (code, X86_EAX);
1919 x86_push_reg (code, X86_ECX);
1920 x86_push_reg (code, X86_EDI);
1921 x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2));
1922 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1923 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
1925 x86_prefix (code, X86_REP_PREFIX);
1927 x86_pop_reg (code, X86_EDI);
1928 x86_pop_reg (code, X86_ECX);
1929 x86_pop_reg (code, X86_EAX);
1932 x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
1933 x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
1934 br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
1935 x86_patch (br[3], br[2]);
1936 x86_test_reg_reg (code, sreg, sreg);
1937 br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1938 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1940 br[1] = code; x86_jump8 (code, 0);
1942 x86_patch (br[0], code);
1943 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1944 x86_patch (br[1], code);
1945 x86_patch (br[4], code);
1948 x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1);
1950 if (tree->flags & MONO_INST_INIT) {
1952 if (tree->dreg != X86_EAX && sreg != X86_EAX) {
1953 x86_push_reg (code, X86_EAX);
1956 if (tree->dreg != X86_ECX && sreg != X86_ECX) {
1957 x86_push_reg (code, X86_ECX);
1960 if (tree->dreg != X86_EDI && sreg != X86_EDI) {
1961 x86_push_reg (code, X86_EDI);
1965 x86_shift_reg_imm (code, X86_SHR, sreg, 2);
1966 if (sreg != X86_ECX)
1967 x86_mov_reg_reg (code, X86_ECX, sreg, 4);
1968 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1970 x86_lea_membase (code, X86_EDI, X86_ESP, offset);
1972 x86_prefix (code, X86_REP_PREFIX);
1975 if (tree->dreg != X86_EDI && sreg != X86_EDI)
1976 x86_pop_reg (code, X86_EDI);
1977 if (tree->dreg != X86_ECX && sreg != X86_ECX)
1978 x86_pop_reg (code, X86_ECX);
1979 if (tree->dreg != X86_EAX && sreg != X86_EAX)
1980 x86_pop_reg (code, X86_EAX);
1987 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
1992 /* Move return value to the target register */
1993 switch (ins->opcode) {
1996 case OP_CALL_MEMBASE:
1997 if (ins->dreg != X86_EAX)
1998 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
2002 case OP_VCALL_MEMBASE:
2005 case OP_VCALL2_MEMBASE:
2006 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
2007 if (cinfo->ret.storage == ArgValuetypeInReg) {
2008 /* Pop the destination address from the stack */
2009 x86_pop_reg (code, X86_ECX);
2011 for (quad = 0; quad < 2; quad ++) {
2012 switch (cinfo->ret.pair_storage [quad]) {
2014 g_assert (cinfo->ret.pair_regs [quad] != X86_ECX);
2015 x86_mov_membase_reg (code, X86_ECX, (quad * sizeof (gpointer)), cinfo->ret.pair_regs [quad], sizeof (gpointer));
2020 g_assert_not_reached ();
2026 MonoCallInst *call = (MonoCallInst*)ins;
2027 if (call->method && !mono_method_signature (call->method)->ret->byref && mono_method_signature (call->method)->ret->type == MONO_TYPE_R4) {
2028 /* Avoid some precision issues by saving/reloading the return value */
2029 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
2030 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
2031 x86_fld_membase (code, X86_ESP, 0, FALSE);
2032 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
2044 * mono_x86_emit_tls_get:
2045 * @code: buffer to store code to
2046 * @dreg: hard register where to place the result
2047 * @tls_offset: offset info
2049 * mono_x86_emit_tls_get emits in @code the native code that puts in
2050 * the dreg register the item in the thread local storage identified by tls_offset.
2053 * Returns: a pointer to the end of the stored code
2056 mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
2058 #ifdef PLATFORM_WIN32
2060 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
2061 * Journal and/or a disassembly of the TlsGet () function.
2063 g_assert (tls_offset < 64);
2064 x86_prefix (code, X86_FS_PREFIX);
2065 x86_mov_reg_mem (code, dreg, 0x18, 4);
2066 /* Dunno what this does but TlsGetValue () contains it */
2067 x86_alu_membase_imm (code, X86_AND, dreg, 0x34, 0);
2068 x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
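	/* fs:[0x18] is the TEB self pointer and 3600 (0xE10) is the offset of the
	 * TlsSlots array inside the TEB, so this reads TlsSlots [tls_offset]. */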
2070 if (optimize_for_xen) {
2071 x86_prefix (code, X86_GS_PREFIX);
2072 x86_mov_reg_mem (code, dreg, 0, 4);
2073 x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4);
2075 x86_prefix (code, X86_GS_PREFIX);
2076 x86_mov_reg_mem (code, dreg, tls_offset, 4);
2083 * emit_load_volatile_arguments:
2085 * Load volatile arguments from the stack to the original input registers.
2086 * Required before a tail call.
2089 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2091 MonoMethod *method = cfg->method;
2092 MonoMethodSignature *sig;
2097 /* FIXME: Generate intermediate code instead */
2099 sig = mono_method_signature (method);
2101 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
2103 /* This is the opposite of the code in emit_prolog */
2105 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2106 ArgInfo *ainfo = cinfo->args + i;
2108 inst = cfg->args [i];
2110 if (sig->hasthis && (i == 0))
2111 arg_type = &mono_defaults.object_class->byval_arg;
2113 arg_type = sig->params [i - sig->hasthis];
2116 * On x86, the arguments are either in their original stack locations, or in global registers.
2119 if (inst->opcode == OP_REGVAR) {
2120 g_assert (ainfo->storage == ArgOnStack);
2122 x86_mov_membase_reg (code, X86_EBP, inst->inst_offset, inst->dreg, 4);
2129 #define REAL_PRINT_REG(text,reg) \
2130 mono_assert (reg >= 0); \
2131 x86_push_reg (code, X86_EAX); \
2132 x86_push_reg (code, X86_EDX); \
2133 x86_push_reg (code, X86_ECX); \
2134 x86_push_reg (code, reg); \
2135 x86_push_imm (code, reg); \
2136 x86_push_imm (code, text " %d %p\n"); \
2137 x86_mov_reg_imm (code, X86_EAX, printf); \
2138 x86_call_reg (code, X86_EAX); \
2139 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3*4); \
2140 x86_pop_reg (code, X86_ECX); \
2141 x86_pop_reg (code, X86_EDX); \
2142 x86_pop_reg (code, X86_EAX);
2144 /* benchmark and set based on cpu */
2145 #define LOOP_ALIGNMENT 8
2146 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2149 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2154 guint8 *code = cfg->native_code + cfg->code_len;
2157 if (cfg->opt & MONO_OPT_LOOP) {
2158 int pad, align = LOOP_ALIGNMENT;
2159 /* set alignment depending on cpu */
2160 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
2162 /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
2163 x86_padding (code, pad);
2164 cfg->code_len += pad;
2165 bb->native_offset = cfg->code_len;
2169 if (cfg->verbose_level > 2)
2170 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2172 cpos = bb->max_offset;
2174 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2175 MonoProfileCoverageInfo *cov = cfg->coverage_info;
2176 g_assert (!cfg->compile_aot);
2179 cov->data [bb->dfn].cil_code = bb->cil_code;
2180 /* this is not thread safe, but good enough */
2181 x86_inc_mem (code, &cov->data [bb->dfn].count);
2184 offset = code - cfg->native_code;
2186 mono_debug_open_block (cfg, bb, offset);
2188 MONO_BB_FOR_EACH_INS (bb, ins) {
2189 offset = code - cfg->native_code;
2191 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2193 if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
2194 cfg->code_size *= 2;
2195 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2196 code = cfg->native_code + offset;
2197 mono_jit_stats.code_reallocs++;
2200 if (cfg->debug_info)
2201 mono_debug_record_line_number (cfg, ins, offset);
2203 switch (ins->opcode) {
2205 x86_mul_reg (code, ins->sreg2, TRUE);
2208 x86_mul_reg (code, ins->sreg2, FALSE);
2210 case OP_X86_SETEQ_MEMBASE:
2211 case OP_X86_SETNE_MEMBASE:
2212 x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ? X86_CC_EQ : X86_CC_NE,
2213 ins->inst_basereg, ins->inst_offset, TRUE);
2215 case OP_STOREI1_MEMBASE_IMM:
2216 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
2218 case OP_STOREI2_MEMBASE_IMM:
2219 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
2221 case OP_STORE_MEMBASE_IMM:
2222 case OP_STOREI4_MEMBASE_IMM:
2223 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
2225 case OP_STOREI1_MEMBASE_REG:
2226 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
2228 case OP_STOREI2_MEMBASE_REG:
2229 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
2231 case OP_STORE_MEMBASE_REG:
2232 case OP_STOREI4_MEMBASE_REG:
2233 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
2235 case OP_STORE_MEM_IMM:
2236 x86_mov_mem_imm (code, ins->inst_p0, ins->inst_c0, 4);
2240 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2242 x86_mov_reg_mem (code, ins->dreg, ins->inst_p0, 4);
2246 /* These are created by the cprop pass so they use inst_imm as the source */
2247 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2250 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE);
2253 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE);
2255 case OP_LOAD_MEMBASE:
2256 case OP_LOADI4_MEMBASE:
2257 case OP_LOADU4_MEMBASE:
2258 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
2260 case OP_LOADU1_MEMBASE:
2261 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
2263 case OP_LOADI1_MEMBASE:
2264 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
2266 case OP_LOADU2_MEMBASE:
2267 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
2269 case OP_LOADI2_MEMBASE:
2270 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
2272 case OP_ICONV_TO_I1:
2274 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2276 case OP_ICONV_TO_I2:
2278 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2280 case OP_ICONV_TO_U1:
2281 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
2283 case OP_ICONV_TO_U2:
2284 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
2288 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
2290 case OP_COMPARE_IMM:
2291 case OP_ICOMPARE_IMM:
2292 x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
2294 case OP_X86_COMPARE_MEMBASE_REG:
2295 x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2297 case OP_X86_COMPARE_MEMBASE_IMM:
2298 x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2300 case OP_X86_COMPARE_MEMBASE8_IMM:
2301 x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2303 case OP_X86_COMPARE_REG_MEMBASE:
2304 x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
2306 case OP_X86_COMPARE_MEM_IMM:
2307 x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
2309 case OP_X86_TEST_NULL:
2310 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
2312 case OP_X86_ADD_MEMBASE_IMM:
2313 x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2315 case OP_X86_ADD_REG_MEMBASE:
2316 x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
2318 case OP_X86_SUB_MEMBASE_IMM:
2319 x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2321 case OP_X86_SUB_REG_MEMBASE:
2322 x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
2324 case OP_X86_AND_MEMBASE_IMM:
2325 x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2327 case OP_X86_OR_MEMBASE_IMM:
2328 x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2330 case OP_X86_XOR_MEMBASE_IMM:
2331 x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2333 case OP_X86_ADD_MEMBASE_REG:
2334 x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2336 case OP_X86_SUB_MEMBASE_REG:
2337 x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2339 case OP_X86_AND_MEMBASE_REG:
2340 x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2342 case OP_X86_OR_MEMBASE_REG:
2343 x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2345 case OP_X86_XOR_MEMBASE_REG:
2346 x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2348 case OP_X86_INC_MEMBASE:
2349 x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
2351 case OP_X86_INC_REG:
2352 x86_inc_reg (code, ins->dreg);
2354 case OP_X86_DEC_MEMBASE:
2355 x86_dec_membase (code, ins->inst_basereg, ins->inst_offset);
2357 case OP_X86_DEC_REG:
2358 x86_dec_reg (code, ins->dreg);
2360 case OP_X86_MUL_REG_MEMBASE:
2361 x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
2363 case OP_X86_AND_REG_MEMBASE:
2364 x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset);
2366 case OP_X86_OR_REG_MEMBASE:
2367 x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset);
2369 case OP_X86_XOR_REG_MEMBASE:
2370 x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset);
2373 x86_breakpoint (code);
2375 case OP_RELAXED_NOP:
2376 x86_prefix (code, X86_REP_PREFIX);
2384 case OP_DUMMY_STORE:
2385 case OP_NOT_REACHED:
2391 x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
2395 x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
2400 x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
2404 x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
2409 x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
2413 x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
2418 x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
2422 x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
2425 x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
2429 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
2434 * The code is the same for div/rem; the allocator will allocate dreg
2435 * to EAX/EDX as appropriate.
2437 if (ins->sreg2 == X86_EDX) {
2438 /* cdq clobbers this */
2439 x86_push_reg (code, ins->sreg2);
2441 x86_div_membase (code, X86_ESP, 0, TRUE);
2442 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2445 x86_div_reg (code, ins->sreg2, TRUE);
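/* Unsigned division (below): EDX is zeroed with XOR instead of being sign-extended with CDQ. */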
2450 if (ins->sreg2 == X86_EDX) {
2451 x86_push_reg (code, ins->sreg2);
2452 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2453 x86_div_membase (code, X86_ESP, 0, FALSE);
2454 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2456 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2457 x86_div_reg (code, ins->sreg2, FALSE);
2461 x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2463 x86_div_reg (code, ins->sreg2, TRUE);
2466 int power = mono_is_power_of_two (ins->inst_imm);
2468 g_assert (ins->sreg1 == X86_EAX);
2469 g_assert (ins->dreg == X86_EAX);
2470 g_assert (power >= 0);
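/* The remainder by a power of two is computed branch-free: the power == 1 case uses the
 * xor/sub trick described below, while the general case biases negative dividends,
 * masks with (2^power - 1), and then removes the bias (gcc-derived sequence). */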
2473 /* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
2475 x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
2477 * If the dividend is >= 0, this does nothing. If it is negative, it
2478 * transforms %eax=0 into %eax=0 and %eax=1 into %eax=-1.
2480 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
2481 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
2483 /* Based on gcc code */
2485 /* Add compensation for negative dividends */
2487 x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
2488 x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
2489 /* Compute remainder */
2490 x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
2491 /* Remove compensation */
2492 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
2497 x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
2501 x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
2504 x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
2508 x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
2511 g_assert (ins->sreg2 == X86_ECX);
2512 x86_shift_reg (code, X86_SHL, ins->dreg);
2515 g_assert (ins->sreg2 == X86_ECX);
2516 x86_shift_reg (code, X86_SAR, ins->dreg);
2520 x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
2523 case OP_ISHR_UN_IMM:
2524 x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
2527 g_assert (ins->sreg2 == X86_ECX);
2528 x86_shift_reg (code, X86_SHR, ins->dreg);
2532 x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
2535 guint8 *jump_to_end;
2537 /* handle shifts below 32 bits */
2538 x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
2539 x86_shift_reg (code, X86_SHL, ins->sreg1);
2541 x86_test_reg_imm (code, X86_ECX, 32);
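/* x86 shift instructions take the count modulo 32, so bit 5 of ECX (value 32) tells
 * whether the requested 64-bit shift was >= 32 and the halves must be fixed up below. */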
2542 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
2544 /* handle shifts of 32 bits and above */
2545 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2546 x86_clear_reg (code, ins->sreg1);
2548 x86_patch (jump_to_end, code);
2552 guint8 *jump_to_end;
2554 /* handle shifts below 32 bits */
2555 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2556 x86_shift_reg (code, X86_SAR, ins->backend.reg3);
2558 x86_test_reg_imm (code, X86_ECX, 32);
2559 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2561 /* handle shifts over 31 bits */
2562 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2563 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);
2565 x86_patch (jump_to_end, code);
2569 guint8 *jump_to_end;
2571 /* handle shifts below 32 bits */
2572 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2573 x86_shift_reg (code, X86_SHR, ins->backend.reg3);
2575 x86_test_reg_imm (code, X86_ECX, 32);
2576 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2578 /* handle shifts over 31 bits */
2579 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2580 x86_clear_reg (code, ins->backend.reg3);
2582 x86_patch (jump_to_end, code);
2586 if (ins->inst_imm >= 32) {
2587 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2588 x86_clear_reg (code, ins->sreg1);
2589 x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
2591 x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
2592 x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
2596 if (ins->inst_imm >= 32) {
2597 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2598 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
2599 x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
2601 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2602 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
2605 case OP_LSHR_UN_IMM:
2606 if (ins->inst_imm >= 32) {
2607 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2608 x86_clear_reg (code, ins->backend.reg3);
2609 x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
2611 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2612 x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
2616 x86_not_reg (code, ins->sreg1);
2619 x86_neg_reg (code, ins->sreg1);
2623 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2627 switch (ins->inst_imm) {
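/* Strength-reduce multiplies by small constants to LEA/ADD/SHL sequences, which are
 * cheaper than IMUL; the default case falls back to imul with an immediate. */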
2631 if (ins->dreg != ins->sreg1)
2632 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2633 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2636 /* LEA r1, [r2 + r2*2] */
2637 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2640 /* LEA r1, [r2 + r2*4] */
2641 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2644 /* LEA r1, [r2 + r2*2] */
2646 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2647 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2650 /* LEA r1, [r2 + r2*8] */
2651 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
2654 /* LEA r1, [r2 + r2*4] */
2656 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2657 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2660 /* LEA r1, [r2 + r2*2] */
2662 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2663 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2666 /* LEA r1, [r2 + r2*4] */
2667 /* LEA r1, [r1 + r1*4] */
2668 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2669 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2672 /* LEA r1, [r2 + r2*4] */
2673 /* SHL r1, 2           */
2674 /* LEA r1, [r1 + r1*4] */
2675 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2676 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2677 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2680 x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2685 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2686 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2688 case OP_IMUL_OVF_UN: {
2689 /* the mul operation and the exception check should most likely be split */
2690 int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
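/* MUL implicitly multiplies by EAX and writes the result to EDX:EAX, so EAX and EDX
 * may have to be saved and restored around the operation depending on the allocated registers. */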
2691 /*g_assert (ins->sreg2 == X86_EAX);
2692 g_assert (ins->dreg == X86_EAX);*/
2693 if (ins->sreg2 == X86_EAX) {
2694 non_eax_reg = ins->sreg1;
2695 } else if (ins->sreg1 == X86_EAX) {
2696 non_eax_reg = ins->sreg2;
2698 /* no need to save since we're going to store to it anyway */
2699 if (ins->dreg != X86_EAX) {
2701 x86_push_reg (code, X86_EAX);
2703 x86_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
2704 non_eax_reg = ins->sreg2;
2706 if (ins->dreg == X86_EDX) {
2709 x86_push_reg (code, X86_EAX);
2711 } else if (ins->dreg != X86_EAX) {
2713 x86_push_reg (code, X86_EDX);
2715 x86_mul_reg (code, non_eax_reg, FALSE);
2716 /* save before the check since pop and mov don't change the flags */
2717 if (ins->dreg != X86_EAX)
2718 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
2720 x86_pop_reg (code, X86_EDX);
2722 x86_pop_reg (code, X86_EAX);
2723 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2727 x86_mov_reg_imm (code, ins->dreg, ins->inst_c0);
2730 g_assert_not_reached ();
2731 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2732 x86_mov_reg_imm (code, ins->dreg, 0);
2735 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2736 x86_mov_reg_imm (code, ins->dreg, 0);
2738 case OP_LOAD_GOTADDR:
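/* PIC trick: a call with a zero displacement pushes the address of the next instruction
 * (the pop below); popping it yields the current instruction pointer, to which the GOT
 * offset is then added. */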
2739 x86_call_imm (code, 0);
2741 * The patch needs to point to the pop, since the GOT offset needs
2742 * to be added to that address.
2744 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
2745 x86_pop_reg (code, ins->dreg);
2746 x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
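/* 0xf0f0f0f0 is a placeholder displacement; mono_arch_patch_code () later rewrites it
 * using the recorded patch info. The same placeholder is used by the GOT entry cases below. */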
2749 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2750 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4);
2752 case OP_X86_PUSH_GOT_ENTRY:
2753 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2754 x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0);
2757 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2761 * Note: this 'frame destruction' logic is useful for tail calls, too.
2762 * Keep in sync with the code in emit_epilog.
2766 /* FIXME: no tracing support... */
2767 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2768 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
2769 /* reset offset to make max_len work */
2770 offset = code - cfg->native_code;
2772 g_assert (!cfg->method->save_lmf);
2774 code = emit_load_volatile_arguments (cfg, code);
2776 if (cfg->used_int_regs & (1 << X86_EBX))
2778 if (cfg->used_int_regs & (1 << X86_EDI))
2780 if (cfg->used_int_regs & (1 << X86_ESI))
2783 x86_lea_membase (code, X86_ESP, X86_EBP, pos);
2785 if (cfg->used_int_regs & (1 << X86_ESI))
2786 x86_pop_reg (code, X86_ESI);
2787 if (cfg->used_int_regs & (1 << X86_EDI))
2788 x86_pop_reg (code, X86_EDI);
2789 if (cfg->used_int_regs & (1 << X86_EBX))
2790 x86_pop_reg (code, X86_EBX);
2792 /* restore ESP/EBP */
2794 offset = code - cfg->native_code;
2795 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2796 x86_jump32 (code, 0);
2798 cfg->disable_aot = TRUE;
2802 /* ensure ins->sreg1 is not NULL
2803 * note that cmp DWORD PTR [eax], eax is one byte shorter than
2804 * cmp DWORD PTR [eax], 0
2806 x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1);
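/* Store the address of the signature cookie (at EBP + cfg->sig_cookie) through sreg1,
 * using whichever of EAX/ECX does not alias sreg1 as a scratch register. */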
2809 int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX;
2810 x86_push_reg (code, hreg);
2811 x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie);
2812 x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4);
2813 x86_pop_reg (code, hreg);
2822 call = (MonoCallInst*)ins;
2823 if (ins->flags & MONO_INST_HAS_METHOD)
2824 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2826 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2827 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2828 /* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
2829 * bytes to pop, we want to use pops. GCC does this (note it won't happen
2830 * for P4 or i686 because gcc will avoid using pop/push at all), but we aren't
2831 * smart enough to do that optimization yet.
2833 * It turns out that on my P4, doing two pops for 8 bytes on the stack makes
2834 * the mcs bootstrap slow down. However, doing 1 pop for 4 bytes gives a small
2835 * but noticeable speedup (most likely from locality benefits). People with other
2836 * processors should check on theirs to see what happens.
2838 if (call->stack_usage == 4) {
2839 /* we want to use registers that won't get used soon, so use
2840 * ecx, as eax will get allocated first. edx is used by long calls,
2841 * so we can't use that.
2844 x86_pop_reg (code, X86_ECX);
2846 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2849 code = emit_move_return_value (cfg, ins, code);
2855 case OP_VOIDCALL_REG:
2857 call = (MonoCallInst*)ins;
2858 x86_call_reg (code, ins->sreg1);
2859 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2860 if (call->stack_usage == 4)
2861 x86_pop_reg (code, X86_ECX);
2863 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2865 code = emit_move_return_value (cfg, ins, code);
2867 case OP_FCALL_MEMBASE:
2868 case OP_LCALL_MEMBASE:
2869 case OP_VCALL_MEMBASE:
2870 case OP_VCALL2_MEMBASE:
2871 case OP_VOIDCALL_MEMBASE:
2872 case OP_CALL_MEMBASE:
2873 call = (MonoCallInst*)ins;
2874 x86_call_membase (code, ins->sreg1, ins->inst_offset);
2875 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2876 if (call->stack_usage == 4)
2877 x86_pop_reg (code, X86_ECX);
2879 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2881 code = emit_move_return_value (cfg, ins, code);
2885 x86_push_reg (code, ins->sreg1);
2887 case OP_X86_PUSH_IMM:
2888 x86_push_imm (code, ins->inst_imm);
2890 case OP_X86_PUSH_MEMBASE:
2891 x86_push_membase (code, ins->inst_basereg, ins->inst_offset);
2893 case OP_X86_PUSH_OBJ:
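/* Push a value type by value: reserve inst_imm bytes on the stack, then copy
 * (inst_imm >> 2) dwords into it with rep movsd; ECX/ESI/EDI are saved and
 * restored around the copy. */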
2894 x86_alu_reg_imm (code, X86_SUB, X86_ESP, ins->inst_imm);
2895 x86_push_reg (code, X86_EDI);
2896 x86_push_reg (code, X86_ESI);
2897 x86_push_reg (code, X86_ECX);
2898 if (ins->inst_offset)
2899 x86_lea_membase (code, X86_ESI, ins->inst_basereg, ins->inst_offset);
2901 x86_mov_reg_reg (code, X86_ESI, ins->inst_basereg, 4);
2902 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
2903 x86_mov_reg_imm (code, X86_ECX, (ins->inst_imm >> 2));
2905 x86_prefix (code, X86_REP_PREFIX);
2907 x86_pop_reg (code, X86_ECX);
2908 x86_pop_reg (code, X86_ESI);
2909 x86_pop_reg (code, X86_EDI);
2912 x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
2914 case OP_X86_LEA_MEMBASE:
2915 x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
2918 x86_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
2921 /* keep alignment */
2922 x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
2923 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2924 code = mono_emit_stack_alloc (code, ins);
2925 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2927 case OP_LOCALLOC_IMM: {
2928 guint32 size = ins->inst_imm;
2929 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2931 if (ins->flags & MONO_INST_INIT) {
2932 /* FIXME: Optimize this */
2933 x86_mov_reg_imm (code, ins->dreg, size);
2934 ins->sreg1 = ins->dreg;
2936 code = mono_emit_stack_alloc (code, ins);
2937 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2939 x86_alu_reg_imm (code, X86_SUB, X86_ESP, size);
2940 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2945 x86_push_reg (code, ins->sreg1);
2946 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2947 (gpointer)"mono_arch_throw_exception");
2951 x86_push_reg (code, ins->sreg1);
2952 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2953 (gpointer)"mono_arch_rethrow_exception");
2956 case OP_CALL_HANDLER:
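/* Invoke the finally/filter handler block: the SUB keeps ESP aligned across the call
 * (which pushes a 4-byte return address); the call target is patched to the handler's
 * basic block. */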
2957 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2958 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2959 x86_call_imm (code, 0);
2960 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2962 case OP_START_HANDLER: {
2963 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
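/* Save ESP into the handler's spill variable so OP_ENDFINALLY/OP_ENDFILTER can restore
 * it when the handler exits. */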
2964 x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4);
2967 case OP_ENDFINALLY: {
2968 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2969 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2973 case OP_ENDFILTER: {
2974 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2975 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2976 /* The local allocator will put the result into EAX */
2982 ins->inst_c0 = code - cfg->native_code;
2985 if (ins->flags & MONO_INST_BRLABEL) {
2986 if (ins->inst_i0->inst_c0) {
2987 x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2989 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2990 if ((cfg->opt & MONO_OPT_BRANCH) &&
2991 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
2992 x86_jump8 (code, 0);
2994 x86_jump32 (code, 0);
2997 if (ins->inst_target_bb->native_offset) {
2998 x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3000 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3001 if ((cfg->opt & MONO_OPT_BRANCH) &&
3002 x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3003 x86_jump8 (code, 0);
3005 x86_jump32 (code, 0);
3010 x86_jump_reg (code, ins->sreg1);
3023 x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
3024 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3026 case OP_COND_EXC_EQ:
3027 case OP_COND_EXC_NE_UN:
3028 case OP_COND_EXC_LT:
3029 case OP_COND_EXC_LT_UN:
3030 case OP_COND_EXC_GT:
3031 case OP_COND_EXC_GT_UN:
3032 case OP_COND_EXC_GE:
3033 case OP_COND_EXC_GE_UN:
3034 case OP_COND_EXC_LE:
3035 case OP_COND_EXC_LE_UN:
3036 case OP_COND_EXC_IEQ:
3037 case OP_COND_EXC_INE_UN:
3038 case OP_COND_EXC_ILT:
3039 case OP_COND_EXC_ILT_UN:
3040 case OP_COND_EXC_IGT:
3041 case OP_COND_EXC_IGT_UN:
3042 case OP_COND_EXC_IGE:
3043 case OP_COND_EXC_IGE_UN:
3044 case OP_COND_EXC_ILE:
3045 case OP_COND_EXC_ILE_UN:
3046 EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
3048 case OP_COND_EXC_OV:
3049 case OP_COND_EXC_NO:
3051 case OP_COND_EXC_NC:
3052 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
3054 case OP_COND_EXC_IOV:
3055 case OP_COND_EXC_INO:
3056 case OP_COND_EXC_IC:
3057 case OP_COND_EXC_INC:
3058 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), ins->inst_p1);
3070 EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
3078 case OP_CMOV_INE_UN:
3079 case OP_CMOV_IGE_UN:
3080 case OP_CMOV_IGT_UN:
3081 case OP_CMOV_ILE_UN:
3082 case OP_CMOV_ILT_UN:
3083 g_assert (ins->dreg == ins->sreg1);
3084 x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
3087 /* floating point opcodes */
3089 double d = *(double *)ins->inst_p0;
3091 if ((d == 0.0) && (mono_signbit (d) == 0)) {
3093 } else if (d == 1.0) {
3096 if (cfg->compile_aot) {
3097 guint32 *val = (guint32*)&d;
3098 x86_push_imm (code, val [1]);
3099 x86_push_imm (code, val [0]);
3100 x86_fld_membase (code, X86_ESP, 0, TRUE);
3101 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3104 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0);
3105 x86_fld (code, NULL, TRUE);
3111 float f = *(float *)ins->inst_p0;
3113 if ((f == 0.0) && (mono_signbit (f) == 0)) {
3115 } else if (f == 1.0) {
3118 if (cfg->compile_aot) {
3119 guint32 val = *(guint32*)&f;
3120 x86_push_imm (code, val);
3121 x86_fld_membase (code, X86_ESP, 0, FALSE);
3122 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
3125 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0);
3126 x86_fld (code, NULL, FALSE);
3131 case OP_STORER8_MEMBASE_REG:
3132 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
3134 case OP_LOADR8_SPILL_MEMBASE:
3135 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3138 case OP_LOADR8_MEMBASE:
3139 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3141 case OP_STORER4_MEMBASE_REG:
3142 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
3144 case OP_LOADR4_MEMBASE:
3145 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3147 case OP_ICONV_TO_R4: /* FIXME: change precision */
3148 case OP_ICONV_TO_R8:
3149 x86_push_reg (code, ins->sreg1);
3150 x86_fild_membase (code, X86_ESP, 0, FALSE);
3151 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
3153 case OP_ICONV_TO_R_UN:
3154 x86_push_imm (code, 0);
3155 x86_push_reg (code, ins->sreg1);
3156 x86_fild_membase (code, X86_ESP, 0, TRUE);
3157 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3159 case OP_X86_FP_LOAD_I8:
3160 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3162 case OP_X86_FP_LOAD_I4:
3163 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3165 case OP_FCONV_TO_R4:
3166 /* FIXME: nothing to do ?? */
3168 case OP_FCONV_TO_I1:
3169 code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE);
3171 case OP_FCONV_TO_U1:
3172 code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE);
3174 case OP_FCONV_TO_I2:
3175 code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE);
3177 case OP_FCONV_TO_U2:
3178 code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE);
3180 case OP_FCONV_TO_I4:
3182 code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
3184 case OP_FCONV_TO_I8:
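/* x87 fistp rounds according to the control word, so temporarily set the rounding
 * control bits (OR with 0xc00 = round toward zero) to get C-style truncation, then
 * restore the saved control word afterwards. */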
3185 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
3186 x86_fnstcw_membase(code, X86_ESP, 0);
3187 x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2);
3188 x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00);
3189 x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2);
3190 x86_fldcw_membase (code, X86_ESP, 2);
3191 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
3192 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
3193 x86_pop_reg (code, ins->dreg);
3194 x86_pop_reg (code, ins->backend.reg3);
3195 x86_fldcw_membase (code, X86_ESP, 0);
3196 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
3198 case OP_LCONV_TO_R8_2:
3199 x86_push_reg (code, ins->sreg2);
3200 x86_push_reg (code, ins->sreg1);
3201 x86_fild_membase (code, X86_ESP, 0, TRUE);
3202 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3204 case OP_LCONV_TO_R4_2:
3205 x86_push_reg (code, ins->sreg2);
3206 x86_push_reg (code, ins->sreg1);
3207 x86_fild_membase (code, X86_ESP, 0, TRUE);
3208 /* Change precision */
3209 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
3210 x86_fld_membase (code, X86_ESP, 0, FALSE);
3211 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3213 case OP_LCONV_TO_R_UN:
3214 case OP_LCONV_TO_R_UN_2: {
3215 static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
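/* mn encodes 2^64 as an x87 80-bit extended double (little endian); it is added back
 * below when the source, interpreted as a signed 64-bit value, was negative. */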
3218 /* load 64bit integer to FP stack */
3219 x86_push_imm (code, 0);
3220 x86_push_reg (code, ins->sreg2);
3221 x86_push_reg (code, ins->sreg1);
3222 x86_fild_membase (code, X86_ESP, 0, TRUE);
3223 /* store as 80bit FP value */
3224 x86_fst80_membase (code, X86_ESP, 0);
3226 /* test if lreg is negative */
3227 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3228 br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
3230 /* add correction constant mn */
3231 x86_fld80_mem (code, mn);
3232 x86_fld80_membase (code, X86_ESP, 0);
3233 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3234 x86_fst80_membase (code, X86_ESP, 0);
3236 x86_patch (br, code);
3238 x86_fld80_membase (code, X86_ESP, 0);
3239 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12);
3243 case OP_LCONV_TO_OVF_I:
3244 case OP_LCONV_TO_OVF_I4_2: {
3245 guint8 *br [3], *label [1];
3249 * Valid ints: 0xffffffff:80000000 to 00000000:7fffffff
3251 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
3253 /* If the low word top bit is set, see if we are negative */
3254 br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
3255 /* We are not negative (no top bit set); check that our top word is zero */
3256 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3257 br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
3260 /* throw exception */
3261 tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
3263 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
3264 if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
3265 x86_jump8 (code, 0);
3267 x86_jump32 (code, 0);
3269 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
3270 x86_jump32 (code, 0);
3274 x86_patch (br [0], code);
3275 /* our top bit is set, check that the top word is 0xffffffff */
3276 x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
3278 x86_patch (br [1], code);
3279 /* nope, emit exception */
3280 br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
3281 x86_patch (br [2], label [0]);
3283 if (ins->dreg != ins->sreg1)
3284 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
3288 /* Not needed on the fp stack */
3291 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3294 x86_fp_op_reg (code, X86_FSUB, 1, TRUE);
3297 x86_fp_op_reg (code, X86_FMUL, 1, TRUE);
3300 x86_fp_op_reg (code, X86_FDIV, 1, TRUE);
3308 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3313 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3320 * it really doesn't make sense to inline all this code,
3321 * it's here just to show that things may not be as simple
3322 * as they appear.
3324 guchar *check_pos, *end_tan, *pop_jump;
3325 x86_push_reg (code, X86_EAX);
3328 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3330 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3331 x86_fstp (code, 0); /* pop the 1.0 */
3333 x86_jump8 (code, 0);
3335 x86_fp_op (code, X86_FADD, 0);
3339 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3341 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3344 x86_patch (pop_jump, code);
3345 x86_fstp (code, 0); /* pop the 1.0 */
3346 x86_patch (check_pos, code);
3347 x86_patch (end_tan, code);
3349 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3350 x86_pop_reg (code, X86_EAX);
3357 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3363 g_assert (cfg->opt & MONO_OPT_CMOV);
3364 g_assert (ins->dreg == ins->sreg1);
3365 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3366 x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
3369 g_assert (cfg->opt & MONO_OPT_CMOV);
3370 g_assert (ins->dreg == ins->sreg1);
3371 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3372 x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
3375 g_assert (cfg->opt & MONO_OPT_CMOV);
3376 g_assert (ins->dreg == ins->sreg1);
3377 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3378 x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
3381 g_assert (cfg->opt & MONO_OPT_CMOV);
3382 g_assert (ins->dreg == ins->sreg1);
3383 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3384 x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
3390 x86_fxch (code, ins->inst_imm);
3395 x86_push_reg (code, X86_EAX);
3396 /* we need to exchange ST(0) with ST(1) */
3399 /* this requires a loop, because fprem sometimes
3400 * returns a partial remainder */
3402 /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
3403 /* x86_fprem1 (code); */
3406 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
3408 x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
3413 x86_pop_reg (code, X86_EAX);
3417 if (cfg->opt & MONO_OPT_FCMOV) {
3418 x86_fcomip (code, 1);
3422 /* this overwrites EAX */
3423 EMIT_FPCOMPARE(code);
3424 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3427 if (cfg->opt & MONO_OPT_FCMOV) {
3428 /* zeroing the register at the start results in
3429 * shorter and faster code (we can also remove the widening op)
3431 guchar *unordered_check;
3432 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3433 x86_fcomip (code, 1);
3435 unordered_check = code;
3436 x86_branch8 (code, X86_CC_P, 0, FALSE);
3437 x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
3438 x86_patch (unordered_check, code);
3441 if (ins->dreg != X86_EAX)
3442 x86_push_reg (code, X86_EAX);
3444 EMIT_FPCOMPARE(code);
3445 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3446 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3447 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3448 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3450 if (ins->dreg != X86_EAX)
3451 x86_pop_reg (code, X86_EAX);
3455 if (cfg->opt & MONO_OPT_FCMOV) {
3456 /* zeroing the register at the start results in
3457 * shorter and faster code (we can also remove the widening op)
3459 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3460 x86_fcomip (code, 1);
3462 if (ins->opcode == OP_FCLT_UN) {
3463 guchar *unordered_check = code;
3464 guchar *jump_to_end;
3465 x86_branch8 (code, X86_CC_P, 0, FALSE);
3466 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3468 x86_jump8 (code, 0);
3469 x86_patch (unordered_check, code);
3470 x86_inc_reg (code, ins->dreg);
3471 x86_patch (jump_to_end, code);
3473 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3477 if (ins->dreg != X86_EAX)
3478 x86_push_reg (code, X86_EAX);
3480 EMIT_FPCOMPARE(code);
3481 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3482 if (ins->opcode == OP_FCLT_UN) {
3483 guchar *is_not_zero_check, *end_jump;
3484 is_not_zero_check = code;
3485 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3487 x86_jump8 (code, 0);
3488 x86_patch (is_not_zero_check, code);
3489 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3491 x86_patch (end_jump, code);
3493 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3494 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3496 if (ins->dreg != X86_EAX)
3497 x86_pop_reg (code, X86_EAX);
3501 if (cfg->opt & MONO_OPT_FCMOV) {
3502 /* zeroing the register at the start results in
3503 * shorter and faster code (we can also remove the widening op)
3505 guchar *unordered_check;
3506 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3507 x86_fcomip (code, 1);
3509 if (ins->opcode == OP_FCGT) {
3510 unordered_check = code;
3511 x86_branch8 (code, X86_CC_P, 0, FALSE);
3512 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3513 x86_patch (unordered_check, code);
3515 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3519 if (ins->dreg != X86_EAX)
3520 x86_push_reg (code, X86_EAX);
3522 EMIT_FPCOMPARE(code);
3523 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3524 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3525 if (ins->opcode == OP_FCGT_UN) {
3526 guchar *is_not_zero_check, *end_jump;
3527 is_not_zero_check = code;
3528 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3530 x86_jump8 (code, 0);
3531 x86_patch (is_not_zero_check, code);
3532 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3534 x86_patch (end_jump, code);
3536 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3537 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3539 if (ins->dreg != X86_EAX)
3540 x86_pop_reg (code, X86_EAX);
3543 if (cfg->opt & MONO_OPT_FCMOV) {
3544 guchar *jump = code;
3545 x86_branch8 (code, X86_CC_P, 0, TRUE);
3546 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3547 x86_patch (jump, code);
3550 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3551 EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
3554 /* Branch if C013 != 100 */
3555 if (cfg->opt & MONO_OPT_FCMOV) {
3556 /* branch if !ZF or (PF|CF) */
3557 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3558 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3559 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
3562 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3563 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3566 if (cfg->opt & MONO_OPT_FCMOV) {
3567 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3570 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3573 if (cfg->opt & MONO_OPT_FCMOV) {
3574 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3575 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3578 if (ins->opcode == OP_FBLT_UN) {
3579 guchar *is_not_zero_check, *end_jump;
3580 is_not_zero_check = code;
3581 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3583 x86_jump8 (code, 0);
3584 x86_patch (is_not_zero_check, code);
3585 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3587 x86_patch (end_jump, code);
3589 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3593 if (cfg->opt & MONO_OPT_FCMOV) {
3594 if (ins->opcode == OP_FBGT) {
3597 /* skip branch if C1=1 */
3599 x86_branch8 (code, X86_CC_P, 0, FALSE);
3600 /* branch if (C0 | C3) = 1 */
3601 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3602 x86_patch (br1, code);
3604 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3608 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3609 if (ins->opcode == OP_FBGT_UN) {
3610 guchar *is_not_zero_check, *end_jump;
3611 is_not_zero_check = code;
3612 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3614 x86_jump8 (code, 0);
3615 x86_patch (is_not_zero_check, code);
3616 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3618 x86_patch (end_jump, code);
3620 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3623 /* Branch if C013 == 100 or 001 */
3624 if (cfg->opt & MONO_OPT_FCMOV) {
3627 /* skip branch if C1=1 */
3629 x86_branch8 (code, X86_CC_P, 0, FALSE);
3630 /* branch if (C0 | C3) = 1 */
3631 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
3632 x86_patch (br1, code);
3635 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3636 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3637 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3638 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3641 /* Branch if C013 == 000 */
3642 if (cfg->opt & MONO_OPT_FCMOV) {
3643 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
3646 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3649 /* Branch if C013=000 or 100 */
3650 if (cfg->opt & MONO_OPT_FCMOV) {
3653 /* skip branch if C1=1 */
3655 x86_branch8 (code, X86_CC_P, 0, FALSE);
3656 /* branch if C0=0 */
3657 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
3658 x86_patch (br1, code);
3661 x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
3662 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
3663 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3666 /* Branch if C013 != 001 */
3667 if (cfg->opt & MONO_OPT_FCMOV) {
3668 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3669 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
3672 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3673 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3677 x86_push_reg (code, X86_EAX);
3680 x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
3681 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3682 x86_pop_reg (code, X86_EAX);
3684 /* Have to clean up the fp stack before throwing the exception */
3686 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3689 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
3691 x86_patch (br1, code);
3695 code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset);
3698 case OP_MEMORY_BARRIER: {
3699 /* Not needed on x86 */
3702 case OP_ATOMIC_ADD_I4: {
3703 int dreg = ins->dreg;
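/* LOCK XADD atomically adds the register into memory and leaves the previous memory
 * value in the register; a scratch register is used when dreg would alias the base register. */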
3705 if (dreg == ins->inst_basereg) {
3706 x86_push_reg (code, ins->sreg2);
3710 if (dreg != ins->sreg2)
3711 x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);
3713 x86_prefix (code, X86_LOCK_PREFIX);
3714 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
3716 if (dreg != ins->dreg) {
3717 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3718 x86_pop_reg (code, dreg);
3723 case OP_ATOMIC_ADD_NEW_I4: {
3724 int dreg = ins->dreg;
3726 /* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
3727 if (ins->sreg2 == dreg) {
3728 if (dreg == X86_EBX) {
3730 if (ins->inst_basereg == X86_EDI)
3734 if (ins->inst_basereg == X86_EBX)
3737 } else if (ins->inst_basereg == dreg) {
3738 if (dreg == X86_EBX) {
3740 if (ins->sreg2 == X86_EDI)
3744 if (ins->sreg2 == X86_EBX)
3749 if (dreg != ins->dreg) {
3750 x86_push_reg (code, dreg);
3753 x86_mov_reg_reg (code, dreg, ins->sreg2, 4);
3754 x86_prefix (code, X86_LOCK_PREFIX);
3755 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
3756 /* dreg contains the old value, add with sreg2 value */
3757 x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);
3759 if (ins->dreg != dreg) {
3760 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3761 x86_pop_reg (code, dreg);
3766 case OP_ATOMIC_EXCHANGE_I4:
3767 case OP_ATOMIC_CAS_IMM_I4: {
3769 int sreg2 = ins->sreg2;
3770 int breg = ins->inst_basereg;
3772 /* cmpxchg uses eax as comparand, need to make sure we can use it
3773 * hack to overcome limits in x86 reg allocator
3774 * (req: dreg == eax and sreg2 != eax and breg != eax)
3776 g_assert (ins->dreg == X86_EAX);
3778 /* We need the EAX reg for the cmpxchg */
3779 if (ins->sreg2 == X86_EAX) {
3780 x86_push_reg (code, X86_EDX);
3781 x86_mov_reg_reg (code, X86_EDX, X86_EAX, 4);
3785 if (breg == X86_EAX) {
3786 x86_push_reg (code, X86_ESI);
3787 x86_mov_reg_reg (code, X86_ESI, X86_EAX, 4);
3791 if (ins->opcode == OP_ATOMIC_CAS_IMM_I4) {
3792 x86_mov_reg_imm (code, X86_EAX, ins->backend.data);
3794 x86_prefix (code, X86_LOCK_PREFIX);
3795 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3797 x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);
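/* Exchange loop: load the current value into EAX (the comparand), attempt LOCK CMPXCHG,
 * and retry while another thread changed the value in the meantime (ZF clear). */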
3799 br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
3800 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3801 br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
3802 x86_patch (br [1], br [0]);
3805 if (breg != ins->inst_basereg)
3806 x86_pop_reg (code, X86_ESI);
3808 if (ins->sreg2 != sreg2)
3809 x86_pop_reg (code, X86_EDX);
3813 #ifdef MONO_ARCH_SIMD_INTRINSICS
3815 x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3818 x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3821 x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3824 x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3827 x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3830 x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3833 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3834 x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3837 x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3840 x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3843 x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3846 x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3849 x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
3852 x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1);
3855 x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1);
3858 x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3861 x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3864 x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3867 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1);
3870 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1);
3873 case OP_PSHUFLEW_HIGH:
3874 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3875 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1);
3877 case OP_PSHUFLEW_LOW:
3878 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3879 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0);
3882 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3883 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0);
3887 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3890 x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3893 x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3896 x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3899 x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3902 x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3905 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3906 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3909 x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3912 x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3915 x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3918 x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3921 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3924 x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3927 x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3930 x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1);
3933 case OP_EXTRACT_MASK:
3934 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1);
3938 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2);
3941 x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2);
3944 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2);
3948 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2);
3951 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2);
3954 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2);
3957 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2);
3961 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2);
3964 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2);
3967 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2);
3970 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2);
3974 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2);
3977 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2);
3980 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2);
3984 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2);
3987 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2);
3990 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2);
3994 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2);
3997 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2);
4001 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2);
4004 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2);
4007 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUD, ins->sreg1, ins->sreg2);
4011 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2);
4014 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2);
4017 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2);
4021 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2);
4024 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2);
4027 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2);
4030 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2);
4034 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2);
4037 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2);
4040 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2);
4043 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2);
4046 case OP_PSUM_ABS_DIFF:
4047 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2);
4050 case OP_UNPACK_LOWB:
4051 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2);
4053 case OP_UNPACK_LOWW:
4054 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2);
4056 case OP_UNPACK_LOWD:
4057 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2);
4059 case OP_UNPACK_LOWQ:
4060 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2);
4062 case OP_UNPACK_LOWPS:
4063 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
4065 case OP_UNPACK_LOWPD:
4066 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
4069 case OP_UNPACK_HIGHB:
4070 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2);
4072 case OP_UNPACK_HIGHW:
4073 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2);
4075 case OP_UNPACK_HIGHD:
4076 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2);
4078 case OP_UNPACK_HIGHQ:
4079 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2);
4081 case OP_UNPACK_HIGHPS:
4082 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
4084 case OP_UNPACK_HIGHPD:
4085 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
4089 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2);
4092 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2);
4095 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2);
4098 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2);
4101 case OP_PADDB_SAT_UN:
4102 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2);
4104 case OP_PSUBB_SAT_UN:
4105 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2);
4107 case OP_PADDW_SAT_UN:
4108 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2);
4110 case OP_PSUBW_SAT_UN:
4111 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2);
4115 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2);
4118 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2);
4121 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2);
4124 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2);
4128 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2);
4131 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2);
4134 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2);
4136 case OP_PMULW_HIGH_UN:
4137 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2);
4140 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2);
4144 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm);
4147 x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2);
4151 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm);
4154 x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2);
4158 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm);
4161 x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2);
4165 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm);
4168 x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2);
4172 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm);
4175 x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2);
4179 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm);
4182 x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2);
4186 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm);
4189 x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2);
4193 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm);
4196 x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2);
4200 x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
4203 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4205 case OP_STOREX_MEMBASE_REG:
4206 case OP_STOREX_MEMBASE:
4207 x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4209 case OP_LOADX_MEMBASE:
4210 x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4212 case OP_LOADX_ALIGNED_MEMBASE:
4213 x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4215 case OP_STOREX_ALIGNED_MEMBASE_REG:
4216 x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4218 case OP_STOREX_NTA_MEMBASE_REG:
4219 x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset);
4221 case OP_PREFETCH_MEMBASE:
4222 x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
4226 /* FIXME: the peephole pass should have killed this */
4227 if (ins->dreg != ins->sreg1)
4228 x86_movaps_reg_reg (code, ins->dreg, ins->sreg1);
4231 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
4233 case OP_ICONV_TO_R8_RAW:
4234 x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
4235 x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
4238 case OP_FCONV_TO_R8_X:
4239 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
4240 x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4243 case OP_XCONV_R8_TO_I4:
4244 x86_cvttsd2si (code, ins->dreg, ins->sreg1);
4245 switch (ins->backend.source_opcode) {
4246 case OP_FCONV_TO_I1:
4247 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
4249 case OP_FCONV_TO_U1:
4250 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4252 case OP_FCONV_TO_I2:
4253 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
4255 case OP_FCONV_TO_U2:
4256 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
4262 g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
4263 g_assert_not_reached ();
4266 if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
4267 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4268 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4269 g_assert_not_reached ();
4275 cfg->code_len = code - cfg->native_code;
4279 mono_arch_register_lowlevel_calls (void)
4284 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4286 MonoJumpInfo *patch_info;
4287 gboolean compile_aot = !run_cctors;
4289 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4290 unsigned char *ip = patch_info->ip.i + code;
4291 const unsigned char *target;
4293 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4296 switch (patch_info->type) {
4297 case MONO_PATCH_INFO_BB:
4298 case MONO_PATCH_INFO_LABEL:
4301 /* No need to patch these */
4306 switch (patch_info->type) {
4307 case MONO_PATCH_INFO_IP:
4308 *((gconstpointer *)(ip)) = target;
4310 case MONO_PATCH_INFO_CLASS_INIT: {
4312 /* Might already have been changed to a nop */
4313 x86_call_code (code, 0);
4314 x86_patch (ip, target);
4317 case MONO_PATCH_INFO_ABS:
4318 case MONO_PATCH_INFO_METHOD:
4319 case MONO_PATCH_INFO_METHOD_JUMP:
4320 case MONO_PATCH_INFO_INTERNAL_METHOD:
4321 case MONO_PATCH_INFO_BB:
4322 case MONO_PATCH_INFO_LABEL:
4323 case MONO_PATCH_INFO_RGCTX_FETCH:
4324 case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
4325 case MONO_PATCH_INFO_MONITOR_ENTER:
4326 case MONO_PATCH_INFO_MONITOR_EXIT:
4327 x86_patch (ip, target);
4329 case MONO_PATCH_INFO_NONE:
4332 guint32 offset = mono_arch_get_patch_offset (ip);
4333 *((gconstpointer *)(ip + offset)) = target;
4341 mono_arch_emit_prolog (MonoCompile *cfg)
4343 MonoMethod *method = cfg->method;
4345 MonoMethodSignature *sig;
4347 int alloc_size, pos, max_offset, i;
4350 cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
4352 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4353 cfg->code_size += 512;
4355 code = cfg->native_code = g_malloc (cfg->code_size);
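/* Standard frame setup: save the caller's EBP and establish the new frame pointer. */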
4357 x86_push_reg (code, X86_EBP);
4358 x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
4360 alloc_size = cfg->stack_offset;
4363 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4364 /* Might need to attach the thread to the JIT or change the domain for the callback */
4365 if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
4366 guint8 *buf, *no_domain_branch;
4368 code = mono_x86_emit_tls_get (code, X86_EAX, appdomain_tls_offset);
4369 x86_alu_reg_imm (code, X86_CMP, X86_EAX, GPOINTER_TO_UINT (cfg->domain));
4370 no_domain_branch = code;
4371 x86_branch8 (code, X86_CC_NE, 0, 0);
4372 code = mono_x86_emit_tls_get ( code, X86_EAX, lmf_tls_offset);
4373 x86_test_reg_reg (code, X86_EAX, X86_EAX);
4375 x86_branch8 (code, X86_CC_NE, 0, 0);
4376 x86_patch (no_domain_branch, code);
4377 x86_push_imm (code, cfg->domain);
4378 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4379 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4380 x86_patch (buf, code);
4381 #ifdef PLATFORM_WIN32
4382 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4383 /* FIXME: Add a separate key for LMF to avoid this */
4384 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4388 g_assert (!cfg->compile_aot);
4389 x86_push_imm (code, cfg->domain);
4390 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4391 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4395 if (method->save_lmf) {
4396 pos += sizeof (MonoLMF);
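/* The pushes below lay out a MonoLMF structure on the stack: the saved EIP, the
 * callee-saved registers, and (roughly) slots for esp/method/previous_lmf/lmf_addr
 * that are filled in afterwards. */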
4398 /* save the current IP */
4399 mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4400 x86_push_imm_template (code);
4402 /* save all caller saved regs */
4403 x86_push_reg (code, X86_EBP);
4404 x86_push_reg (code, X86_ESI);
4405 x86_push_reg (code, X86_EDI);
4406 x86_push_reg (code, X86_EBX);
4408 if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
4410 * Optimized version which uses the mono_lmf TLS variable instead of indirection
4411 * through the mono_lmf_addr TLS variable.
4413 /* %eax = previous_lmf */
4414 x86_prefix (code, X86_GS_PREFIX);
4415 x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
4416 /* skip esp + method_info + lmf */
4417 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
4418 /* push previous_lmf */
4419 x86_push_reg (code, X86_EAX);
4421 x86_prefix (code, X86_GS_PREFIX);
4422 x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
4424 /* get the address of lmf for the current thread */
4426 * This is performance critical so we try to use some tricks to make
4427 * it fast.
4430 if (lmf_addr_tls_offset != -1) {
4431 /* Load the lmf address quickly using the GS register */
4432 code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
4433 #ifdef PLATFORM_WIN32
4434 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4435 /* FIXME: Add a separate key for LMF to avoid this */
4436 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4439 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
4442 /* Skip esp + method info */
4443 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
4446 x86_push_reg (code, X86_EAX);
4447 /* push *lmf (previous_lmf) */
4448 x86_push_membase (code, X86_EAX, 0);
4450 x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
4454 if (cfg->used_int_regs & (1 << X86_EBX)) {
4455 x86_push_reg (code, X86_EBX);
4459 if (cfg->used_int_regs & (1 << X86_EDI)) {
4460 x86_push_reg (code, X86_EDI);
4464 if (cfg->used_int_regs & (1 << X86_ESI)) {
4465 x86_push_reg (code, X86_ESI);
4472 /* the original alloc_size is already aligned: %ebp and the return ip have been pushed, so realign */
4473 if (mono_do_x86_stack_align) {
4474 int tot = alloc_size + pos + 4 + 4; /* ret ip + ebp */
4475 tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
4476 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
4480 /* See mono_emit_stack_alloc */
4481 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
4482 guint32 remaining_size = alloc_size;
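/* Commit the frame one page at a time, touching each new page, so the guard page is hit
 * in order and the stack can grow (needed on win32 and when stack overflow is handled on
 * an altstack). */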
4483 while (remaining_size >= 0x1000) {
4484 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
4485 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
4486 remaining_size -= 0x1000;
4489 x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
4491 x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
4495 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED ||
4496 cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) {
4497 x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
4500 #if DEBUG_STACK_ALIGNMENT
4501 /* check the stack is aligned */
4502 if (method->wrapper_type == MONO_WRAPPER_NONE) {
4503 x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
4504 x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
4505 x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
4506 x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
4507 x86_breakpoint (code);
4511 /* compute max_offset in order to use short forward jumps */
4513 if (cfg->opt & MONO_OPT_BRANCH) {
4514 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4516 bb->max_offset = max_offset;
4518 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4520 /* max alignment for loops */
4521 if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4522 max_offset += LOOP_ALIGNMENT;
4524 MONO_BB_FOR_EACH_INS (bb, ins) {
4525 if (ins->opcode == OP_LABEL)
4526 ins->inst_c1 = max_offset;
4528 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
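/*
 * Illustrative note (assumption): max_offset is a conservative upper bound on
 * each instruction's final offset, so a forward branch can use the 2 byte
 * rel8 form whenever the worst-case distance to its target fits in a signed
 * byte (at most 127 bytes).
 */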
4533 /* store runtime generic context */
4534 if (cfg->rgctx_var) {
4535 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP);
4537 x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4);
4540 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4541 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4543 /* load arguments allocated to register from the stack */
4544 sig = mono_method_signature (method);
4547 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4548 inst = cfg->args [pos];
4549 if (inst->opcode == OP_REGVAR) {
4550 x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
4551 if (cfg->verbose_level > 2)
4552 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4557 cfg->code_len = code - cfg->native_code;
4559 g_assert (cfg->code_len < cfg->code_size);
4565 mono_arch_emit_epilog (MonoCompile *cfg)
4567 MonoMethod *method = cfg->method;
4568 MonoMethodSignature *sig = mono_method_signature (method);
4570 guint32 stack_to_pop;
4572 int max_epilog_size = 16;
4575 if (cfg->method->save_lmf)
4576 max_epilog_size += 128;
4578 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4579 cfg->code_size *= 2;
4580 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4581 mono_jit_stats.code_reallocs++;
4584 code = cfg->native_code + cfg->code_len;
4586 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4587 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4589 /* the code restoring the registers must be kept in sync with OP_JMP */
4592 if (method->save_lmf) {
4593 gint32 prev_lmf_reg;
4594 gint32 lmf_offset = -sizeof (MonoLMF);
4596 /* check if we need to restore protection of the stack after a stack overflow */
4597 if (mono_get_jit_tls_offset () != -1) {
4599 code = mono_x86_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
4600 /* we load the value in a separate instruction: this mechanism may be
4601 * used later as a safer way to do thread interruption
4603 x86_mov_reg_membase (code, X86_ECX, X86_ECX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 4);
4604 x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
4606 x86_branch8 (code, X86_CC_Z, 0, FALSE);
4607 /* note that the call trampoline will preserve eax/edx */
4608 x86_call_reg (code, X86_ECX);
4609 x86_patch (patch, code);
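/*
 * Illustrative sketch (not compiled) of the check emitted above;
 * restore_stack_prot is assumed to be a callable slot in MonoJitTlsData that
 * re-arms the stack guard page after a stack overflow:
 */
#if 0
if (jit_tls->restore_stack_prot)
	jit_tls->restore_stack_prot ();
#endif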
4611 /* FIXME: maybe save the jit tls in the prolog */
4613 if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
4615 * Optimized version which uses the mono_lmf TLS variable instead of indirection
4616 * through the mono_lmf_addr TLS variable.
4618 /* reg = previous_lmf */
4619 x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
4621 /* lmf = previous_lmf */
4622 x86_prefix (code, X86_GS_PREFIX);
4623 x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
4625 /* Find a spare register */
4626 switch (mini_type_get_underlying_type (cfg->generic_sharing_context, sig->ret)->type) {
4629 prev_lmf_reg = X86_EDI;
4630 cfg->used_int_regs |= (1 << X86_EDI);
4633 prev_lmf_reg = X86_EDX;
4637 /* reg = previous_lmf */
4638 x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
4641 x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
4643 /* *(lmf) = previous_lmf */
4644 x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
4647 /* restore caller saved regs */
4648 if (cfg->used_int_regs & (1 << X86_EBX)) {
4649 x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
4652 if (cfg->used_int_regs & (1 << X86_EDI)) {
4653 x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
4655 if (cfg->used_int_regs & (1 << X86_ESI)) {
4656 x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
4659 /* EBP is restored by LEAVE */
4661 if (cfg->used_int_regs & (1 << X86_EBX)) {
4664 if (cfg->used_int_regs & (1 << X86_EDI)) {
4667 if (cfg->used_int_regs & (1 << X86_ESI)) {
4672 x86_lea_membase (code, X86_ESP, X86_EBP, pos);
4674 if (cfg->used_int_regs & (1 << X86_ESI)) {
4675 x86_pop_reg (code, X86_ESI);
4677 if (cfg->used_int_regs & (1 << X86_EDI)) {
4678 x86_pop_reg (code, X86_EDI);
4680 if (cfg->used_int_regs & (1 << X86_EBX)) {
4681 x86_pop_reg (code, X86_EBX);
4685 /* Load returned vtypes into registers if needed */
4686 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
4687 if (cinfo->ret.storage == ArgValuetypeInReg) {
4688 for (quad = 0; quad < 2; quad ++) {
4689 switch (cinfo->ret.pair_storage [quad]) {
4691 x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), 4);
4693 case ArgOnFloatFpStack:
4694 x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), FALSE);
4696 case ArgOnDoubleFpStack:
4697 x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), TRUE);
4702 g_assert_not_reached ();
4709 if (CALLCONV_IS_STDCALL (sig)) {
4710 MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
4712 stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
4713 } else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret) && (cinfo->ret.storage == ArgOnStack))
4719 x86_ret_imm (code, stack_to_pop);
4723 cfg->code_len = code - cfg->native_code;
4725 g_assert (cfg->code_len < cfg->code_size);
4729 mono_arch_emit_exceptions (MonoCompile *cfg)
4731 MonoJumpInfo *patch_info;
4734 MonoClass *exc_classes [16];
4735 guint8 *exc_throw_start [16], *exc_throw_end [16];
4739 /* Compute needed space */
4740 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4741 if (patch_info->type == MONO_PATCH_INFO_EXC)
4746 * make sure we have enough space for exceptions
4747 * 16 is the size of two push_imm instructions and a call
4749 if (cfg->compile_aot)
4750 code_size = exc_count * 32;
4752 code_size = exc_count * 16;
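/*
 * Worked example (illustrative): a throw sequence is roughly push imm32
 * (5 bytes) + push imm32 (5 bytes) + call rel32 (5 bytes) = 15 bytes, rounded
 * up to 16 per exception; the AOT case reserves 32 bytes per exception to
 * leave room for its longer sequences.
 */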
4754 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4755 cfg->code_size *= 2;
4756 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4757 mono_jit_stats.code_reallocs++;
4760 code = cfg->native_code + cfg->code_len;
4763 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4764 switch (patch_info->type) {
4765 case MONO_PATCH_INFO_EXC: {
4766 MonoClass *exc_class;
4770 x86_patch (patch_info->ip.i + cfg->native_code, code);
4772 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4773 g_assert (exc_class);
4774 throw_ip = patch_info->ip.i;
4776 /* Find a throw sequence for the same exception class */
4777 for (i = 0; i < nthrows; ++i)
4778 if (exc_classes [i] == exc_class)
4781 x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip);
4782 x86_jump_code (code, exc_throw_start [i]);
4783 patch_info->type = MONO_PATCH_INFO_NONE;
4788 /* Compute size of code following the push <OFFSET> */
4791 if ((code - cfg->native_code) - throw_ip < 126 - size) {
4792 /* Use the shorter form */
4794 x86_push_imm (code, 0);
4798 x86_push_imm (code, 0xf0f0f0f0);
4803 exc_classes [nthrows] = exc_class;
4804 exc_throw_start [nthrows] = code;
4807 x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
4808 patch_info->data.name = "mono_arch_throw_corlib_exception";
4809 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4810 patch_info->ip.i = code - cfg->native_code;
4811 x86_call_code (code, 0);
4812 x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
4817 exc_throw_end [nthrows] = code;
4829 cfg->code_len = code - cfg->native_code;
4831 g_assert (cfg->code_len < cfg->code_size);
4835 mono_arch_flush_icache (guint8 *code, gint size)
4841 mono_arch_flush_register_windows (void)
4846 mono_arch_is_inst_imm (gint64 imm)
4852 * Support for fast access to the thread-local lmf structure using the GS
4853 * segment register on NPTL + kernel 2.6.x.
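/*
 * Illustrative sketch (assumption): once the TLS offset of the lmf variable
 * is known, reading it is a single GS relative load,
 *
 *     65 a1 <offset>        mov %gs:<offset>, %eax
 *
 * instead of a call to mono_get_lmf_addr () plus an indirection; this is
 * what the lmf_tls_offset fast paths in the prolog/epilog rely on.
 */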
4856 static gboolean tls_offset_inited = FALSE;
4859 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4861 if (!tls_offset_inited) {
4862 if (!getenv ("MONO_NO_TLS")) {
4863 #ifdef PLATFORM_WIN32
4865 * We need to init this multiple times, since when we are first called, the key might not
4866 * be initialized yet.
4868 appdomain_tls_offset = mono_domain_get_tls_key ();
4869 lmf_tls_offset = mono_get_jit_tls_key ();
4870 thread_tls_offset = mono_thread_get_tls_key ();
4872 /* Only 64 tls entries can be accessed using inline code */
4873 if (appdomain_tls_offset >= 64)
4874 appdomain_tls_offset = -1;
4875 if (lmf_tls_offset >= 64)
4876 lmf_tls_offset = -1;
4877 if (thread_tls_offset >= 64)
4878 thread_tls_offset = -1;
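/*
 * Illustrative note (assumption): the first 64 TLS slots live in a fixed
 * array inside the TEB and can be read inline with one %fs relative load;
 * higher keys go through the expansion slots and need an extra indirection,
 * hence the cutoffs above.
 */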
4881 optimize_for_xen = access ("/proc/xen", F_OK) == 0;
4883 tls_offset_inited = TRUE;
4884 appdomain_tls_offset = mono_domain_get_tls_offset ();
4885 lmf_tls_offset = mono_get_lmf_tls_offset ();
4886 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4887 thread_tls_offset = mono_thread_get_tls_offset ();
4894 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4899 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4901 MonoCallInst *call = (MonoCallInst*)inst;
4902 CallInfo *cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, inst->signature, FALSE);
4904 /* add the this argument */
4905 if (this_reg != -1) {
4906 if (cinfo->args [0].storage == ArgInIReg) {
4908 MONO_INST_NEW (cfg, this, OP_MOVE);
4909 this->type = this_type;
4910 this->sreg1 = this_reg;
4911 this->dreg = mono_regstate_next_int (cfg->rs);
4912 mono_bblock_add_inst (cfg->cbb, this);
4914 mono_call_inst_add_outarg_reg (cfg, call, this->dreg, cinfo->args [0].reg, FALSE);
4918 MONO_INST_NEW (cfg, this, OP_OUTARG);
4919 this->type = this_type;
4920 this->sreg1 = this_reg;
4921 mono_bblock_add_inst (cfg->cbb, this);
4928 if (cinfo->ret.storage == ArgValuetypeInReg) {
4930 * The valuetype is in EAX:EDX after the call, needs to be copied to
4931 * the stack. Save the address here, so the call instruction can access it.
4934 MONO_INST_NEW (cfg, vtarg, OP_STORE_MEMBASE_REG);
4935 vtarg->inst_destbasereg = X86_ESP;
4936 vtarg->inst_offset = inst->stack_usage;
4937 vtarg->sreg1 = vt_reg;
4938 mono_bblock_add_inst (cfg->cbb, vtarg);
4940 else if (cinfo->ret.storage == ArgInIReg) {
4941 /* The return address is passed in a register */
4942 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4943 vtarg->sreg1 = vt_reg;
4944 vtarg->dreg = mono_regstate_next_int (cfg->rs);
4945 mono_bblock_add_inst (cfg->cbb, vtarg);
4947 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
4950 MONO_INST_NEW (cfg, vtarg, OP_OUTARG);
4951 vtarg->type = STACK_MP;
4952 vtarg->sreg1 = vt_reg;
4953 mono_bblock_add_inst (cfg->cbb, vtarg);
4958 #ifdef MONO_ARCH_HAVE_IMT
4960 // Linear handler, the bsearch head compare is shorter
4961 //[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
4962 //[1 + 1] x86_branch8(inst,cond,imm,is_signed)
4963 // x86_patch(ins,target)
4964 //[1 + 5] x86_jump_mem(inst,mem)
4967 #define BR_SMALL_SIZE 2
4968 #define BR_LARGE_SIZE 5
4969 #define JUMP_IMM_SIZE 6
4970 #define ENABLE_WRONG_METHOD_CHECK 0
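/*
 * Worked example (illustrative, assuming CMP_SIZE is 6 as per the [2 + 4]
 * note above): an equality entry that still needs its compare costs
 * CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE = 6 + 2 + 6 = 14 bytes, while a
 * bsearch branch entry costs CMP_SIZE + BR_LARGE_SIZE = 6 + 5 = 11 bytes;
 * mono_arch_build_imt_thunk () sums these chunk sizes to size the thunk.
 */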
4973 imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
4975 int i, distance = 0;
4976 for (i = start; i < target; ++i)
4977 distance += imt_entries [i]->chunk_size;
4982 * LOCKING: called with the domain lock held
4985 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4986 gpointer fail_tramp)
4990 guint8 *code, *start;
4992 for (i = 0; i < count; ++i) {
4993 MonoIMTCheckItem *item = imt_entries [i];
4994 if (item->is_equals) {
4995 if (item->check_target_idx) {
4996 if (!item->compare_done)
4997 item->chunk_size += CMP_SIZE;
4998 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
5001 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2;
5003 item->chunk_size += JUMP_IMM_SIZE;
5004 #if ENABLE_WRONG_METHOD_CHECK
5005 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
5010 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
5011 imt_entries [item->check_target_idx]->compare_done = TRUE;
5013 size += item->chunk_size;
5016 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5018 code = mono_code_manager_reserve (domain->code_mp, size);
5020 for (i = 0; i < count; ++i) {
5021 MonoIMTCheckItem *item = imt_entries [i];
5022 item->code_target = code;
5023 if (item->is_equals) {
5024 if (item->check_target_idx) {
5025 if (!item->compare_done)
5026 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
5027 item->jmp_code = code;
5028 x86_branch8 (code, X86_CC_NE, 0, FALSE);
5030 x86_jump_code (code, item->value.target_code);
5032 x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
5035 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
5036 item->jmp_code = code;
5037 x86_branch8 (code, X86_CC_NE, 0, FALSE);
5038 x86_jump_code (code, item->value.target_code);
5039 x86_patch (item->jmp_code, code);
5040 x86_jump_code (code, fail_tramp);
5041 item->jmp_code = NULL;
5043 /* enable the commented code to assert on wrong method */
5044 #if ENABLE_WRONG_METHOD_CHECK
5045 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
5046 item->jmp_code = code;
5047 x86_branch8 (code, X86_CC_NE, 0, FALSE);
5049 x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
5050 #if ENABLE_WRONG_METHOD_CHECK
5051 x86_patch (item->jmp_code, code);
5052 x86_breakpoint (code);
5053 item->jmp_code = NULL;
5058 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
5059 item->jmp_code = code;
5060 if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
5061 x86_branch8 (code, X86_CC_GE, 0, FALSE);
5063 x86_branch32 (code, X86_CC_GE, 0, FALSE);
5066 /* patch the branches to get to the target items */
5067 for (i = 0; i < count; ++i) {
5068 MonoIMTCheckItem *item = imt_entries [i];
5069 if (item->jmp_code) {
5070 if (item->check_target_idx) {
5071 x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5077 mono_stats.imt_thunks_size += code - start;
5078 g_assert (code - start <= size);
5083 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
5085 return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
5089 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
5091 MonoMethodSignature *sig = mono_method_signature (method);
5092 CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
5093 int this_argument_offset;
5094 MonoObject *this_argument;
5097 * this is the offset of the this arg from esp as saved at the start of
5098 * mono_arch_create_trampoline_code () in tramp-x86.c.
5100 this_argument_offset = 5;
5101 if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
5102 this_argument_offset++;
5104 this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
5107 return this_argument;
5112 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
5114 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
5118 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5120 MonoInst *ins = NULL;
5122 if (cmethod->klass == mono_defaults.math_class) {
5123 if (strcmp (cmethod->name, "Sin") == 0) {
5124 MONO_INST_NEW (cfg, ins, OP_SIN);
5125 ins->inst_i0 = args [0];
5126 } else if (strcmp (cmethod->name, "Cos") == 0) {
5127 MONO_INST_NEW (cfg, ins, OP_COS);
5128 ins->inst_i0 = args [0];
5129 } else if (strcmp (cmethod->name, "Tan") == 0) {
5130 MONO_INST_NEW (cfg, ins, OP_TAN);
5131 ins->inst_i0 = args [0];
5132 } else if (strcmp (cmethod->name, "Atan") == 0) {
5133 MONO_INST_NEW (cfg, ins, OP_ATAN);
5134 ins->inst_i0 = args [0];
5135 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5136 MONO_INST_NEW (cfg, ins, OP_SQRT);
5137 ins->inst_i0 = args [0];
5138 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5139 MONO_INST_NEW (cfg, ins, OP_ABS);
5140 ins->inst_i0 = args [0];
5143 if (cfg->opt & MONO_OPT_CMOV) {
5146 if (strcmp (cmethod->name, "Min") == 0) {
5147 if (fsig->params [0]->type == MONO_TYPE_I4)
5149 else if (fsig->params [0]->type == MONO_TYPE_U4)
5150 opcode = OP_IMIN_UN;
5151 } else if (strcmp (cmethod->name, "Max") == 0) {
5152 if (fsig->params [0]->type == MONO_TYPE_I4)
5154 else if (fsig->params [0]->type == MONO_TYPE_U4)
5155 opcode = OP_IMAX_UN;
5159 MONO_INST_NEW (cfg, ins, opcode);
5160 ins->inst_i0 = args [0];
5161 ins->inst_i1 = args [1];
5166 /* OP_FREM is not IEEE compatible */
5167 else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
5168 MONO_INST_NEW (cfg, ins, OP_FREM);
5169 ins->inst_i0 = args [0];
5170 ins->inst_i1 = args [1];
5179 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5181 MonoInst *ins = NULL;
5184 if (cmethod->klass == mono_defaults.math_class) {
5185 if (strcmp (cmethod->name, "Sin") == 0) {
5187 } else if (strcmp (cmethod->name, "Cos") == 0) {
5189 } else if (strcmp (cmethod->name, "Tan") == 0) {
5191 } else if (strcmp (cmethod->name, "Atan") == 0) {
5193 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5195 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5200 MONO_INST_NEW (cfg, ins, opcode);
5201 ins->type = STACK_R8;
5202 ins->dreg = mono_alloc_freg (cfg);
5203 ins->sreg1 = args [0]->dreg;
5204 MONO_ADD_INS (cfg->cbb, ins);
5207 if (cfg->opt & MONO_OPT_CMOV) {
5210 if (strcmp (cmethod->name, "Min") == 0) {
5211 if (fsig->params [0]->type == MONO_TYPE_I4)
5213 } else if (strcmp (cmethod->name, "Max") == 0) {
5214 if (fsig->params [0]->type == MONO_TYPE_I4)
5219 MONO_INST_NEW (cfg, ins, opcode);
5220 ins->type = STACK_I4;
5221 ins->dreg = mono_alloc_ireg (cfg);
5222 ins->sreg1 = args [0]->dreg;
5223 ins->sreg2 = args [1]->dreg;
5224 MONO_ADD_INS (cfg->cbb, ins);
5229 /* OP_FREM is not IEEE compatible */
5230 else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
5231 MONO_INST_NEW (cfg, ins, OP_FREM);
5232 ins->inst_i0 = args [0];
5233 ins->inst_i1 = args [1];
5242 mono_arch_print_tree (MonoInst *tree, int arity)
5247 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5253 if (appdomain_tls_offset == -1)
5256 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5257 ins->inst_offset = appdomain_tls_offset;
5261 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
5265 if (thread_tls_offset == -1)
5268 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5269 ins->inst_offset = thread_tls_offset;
5274 mono_arch_get_patch_offset (guint8 *code)
5276 if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2))
5278 else if ((code [0] == 0xba))
5280 else if ((code [0] == 0x68))
5283 else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6))
5284 /* push <OFFSET>(<REG>) */
5286 else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2))
5287 /* call *<OFFSET>(<REG>) */
5289 else if ((code [0] == 0xdd) || (code [0] == 0xd9))
5292 else if ((code [0] == 0x58) && (code [1] == 0x05))
5293 /* pop %eax; add <OFFSET>, %eax */
5295 else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81))
5296 /* pop <REG>; add <OFFSET>, <REG> */
5298 else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8))
5299 /* mov <REG>, imm */
5302 g_assert_not_reached ();
5308 * mono_breakpoint_clean_code:
5310 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
5311 * breakpoints in the original code, they are removed in the copy.
5313 * Returns TRUE if no sw breakpoint was present.
5316 mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
5319 gboolean can_write = TRUE;
5321 * If method_start is non-NULL we need to perform bounds checks, since by accessing
5322 * memory at code - offset we could go before the start of the method and end up in a
5323 * different page that is not mapped, or simply read incorrect data; the bytes that fall before method_start are zero-filled instead.
5326 if (!method_start || code - offset >= method_start) {
5327 memcpy (buf, code - offset, size);
5329 int diff = code - method_start;
5330 memset (buf, 0, size);
5331 memcpy (buf + offset - diff, method_start, diff + size - offset);
5334 for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
5335 int idx = mono_breakpoint_info_index [i];
5339 ptr = mono_breakpoint_info [idx].address;
5340 if (ptr >= code && ptr < code + size) {
5341 guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
5343 /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
5344 buf [ptr - code] = saved_byte;
5351 mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
5357 mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
5362 /* go to the start of the call instruction
5364 * address_byte = (m << 6) | (o << 3) | reg
5365 * call opcode: 0xff address_byte displacement
5367 * 0xff m=2,o=2 imm32
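/*
 * Illustrative decomposition (mirrors the x86_modrm_* macros used below):
 *   m (mod) = (address_byte >> 6) & 0x3
 *   o       = (address_byte >> 3) & 0x7
 *   reg     =  address_byte       & 0x7
 * e.g. 0x50 = 01 010 000 -> mod=1 (disp8), o=2 (call), reg=0 (%eax), which is
 * the "ff 50 fc  call *-0x4(%eax)" pattern matched further down.
 */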
5372 * A given byte sequence can match more than one case here, so we have to be
5373 * really careful about the ordering of the cases: longer sequences are checked first.
5376 if ((code [-2] == 0x8b) && (x86_modrm_mod (code [-1]) == 0x2) && (code [4] == 0xff) && (x86_modrm_reg (code [5]) == 0x2) && (x86_modrm_mod (code [5]) == 0x0)) {
5378 * This is an interface call
5379 * 8b 80 0c e8 ff ff mov 0xffffe80c(%eax),%eax
5380 * ff 10 call *(%eax)
5382 reg = x86_modrm_rm (code [5]);
5384 #ifdef MONO_ARCH_HAVE_IMT
5385 } else if ((code [-2] == 0xba) && (code [3] == 0xff) && (x86_modrm_mod (code [4]) == 1) && (x86_modrm_reg (code [4]) == 2) && ((signed char)code [5] < 0)) {
5386 /* IMT-based interface calls: with MONO_ARCH_IMT_REG == edx
5387 * ba 14 f8 28 08 mov $0x828f814,%edx
5388 * ff 50 fc call *0xfffffffc(%eax)
5390 reg = code [4] & 0x07;
5391 disp = (signed char)code [5];
5393 } else if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
5394 reg = code [4] & 0x07;
5395 disp = (signed char)code [5];
5397 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
5398 reg = code [1] & 0x07;
5399 disp = *((gint32*)(code + 2));
5400 } else if ((code [1] == 0xe8)) {
5402 } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
5404 * This is an interface call
5405 * 8b 40 30 mov 0x30(%eax),%eax
5406 * ff 10 call *(%eax)
5409 reg = code [5] & 0x07;
5415 *displacement = disp;
5420 mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
5424 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
5427 return (gpointer*)((char*)vt + displacement);
5431 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
5432 gssize *regs, guint8 *code)
5434 guint32 esp = regs [X86_ESP];
5439 gsctx = mono_get_generic_context_from_code (code);
5440 cinfo = get_call_info (gsctx, NULL, sig, FALSE);
5443 * The stack looks like:
5446 * <possible vtype return address>
5448 * <4 pointers pushed by mono_arch_create_trampoline_code ()>
5450 res = (((MonoObject**)esp) [5 + (cinfo->args [0].offset / 4)]);
5455 #define MAX_ARCH_DELEGATE_PARAMS 10
5458 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
5460 guint8 *code, *start;
5462 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
5465 /* FIXME: Support more cases */
5466 if (MONO_TYPE_ISSTRUCT (sig->ret))
5470 * The stack contains:
5476 static guint8* cached = NULL;
5480 start = code = mono_global_codeman_reserve (64);
5482 /* Replace the this argument with the target */
5483 x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
5484 x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
5485 x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
5486 x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
5488 g_assert ((code - start) < 64);
5490 mono_debug_add_delegate_trampoline (start, code - start);
5492 mono_memory_barrier ();
5496 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
5498 /* 8 for mov_reg and jump, plus 8 for each parameter */
5499 int code_reserve = 8 + (sig->param_count * 8);
5501 for (i = 0; i < sig->param_count; ++i)
5502 if (!mono_is_regsize_var (sig->params [i]))
5505 code = cache [sig->param_count];
5510 * The stack contains:
5511 * <args in reverse order>
5516 * <args in reverse order>
5519 * without unbalancing the stack.
5520 * So move each arg up a spot in the stack (overwriting the un-needed 'this' arg),
5521 * leaving the original spot of the first arg as a placeholder in the stack, so that
5522 * everything still balances when the callee pops its arguments (sketched below).
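/*
 * Illustrative sketch (not compiled) of the shift performed by the loop
 * emitted below, with sp viewed as an array of stack slots:
 *
 *     for (i = 0; i < param_count; ++i)
 *         sp [i + 1] = sp [i + 2];
 *
 * each argument moves down over the dead 'this' slot and the highest
 * original argument slot is left behind as the placeholder.
 */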
5525 start = code = mono_global_codeman_reserve (code_reserve);
5527 /* store delegate for access to method_ptr */
5528 x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
5531 for (i = 0; i < sig->param_count; ++i) {
5532 x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
5533 x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
5536 x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
5538 g_assert ((code - start) < code_reserve);
5540 mono_debug_add_delegate_trampoline (start, code - start);
5542 mono_memory_barrier ();
5544 cache [sig->param_count] = start;
5551 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5554 case X86_ECX: return (gpointer)ctx->ecx;
5555 case X86_EDX: return (gpointer)ctx->edx;
5556 case X86_EBP: return (gpointer)ctx->ebp;
5557 case X86_ESP: return (gpointer)ctx->esp;
5558 default: return ((gpointer)(&ctx->eax)[reg]);
5562 #ifdef MONO_ARCH_SIMD_INTRINSICS
5565 get_float_to_x_spill_area (MonoCompile *cfg)
5567 if (!cfg->fconv_to_r8_x_var) {
5568 cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
5569 cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
5571 return cfg->fconv_to_r8_x_var;
5575 * Convert all fconv ops that MONO_OPT_SSE2 would get wrong.
5578 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
5582 int dreg, src_opcode;
5583 g_assert (cfg->new_ir);
5585 if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD))
5588 switch (src_opcode = ins->opcode) {
5589 case OP_FCONV_TO_I1:
5590 case OP_FCONV_TO_U1:
5591 case OP_FCONV_TO_I2:
5592 case OP_FCONV_TO_U2:
5593 case OP_FCONV_TO_I4:
5600 /* dreg is the IREG and sreg1 is the FREG */
5601 MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
5602 fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
5603 fconv->sreg1 = ins->sreg1;
5604 fconv->dreg = mono_alloc_ireg (cfg);
5605 fconv->type = STACK_VTYPE;
5606 fconv->backend.spill_var = get_float_to_x_spill_area (cfg);
5608 mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);
5612 ins->opcode = OP_XCONV_R8_TO_I4;
5614 ins->klass = mono_defaults.int32_class;
5615 ins->sreg1 = fconv->dreg;
5617 ins->type = STACK_I4;
5618 ins->backend.source_opcode = src_opcode;
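/*
 * Illustrative sketch (assumption about the resulting IR): the decomposition
 * above rewrites
 *
 *     int_dreg = FCONV_TO_I4 float_sreg
 *
 * into
 *
 *     xmm_tmp  = FCONV_TO_R8_X  float_sreg   (spilled through the r8_x var)
 *     int_dreg = XCONV_R8_TO_I4 xmm_tmp
 *
 * with backend.source_opcode recording the original conversion opcode.
 */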