2 * mini-x86.c: x86 backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/threads.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/mono-debug.h>
23 #include <mono/utils/mono-math.h>
24 #include <mono/utils/mono-counters.h>
31 /* On windows, these hold the key returned by TlsAlloc () */
32 static gint lmf_tls_offset = -1;
33 static gint lmf_addr_tls_offset = -1;
34 static gint appdomain_tls_offset = -1;
35 static gint thread_tls_offset = -1;
38 static gboolean optimize_for_xen = TRUE;
40 #define optimize_for_xen 0
44 static gboolean is_win32 = TRUE;
46 static gboolean is_win32 = FALSE;
49 /* This mutex protects architecture specific caches */
50 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
51 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
52 static CRITICAL_SECTION mini_arch_mutex;
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
59 /* Under windows, the default pinvoke calling convention is stdcall */
60 #define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
62 #define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
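/*
 * Worked example of the ALIGN_TO macro above (added for illustration only,
 * not used by the backend): it rounds a value up to the next multiple of
 * 'align', where 'align' is a power of two.
 *
 *   ALIGN_TO (13, 8) == (13 + 7) & ~7 == 16
 *   ALIGN_TO (16, 8) == (16 + 7) & ~7 == 16
 *   ALIGN_TO (1, 4)  == 4,  ALIGN_TO (0, 4) == 0
 */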
66 mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
69 mono_arch_regname (int reg)
72 case X86_EAX: return "%eax";
73 case X86_EBX: return "%ebx";
74 case X86_ECX: return "%ecx";
75 case X86_EDX: return "%edx";
76 case X86_ESP: return "%esp";
77 case X86_EBP: return "%ebp";
78 case X86_EDI: return "%edi";
79 case X86_ESI: return "%esi";
85 mono_arch_fregname (int reg)
110 mono_arch_xregname (int reg)
151 /* Only if storage == ArgValuetypeInReg */
152 ArgStorage pair_storage [2];
161 gboolean need_stack_align;
162 guint32 stack_align_amount;
170 #define FLOAT_PARAM_REGS 0
172 static X86_Reg_No param_regs [] = { 0 };
174 #if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
175 #define SMALL_STRUCTS_IN_REGS
176 static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
180 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
182 ainfo->offset = *stack_size;
184 if (*gr >= PARAM_REGS) {
185 ainfo->storage = ArgOnStack;
186 (*stack_size) += sizeof (gpointer);
189 ainfo->storage = ArgInIReg;
190 ainfo->reg = param_regs [*gr];
196 add_general_pair (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
198 ainfo->offset = *stack_size;
200 g_assert (PARAM_REGS == 0);
202 ainfo->storage = ArgOnStack;
203 (*stack_size) += sizeof (gpointer) * 2;
207 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
209 ainfo->offset = *stack_size;
211 if (*gr >= FLOAT_PARAM_REGS) {
212 ainfo->storage = ArgOnStack;
213 (*stack_size) += is_double ? 8 : 4;
216 /* A double register */
218 ainfo->storage = ArgInDoubleSSEReg;
220 ainfo->storage = ArgInFloatSSEReg;
228 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
230 guint32 *gr, guint32 *fr, guint32 *stack_size)
235 klass = mono_class_from_mono_type (type);
236 size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
238 #ifdef SMALL_STRUCTS_IN_REGS
239 if (sig->pinvoke && is_return) {
240 MonoMarshalType *info;
243 * the exact rules are not very well documented; the code below seems to work with the
244 * code generated by gcc 3.3.3 -mno-cygwin.
246 info = mono_marshal_load_type_info (klass);
249 ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
251 /* Special case structs with only a float member */
252 if ((info->native_size == 8) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R8)) {
253 ainfo->storage = ArgValuetypeInReg;
254 ainfo->pair_storage [0] = ArgOnDoubleFpStack;
257 if ((info->native_size == 4) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R4)) {
258 ainfo->storage = ArgValuetypeInReg;
259 ainfo->pair_storage [0] = ArgOnFloatFpStack;
262 if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
263 ainfo->storage = ArgValuetypeInReg;
264 ainfo->pair_storage [0] = ArgInIReg;
265 ainfo->pair_regs [0] = return_regs [0];
266 if (info->native_size > 4) {
267 ainfo->pair_storage [1] = ArgInIReg;
268 ainfo->pair_regs [1] = return_regs [1];
275 ainfo->offset = *stack_size;
276 ainfo->storage = ArgOnStack;
277 *stack_size += ALIGN_TO (size, sizeof (gpointer));
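/*
 * Illustrative examples of the pinvoke small-struct return classification above
 * (a sketch for the reader; the struct declarations are hypothetical):
 *
 *   struct { float a; }         native_size 4  -> ArgValuetypeInReg, pair [ArgOnFloatFpStack]
 *   struct { double a; }        native_size 8  -> ArgValuetypeInReg, pair [ArgOnDoubleFpStack]
 *   struct { char a; char b; }  native_size 2  -> ArgValuetypeInReg, pair [ArgInIReg (EAX)]
 *   struct { int a; int b; }    native_size 8  -> ArgValuetypeInReg, pair [ArgInIReg (EAX), ArgInIReg (EDX)]
 *   struct { char a[3]; }       native_size 3  -> falls through to the ArgOnStack path just above
 */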
283 * Obtain information about a call according to the calling convention.
284 * For x86 ELF, see the "System V Application Binary Interface Intel386
285 * Architecture Processor Supplement, Fourth Edition" document for more
287 * For x86 win32, see ???.
290 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
294 int n = sig->hasthis + sig->param_count;
295 guint32 stack_size = 0;
299 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
301 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
308 ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
309 switch (ret_type->type) {
310 case MONO_TYPE_BOOLEAN:
321 case MONO_TYPE_FNPTR:
322 case MONO_TYPE_CLASS:
323 case MONO_TYPE_OBJECT:
324 case MONO_TYPE_SZARRAY:
325 case MONO_TYPE_ARRAY:
326 case MONO_TYPE_STRING:
327 cinfo->ret.storage = ArgInIReg;
328 cinfo->ret.reg = X86_EAX;
332 cinfo->ret.storage = ArgInIReg;
333 cinfo->ret.reg = X86_EAX;
336 cinfo->ret.storage = ArgOnFloatFpStack;
339 cinfo->ret.storage = ArgOnDoubleFpStack;
341 case MONO_TYPE_GENERICINST:
342 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
343 cinfo->ret.storage = ArgInIReg;
344 cinfo->ret.reg = X86_EAX;
348 case MONO_TYPE_VALUETYPE: {
349 guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
351 add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
352 if (cinfo->ret.storage == ArgOnStack)
353 /* The caller passes the address where the value is stored */
354 add_general (&gr, &stack_size, &cinfo->ret);
357 case MONO_TYPE_TYPEDBYREF:
358 /* Same as a valuetype with size 24 */
359 add_general (&gr, &stack_size, &cinfo->ret);
363 cinfo->ret.storage = ArgNone;
366 g_error ("Can't handle as return value 0x%x", sig->ret->type);
372 add_general (&gr, &stack_size, cinfo->args + 0);
374 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
376 fr = FLOAT_PARAM_REGS;
378 /* Emit the signature cookie just before the implicit arguments */
379 add_general (&gr, &stack_size, &cinfo->sig_cookie);
382 for (i = 0; i < sig->param_count; ++i) {
383 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
386 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
387 /* We always pass the sig cookie on the stack for simplicity */
389 * Prevent implicit arguments + the sig cookie from being passed
393 fr = FLOAT_PARAM_REGS;
395 /* Emit the signature cookie just before the implicit arguments */
396 add_general (&gr, &stack_size, &cinfo->sig_cookie);
399 if (sig->params [i]->byref) {
400 add_general (&gr, &stack_size, ainfo);
403 ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
404 switch (ptype->type) {
405 case MONO_TYPE_BOOLEAN:
408 add_general (&gr, &stack_size, ainfo);
413 add_general (&gr, &stack_size, ainfo);
417 add_general (&gr, &stack_size, ainfo);
422 case MONO_TYPE_FNPTR:
423 case MONO_TYPE_CLASS:
424 case MONO_TYPE_OBJECT:
425 case MONO_TYPE_STRING:
426 case MONO_TYPE_SZARRAY:
427 case MONO_TYPE_ARRAY:
428 add_general (&gr, &stack_size, ainfo);
430 case MONO_TYPE_GENERICINST:
431 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
432 add_general (&gr, &stack_size, ainfo);
436 case MONO_TYPE_VALUETYPE:
437 add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
439 case MONO_TYPE_TYPEDBYREF:
440 stack_size += sizeof (MonoTypedRef);
441 ainfo->storage = ArgOnStack;
445 add_general_pair (&gr, &stack_size, ainfo);
448 add_float (&fr, &stack_size, ainfo, FALSE);
451 add_float (&fr, &stack_size, ainfo, TRUE);
454 g_error ("unexpected type 0x%x", ptype->type);
455 g_assert_not_reached ();
459 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
461 fr = FLOAT_PARAM_REGS;
463 /* Emit the signature cookie just before the implicit arguments */
464 add_general (&gr, &stack_size, &cinfo->sig_cookie);
467 if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) {
468 cinfo->need_stack_align = TRUE;
469 cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT);
470 stack_size += cinfo->stack_align_amount;
473 cinfo->stack_usage = stack_size;
474 cinfo->reg_usage = gr;
475 cinfo->freg_usage = fr;
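/*
 * Worked example (illustration only, hypothetical signature): for
 *   int Foo (object this, int a, long b, double c)
 * get_call_info () produces, since PARAM_REGS == 0 on x86:
 *
 *   this -> ArgOnStack, offset 0
 *   a    -> ArgOnStack, offset 4
 *   b    -> ArgOnStack, offset 8   (add_general_pair, two pointer-sized slots)
 *   c    -> ArgOnStack, offset 16  (add_float, 8 bytes)
 *   ret  -> ArgInIReg, X86_EAX
 *
 * stack_usage is then 24, padded further if MONO_ARCH_FRAME_ALIGNMENT requires it.
 */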
480 * mono_arch_get_argument_info:
481 * @csig: a method signature
482 * @param_count: the number of parameters to consider
483 * @arg_info: an array to store the result infos
485 * Gathers information on parameters such as size, alignment and
486 * padding. arg_info should be large enough to hold param_count + 1 entries.
488 * Returns the size of the argument area on the stack.
491 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
493 int k, args_size = 0;
499 cinfo = get_call_info (NULL, NULL, csig, FALSE);
501 if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
502 args_size += sizeof (gpointer);
506 arg_info [0].offset = offset;
509 args_size += sizeof (gpointer);
513 arg_info [0].size = args_size;
515 for (k = 0; k < param_count; k++) {
516 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
518 /* ignore alignment for now */
521 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
522 arg_info [k].pad = pad;
524 arg_info [k + 1].pad = 0;
525 arg_info [k + 1].size = size;
527 arg_info [k + 1].offset = offset;
531 if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL (csig))
532 align = MONO_ARCH_FRAME_ALIGNMENT;
535 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
536 arg_info [k].pad = pad;
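/*
 * Usage sketch (a hypothetical caller, for illustration only):
 *
 *   MonoJitArgumentInfo *ai = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
 *   int area = mono_arch_get_argument_info (sig, sig->param_count, ai);
 *   // ai [0] describes the this/return slot, ai [k + 1] describes parameter k;
 *   // 'area' is the total stack space occupied by the pushed arguments.
 */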
543 static const guchar cpuid_impl [] = {
544 0x55, /* push %ebp */
545 0x89, 0xe5, /* mov %esp,%ebp */
546 0x53, /* push %ebx */
547 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
548 0x0f, 0xa2, /* cpuid */
549 0x50, /* push %eax */
550 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
551 0x89, 0x18, /* mov %ebx,(%eax) */
552 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
553 0x89, 0x08, /* mov %ecx,(%eax) */
554 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
555 0x89, 0x10, /* mov %edx,(%eax) */
557 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
558 0x89, 0x02, /* mov %eax,(%edx) */
564 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
567 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
571 __asm__ __volatile__ (
574 "movl %%eax, %%edx\n"
575 "xorl $0x200000, %%eax\n"
580 "xorl %%edx, %%eax\n"
581 "andl $0x200000, %%eax\n"
603 /* Have to use the code manager to get around WinXP DEP */
604 static CpuidFunc func = NULL;
607 ptr = mono_global_codeman_reserve (sizeof (cpuid_impl));
608 memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
609 func = (CpuidFunc)ptr;
611 func (id, p_eax, p_ebx, p_ecx, p_edx);
614 * We use this approach because of issues with gcc and pic code, see:
615 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
616 __asm__ __volatile__ ("cpuid"
617 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
626 * Initialize the cpu to execute managed code.
629 mono_arch_cpu_init (void)
631 /* spec compliance requires running with double precision */
635 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
636 fpcw &= ~X86_FPCW_PRECC_MASK;
637 fpcw |= X86_FPCW_PREC_DOUBLE;
638 __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
639 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
641 _control87 (_PC_53, MCW_PC);
646 * Initialize architecture specific code.
649 mono_arch_init (void)
651 InitializeCriticalSection (&mini_arch_mutex);
655 * Cleanup architecture specific code.
658 mono_arch_cleanup (void)
660 DeleteCriticalSection (&mini_arch_mutex);
664 * This function returns the optimizations supported on this cpu.
667 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
669 int eax, ebx, ecx, edx;
673 /* Feature Flags function, flags returned in EDX. */
674 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
675 if (edx & (1 << 15)) {
676 opts |= MONO_OPT_CMOV;
678 opts |= MONO_OPT_FCMOV;
680 *exclude_mask |= MONO_OPT_FCMOV;
682 *exclude_mask |= MONO_OPT_CMOV;
684 opts |= MONO_OPT_SSE2;
686 *exclude_mask |= MONO_OPT_SSE2;
688 #ifdef MONO_ARCH_SIMD_INTRINSICS
689 /*SIMD intrinsics require at least SSE2.*/
690 if (!(opts & MONO_OPT_SSE2))
691 *exclude_mask |= MONO_OPT_SIMD;
698 * This function tests for all supported SSE versions.
700 * Returns a bitmask corresponding to all supported versions.
702 * TODO detect other versions like SSE4a.
705 mono_arch_cpu_enumerate_simd_versions (void)
707 int eax, ebx, ecx, edx;
708 guint32 sse_opts = 0;
710 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
712 sse_opts |= 1 << SIMD_VERSION_SSE1;
714 sse_opts |= 1 << SIMD_VERSION_SSE2;
716 sse_opts |= 1 << SIMD_VERSION_SSE3;
718 sse_opts |= 1 << SIMD_VERSION_SSSE3;
720 sse_opts |= 1 << SIMD_VERSION_SSE41;
722 sse_opts |= 1 << SIMD_VERSION_SSE42;
728 * Determine whether the trap whose info is in SIGINFO is caused by
732 mono_arch_is_int_overflow (void *sigctx, void *info)
737 mono_arch_sigctx_to_monoctx (sigctx, &ctx);
739 ip = (guint8*)ctx.eip;
741 if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) {
745 switch (x86_modrm_rm (ip [1])) {
765 g_assert_not_reached ();
777 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
782 for (i = 0; i < cfg->num_varinfo; i++) {
783 MonoInst *ins = cfg->varinfo [i];
784 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
787 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
790 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
791 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
794 /* we don't allocate I1 to registers because there is no simple way to sign extend
795 * 8-bit quantities in caller saved registers on x86 */
796 if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
797 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
798 g_assert (i == vmv->idx);
799 vars = g_list_prepend (vars, vmv);
803 vars = mono_varlist_sort (cfg, vars, 0);
809 mono_arch_get_global_int_regs (MonoCompile *cfg)
813 /* we can use 3 registers for global allocation */
814 regs = g_list_prepend (regs, (gpointer)X86_EBX);
815 regs = g_list_prepend (regs, (gpointer)X86_ESI);
816 regs = g_list_prepend (regs, (gpointer)X86_EDI);
822 * mono_arch_regalloc_cost:
824 * Return the cost, in number of memory references, of the action of
825 * allocating the variable VMV into a register during global register
829 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
831 MonoInst *ins = cfg->varinfo [vmv->idx];
833 if (cfg->method->save_lmf)
834 /* The register is already saved */
835 return (ins->opcode == OP_ARG) ? 1 : 0;
837 /* push+pop+possible load if it is an argument */
838 return (ins->opcode == OP_ARG) ? 3 : 2;
842 * Set var information according to the calling convention. X86 version.
843 * The locals var stuff should most likely be split in another method.
846 mono_arch_allocate_vars (MonoCompile *cfg)
848 MonoMethodSignature *sig;
849 MonoMethodHeader *header;
851 guint32 locals_stack_size, locals_stack_align;
856 header = mono_method_get_header (cfg->method);
857 sig = mono_method_signature (cfg->method);
859 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
861 cfg->frame_reg = X86_EBP;
864 /* Reserve space to save LMF and caller saved registers */
866 if (cfg->method->save_lmf) {
867 offset += sizeof (MonoLMF);
869 if (cfg->used_int_regs & (1 << X86_EBX)) {
873 if (cfg->used_int_regs & (1 << X86_EDI)) {
877 if (cfg->used_int_regs & (1 << X86_ESI)) {
882 switch (cinfo->ret.storage) {
883 case ArgValuetypeInReg:
884 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
886 cfg->ret->opcode = OP_REGOFFSET;
887 cfg->ret->inst_basereg = X86_EBP;
888 cfg->ret->inst_offset = - offset;
894 /* Allocate locals */
895 offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
896 if (locals_stack_align) {
897 offset += (locals_stack_align - 1);
898 offset &= ~(locals_stack_align - 1);
901 * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
902 * have locals larger than 8 bytes we need to make sure that
903 * they have the appropriate offset.
905 if (MONO_ARCH_FRAME_ALIGNMENT > 8 && locals_stack_align > 8)
906 offset += MONO_ARCH_FRAME_ALIGNMENT - sizeof (gpointer) * 2;
907 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
908 if (offsets [i] != -1) {
909 MonoInst *inst = cfg->varinfo [i];
910 inst->opcode = OP_REGOFFSET;
911 inst->inst_basereg = X86_EBP;
912 inst->inst_offset = - (offset + offsets [i]);
913 //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
916 offset += locals_stack_size;
920 * Allocate arguments+return value
923 switch (cinfo->ret.storage) {
925 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
927 * In the new IR, the cfg->vret_addr variable represents the
928 * vtype return value.
930 cfg->vret_addr->opcode = OP_REGOFFSET;
931 cfg->vret_addr->inst_basereg = cfg->frame_reg;
932 cfg->vret_addr->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
933 if (G_UNLIKELY (cfg->verbose_level > 1)) {
934 printf ("vret_addr =");
935 mono_print_ins (cfg->vret_addr);
938 cfg->ret->opcode = OP_REGOFFSET;
939 cfg->ret->inst_basereg = X86_EBP;
940 cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
943 case ArgValuetypeInReg:
946 cfg->ret->opcode = OP_REGVAR;
947 cfg->ret->inst_c0 = cinfo->ret.reg;
948 cfg->ret->dreg = cinfo->ret.reg;
951 case ArgOnFloatFpStack:
952 case ArgOnDoubleFpStack:
955 g_assert_not_reached ();
958 if (sig->call_convention == MONO_CALL_VARARG) {
959 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
960 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
963 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
964 ArgInfo *ainfo = &cinfo->args [i];
965 inst = cfg->args [i];
966 if (inst->opcode != OP_REGVAR) {
967 inst->opcode = OP_REGOFFSET;
968 inst->inst_basereg = X86_EBP;
970 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
973 offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
974 offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
976 cfg->stack_offset = offset;
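/*
 * Resulting frame layout, as a simplified sketch (offsets relative to EBP; the
 * exact contents depend on save_lmf, used_int_regs and the locals actually
 * allocated):
 *
 *   +8 ...   incoming arguments   (ainfo->offset + ARGS_OFFSET)
 *   +4       return address
 *    0       saved EBP
 *   -x       MonoLMF and/or callee saved EBX/EDI/ESI
 *   -y       locals                (inst_offset = -(offset + offsets [i]))
 *
 * cfg->stack_offset is the total size below EBP, rounded to MONO_ARCH_FRAME_ALIGNMENT.
 */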
980 mono_arch_create_vars (MonoCompile *cfg)
982 MonoMethodSignature *sig;
985 sig = mono_method_signature (cfg->method);
987 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
989 if (cinfo->ret.storage == ArgValuetypeInReg)
990 cfg->ret_var_is_local = TRUE;
991 if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig->ret)) {
992 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
997 * It is expensive to adjust esp for each individual fp argument pushed on the stack
998 * so we try to do it just once when we have multiple fp arguments in a row.
999 * We don't use this mechanism generally because for int arguments the generated code
1000 * is slightly bigger and new generation cpus optimize away the dependency chains
1001 * created by push instructions on the esp value.
1002 * fp_arg_setup is the first argument in the execution sequence where the esp register
1005 static G_GNUC_UNUSED int
1006 collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup)
1011 for (; start_arg < sig->param_count; ++start_arg) {
1012 t = mini_type_get_underlying_type (NULL, sig->params [start_arg]);
1013 if (!t->byref && t->type == MONO_TYPE_R8) {
1014 fp_space += sizeof (double);
1015 *fp_arg_setup = start_arg;
1024 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1026 MonoMethodSignature *tmp_sig;
1028 /* FIXME: Add support for signature tokens to AOT */
1029 cfg->disable_aot = TRUE;
1032 * mono_ArgIterator_Setup assumes the signature cookie is
1033 * passed first and all the arguments which were before it are
1034 * passed on the stack after the signature. So compensate by
1035 * passing a different signature.
1037 tmp_sig = mono_metadata_signature_dup (call->signature);
1038 tmp_sig->param_count -= call->signature->sentinelpos;
1039 tmp_sig->sentinelpos = 0;
1040 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_X86_PUSH_IMM, -1, -1, tmp_sig);
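/*
 * Worked example (illustration only, hypothetical method): for a vararg call
 *   void Foo (int a, string b, __arglist (double d, int e))
 * the signature has sentinelpos == 2 and param_count == 4, so tmp_sig describes
 * just the two trailing vararg parameters (double, int). That trimmed signature
 * is what gets pushed as the cookie, ending up between the fixed arguments and
 * the vararg arguments on the stack.
 */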
1046 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1049 MonoMethodSignature *sig;
1052 int sentinelpos = 0;
1054 sig = call->signature;
1055 n = sig->param_count + sig->hasthis;
1057 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1059 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1060 sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
1062 if (cinfo->need_stack_align) {
1063 MONO_INST_NEW (cfg, arg, OP_SUB_IMM);
1064 arg->dreg = X86_ESP;
1065 arg->sreg1 = X86_ESP;
1066 arg->inst_imm = cinfo->stack_align_amount;
1067 MONO_ADD_INS (cfg->cbb, arg);
1070 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1073 if (cinfo->ret.storage == ArgValuetypeInReg) {
1074 if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
1076 * Tell the JIT to use a more efficient calling convention: call using
1077 * OP_CALL, compute the result location after the call, and save the
1080 call->vret_in_reg = TRUE;
1083 * The valuetype is in EAX:EDX after the call, needs to be copied to
1084 * the stack. Save the address here, so the call instruction can
1087 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1088 vtarg->sreg1 = call->vret_var->dreg;
1089 MONO_ADD_INS (cfg->cbb, vtarg);
1094 /* Handle the case where there are no implicit arguments */
1095 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
1096 emit_sig_cookie (cfg, call, cinfo);
1099 /* Arguments are pushed in the reverse order */
1100 for (i = n - 1; i >= 0; i --) {
1101 ArgInfo *ainfo = cinfo->args + i;
1104 if (i >= sig->hasthis)
1105 t = sig->params [i - sig->hasthis];
1107 t = &mono_defaults.int_class->byval_arg;
1108 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1110 MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
1112 in = call->args [i];
1113 arg->cil_code = in->cil_code;
1114 arg->sreg1 = in->dreg;
1115 arg->type = in->type;
1117 g_assert (in->dreg != -1);
1119 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
1123 g_assert (in->klass);
1125 if (t->type == MONO_TYPE_TYPEDBYREF) {
1126 size = sizeof (MonoTypedRef);
1127 align = sizeof (gpointer);
1130 size = mini_type_stack_size_full (cfg->generic_sharing_context, &in->klass->byval_arg, &align, sig->pinvoke);
1134 arg->opcode = OP_OUTARG_VT;
1135 arg->sreg1 = in->dreg;
1136 arg->klass = in->klass;
1137 arg->backend.size = size;
1139 MONO_ADD_INS (cfg->cbb, arg);
1143 switch (ainfo->storage) {
1145 arg->opcode = OP_X86_PUSH;
1147 if (t->type == MONO_TYPE_R4) {
1148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 4);
1149 arg->opcode = OP_STORER4_MEMBASE_REG;
1150 arg->inst_destbasereg = X86_ESP;
1151 arg->inst_offset = 0;
1152 } else if (t->type == MONO_TYPE_R8) {
1153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
1154 arg->opcode = OP_STORER8_MEMBASE_REG;
1155 arg->inst_destbasereg = X86_ESP;
1156 arg->inst_offset = 0;
1157 } else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) {
1159 MONO_EMIT_NEW_UNALU (cfg, OP_X86_PUSH, -1, in->dreg + 2);
1164 g_assert_not_reached ();
1167 MONO_ADD_INS (cfg->cbb, arg);
1170 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
1171 /* Emit the signature cookie just before the implicit arguments */
1172 emit_sig_cookie (cfg, call, cinfo);
1176 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1179 if (cinfo->ret.storage == ArgValuetypeInReg) {
1182 else if (cinfo->ret.storage == ArgInIReg) {
1184 /* The return address is passed in a register */
1185 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1186 vtarg->sreg1 = call->inst.dreg;
1187 vtarg->dreg = mono_alloc_ireg (cfg);
1188 MONO_ADD_INS (cfg->cbb, vtarg);
1190 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1193 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1194 vtarg->type = STACK_MP;
1195 vtarg->sreg1 = call->vret_var->dreg;
1196 MONO_ADD_INS (cfg->cbb, vtarg);
1199 /* if the function returns a struct, the called method already does a ret $0x4 */
1200 cinfo->stack_usage -= 4;
1203 call->stack_usage = cinfo->stack_usage;
1207 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1210 int size = ins->backend.size;
1213 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
1214 arg->sreg1 = src->dreg;
1216 MONO_ADD_INS (cfg->cbb, arg);
1217 } else if (size <= 20) {
1218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 4));
1219 mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
1221 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
1222 arg->inst_basereg = src->dreg;
1223 arg->inst_offset = 0;
1224 arg->inst_imm = size;
1226 MONO_ADD_INS (cfg->cbb, arg);
1231 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1233 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1236 if (ret->type == MONO_TYPE_R4) {
1239 } else if (ret->type == MONO_TYPE_R8) {
1242 } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1243 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, val->dreg + 1);
1244 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, val->dreg + 2);
1249 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1253 * Allow tracing to work with this interface (with an optional argument)
1256 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1260 g_assert (MONO_ARCH_FRAME_ALIGNMENT >= 8);
1261 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 8);
1263 /* if some args are passed in registers, we need to save them here */
1264 x86_push_reg (code, X86_EBP);
1266 if (cfg->compile_aot) {
1267 x86_push_imm (code, cfg->method);
1268 x86_mov_reg_imm (code, X86_EAX, func);
1269 x86_call_reg (code, X86_EAX);
1271 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
1272 x86_push_imm (code, cfg->method);
1273 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1274 x86_call_code (code, 0);
1276 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT);
1290 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1293 int arg_size = 0, save_mode = SAVE_NONE;
1294 MonoMethod *method = cfg->method;
1296 switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
1297 case MONO_TYPE_VOID:
1298 /* special case string .ctor icall */
1299 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1300 save_mode = SAVE_EAX;
1302 save_mode = SAVE_NONE;
1306 save_mode = SAVE_EAX_EDX;
1310 save_mode = SAVE_FP;
1312 case MONO_TYPE_GENERICINST:
1313 if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
1314 save_mode = SAVE_EAX;
1318 case MONO_TYPE_VALUETYPE:
1319 save_mode = SAVE_STRUCT;
1322 save_mode = SAVE_EAX;
1326 switch (save_mode) {
1328 x86_push_reg (code, X86_EDX);
1329 x86_push_reg (code, X86_EAX);
1330 if (enable_arguments) {
1331 x86_push_reg (code, X86_EDX);
1332 x86_push_reg (code, X86_EAX);
1337 x86_push_reg (code, X86_EAX);
1338 if (enable_arguments) {
1339 x86_push_reg (code, X86_EAX);
1344 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1345 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1346 if (enable_arguments) {
1347 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1348 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1353 if (enable_arguments) {
1354 x86_push_membase (code, X86_EBP, 8);
1363 if (cfg->compile_aot) {
1364 x86_push_imm (code, method);
1365 x86_mov_reg_imm (code, X86_EAX, func);
1366 x86_call_reg (code, X86_EAX);
1368 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
1369 x86_push_imm (code, method);
1370 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1371 x86_call_code (code, 0);
1373 x86_alu_reg_imm (code, X86_ADD, X86_ESP, arg_size + 4);
1375 switch (save_mode) {
1377 x86_pop_reg (code, X86_EAX);
1378 x86_pop_reg (code, X86_EDX);
1381 x86_pop_reg (code, X86_EAX);
1384 x86_fld_membase (code, X86_ESP, 0, TRUE);
1385 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1395 #define EMIT_COND_BRANCH(ins,cond,sign) \
1396 if (ins->flags & MONO_INST_BRLABEL) { \
1397 if (ins->inst_i0->inst_c0) { \
1398 x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
1400 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1401 if ((cfg->opt & MONO_OPT_BRANCH) && \
1402 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
1403 x86_branch8 (code, cond, 0, sign); \
1405 x86_branch32 (code, cond, 0, sign); \
1408 if (ins->inst_true_bb->native_offset) { \
1409 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1411 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1412 if ((cfg->opt & MONO_OPT_BRANCH) && \
1413 x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1414 x86_branch8 (code, cond, 0, sign); \
1416 x86_branch32 (code, cond, 0, sign); \
1421 * Emit an exception if the condition fails and,
1422 * if possible, branch directly to the target
1424 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
1426 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1427 if (tins == NULL) { \
1428 mono_add_patch_info (cfg, code - cfg->native_code, \
1429 MONO_PATCH_INFO_EXC, exc_name); \
1430 x86_branch32 (code, cond, 0, signed); \
1432 EMIT_COND_BRANCH (tins, cond, signed); \
1436 #define EMIT_FPCOMPARE(code) do { \
1437 x86_fcompp (code); \
1438 x86_fnstsw (code); \
1443 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1445 mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1446 x86_call_code (code, 0);
1451 #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
1454 * mono_peephole_pass_1:
1456 * Perform peephole opts which should/can be performed before local regalloc
1459 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1463 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1464 MonoInst *last_ins = ins->prev;
1466 switch (ins->opcode) {
1469 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1471 * X86_LEA is like ADD, but doesn't have the
1472 * sreg1==dreg restriction.
1474 ins->opcode = OP_X86_LEA_MEMBASE;
1475 ins->inst_basereg = ins->sreg1;
1476 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1477 ins->opcode = OP_X86_INC_REG;
1481 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1482 ins->opcode = OP_X86_LEA_MEMBASE;
1483 ins->inst_basereg = ins->sreg1;
1484 ins->inst_imm = -ins->inst_imm;
1485 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1486 ins->opcode = OP_X86_DEC_REG;
1488 case OP_COMPARE_IMM:
1489 case OP_ICOMPARE_IMM:
1490 /* OP_COMPARE_IMM (reg, 0)
1492 * OP_X86_TEST_NULL (reg)
1495 ins->opcode = OP_X86_TEST_NULL;
1497 case OP_X86_COMPARE_MEMBASE_IMM:
1499 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1500 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1502 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1503 * OP_COMPARE_IMM reg, imm
1505 * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1507 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1508 ins->inst_basereg == last_ins->inst_destbasereg &&
1509 ins->inst_offset == last_ins->inst_offset) {
1510 ins->opcode = OP_COMPARE_IMM;
1511 ins->sreg1 = last_ins->sreg1;
1513 /* check if we can remove cmp reg,0 with test null */
1515 ins->opcode = OP_X86_TEST_NULL;
1519 case OP_X86_PUSH_MEMBASE:
1520 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
1521 last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1522 ins->inst_basereg == last_ins->inst_destbasereg &&
1523 ins->inst_offset == last_ins->inst_offset) {
1524 ins->opcode = OP_X86_PUSH;
1525 ins->sreg1 = last_ins->sreg1;
1530 mono_peephole_ins (bb, ins);
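/*
 * Examples of the pass-1 rewrites above (illustration only):
 *
 *   OP_IADD_IMM  d <- s, 12   ->  OP_X86_LEA_MEMBASE d <- [s + 12]   (avoids the sreg1 == dreg restriction)
 *   OP_IADD_IMM  r <- r, 1    ->  OP_X86_INC_REG r
 *   OP_COMPARE_IMM r, 0       ->  OP_X86_TEST_NULL r
 */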
1535 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1539 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1540 switch (ins->opcode) {
1542 /* reg = 0 -> XOR (reg, reg) */
1543 /* XOR sets cflags on x86, so we can't always do it */
1544 if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
1547 ins->opcode = OP_IXOR;
1548 ins->sreg1 = ins->dreg;
1549 ins->sreg2 = ins->dreg;
1552 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
1553 * since it takes 3 bytes instead of 7.
1555 for (ins2 = ins->next; ins2; ins2 = ins2->next) {
1556 if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1557 ins2->opcode = OP_STORE_MEMBASE_REG;
1558 ins2->sreg1 = ins->dreg;
1560 else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1561 ins2->opcode = OP_STOREI4_MEMBASE_REG;
1562 ins2->sreg1 = ins->dreg;
1564 else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
1565 /* Continue iteration */
1574 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1575 ins->opcode = OP_X86_INC_REG;
1579 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1580 ins->opcode = OP_X86_DEC_REG;
1584 mono_peephole_ins (bb, ins);
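/*
 * Example of the pass-2 rewrite above (illustration only), valid because the
 * following instruction ignores the condition flags that xor clobbers:
 *
 *   OP_ICONST               r <- 0             ->  OP_IXOR r <- r, r
 *   OP_STOREI4_MEMBASE_IMM  [base+off] <- 0    ->  OP_STOREI4_MEMBASE_REG [base+off] <- r
 */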
1589 * mono_arch_lowering_pass:
1591 * Converts complex opcodes into simpler ones so that each IR instruction
1592 * corresponds to one machine instruction.
1595 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1597 MonoInst *ins, *next;
1600 * FIXME: Need to add more instructions, but the current machine
1601 * description can't model some parts of the composite instructions like
1604 MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
1605 switch (ins->opcode) {
1608 case OP_IDIV_UN_IMM:
1609 case OP_IREM_UN_IMM:
1611 * Keep the cases where we can generate optimized code; otherwise convert
1612 * to the non-imm variant.
1614 if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0)
1616 mono_decompose_op_imm (cfg, bb, ins);
1623 bb->max_vreg = cfg->next_vreg;
1627 branch_cc_table [] = {
1628 X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1629 X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1630 X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1633 /* Maps CMP_... constants to X86_CC_... constants */
1636 X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
1637 X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
1641 cc_signed_table [] = {
1642 TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
1643 FALSE, FALSE, FALSE, FALSE
1646 static unsigned char*
1647 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
1649 #define XMM_TEMP_REG 0
1650 /* This SSE2 optimization must not be done with OPT_SIMD in place, as it clobbers xmm0. */
1651 /* The xmm pass decomposes OP_FCONV_ ops anyway. */
1652 if (cfg->opt & MONO_OPT_SSE2 && size < 8 && !(cfg->opt & MONO_OPT_SIMD)) {
1653 /* optimize by assigning a local var for this use so we avoid
1654 * the stack manipulations */
1655 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1656 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1657 x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
1658 x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
1659 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1661 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1663 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
1666 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
1667 x86_fnstcw_membase(code, X86_ESP, 0);
1668 x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
1669 x86_alu_reg_imm (code, X86_OR, dreg, 0xc00);
1670 x86_mov_membase_reg (code, X86_ESP, 2, dreg, 2);
1671 x86_fldcw_membase (code, X86_ESP, 2);
1673 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1674 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
1675 x86_pop_reg (code, dreg);
1676 /* FIXME: need the high register
1677 * x86_pop_reg (code, dreg_high);
1680 x86_push_reg (code, X86_EAX); // SP = SP - 4
1681 x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
1682 x86_pop_reg (code, dreg);
1684 x86_fldcw_membase (code, X86_ESP, 0);
1685 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
1688 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1690 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
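/*
 * A minimal C reference model of what the sequences above compute (an
 * illustrative sketch only, not used by the JIT): the conversion truncates
 * toward zero, which is why the x87 path forces the rounding-control bits
 * (the 0xc00 mask) before fistp, and narrow sizes are then sign- or
 * zero-extended.
 */
static G_GNUC_UNUSED gint32
float_to_int_reference (double val, int size, gboolean is_signed)
{
	gint32 truncated = (gint32)val; /* C casts truncate toward zero, like cvttsd2si or fistp with RC = 11b */

	switch (size) {
	case 1:
		return is_signed ? (gint8)truncated : (guint8)truncated;
	case 2:
		return is_signed ? (gint16)truncated : (guint16)truncated;
	default:
		return truncated;
	}
}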
1694 static unsigned char*
1695 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
1697 int sreg = tree->sreg1;
1698 int need_touch = FALSE;
1700 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
1709 * If requested stack size is larger than one page,
1710 * perform stack-touch operation
1713 * Generate stack probe code.
1714 * Under Windows, it is necessary to allocate one page at a time,
1715 * "touching" stack after each successful sub-allocation. This is
1716 * because of the way stack growth is implemented - there is a
1717 * guard page before the lowest stack page that is currently committed.
1718 * Stack normally grows sequentially so OS traps access to the
1719 * guard page and commits more pages when needed.
1721 x86_test_reg_imm (code, sreg, ~0xFFF);
1722 br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1724 br[2] = code; /* loop */
1725 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
1726 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
1729 * By the end of the loop, sreg2 is smaller than 0x1000, so the init routine
1730 * that follows only initializes the last part of the area.
1732 /* Same as the init code below with size==0x1000 */
1733 if (tree->flags & MONO_INST_INIT) {
1734 x86_push_reg (code, X86_EAX);
1735 x86_push_reg (code, X86_ECX);
1736 x86_push_reg (code, X86_EDI);
1737 x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2));
1738 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1739 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
1741 x86_prefix (code, X86_REP_PREFIX);
1743 x86_pop_reg (code, X86_EDI);
1744 x86_pop_reg (code, X86_ECX);
1745 x86_pop_reg (code, X86_EAX);
1748 x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
1749 x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
1750 br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
1751 x86_patch (br[3], br[2]);
1752 x86_test_reg_reg (code, sreg, sreg);
1753 br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1754 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1756 br[1] = code; x86_jump8 (code, 0);
1758 x86_patch (br[0], code);
1759 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1760 x86_patch (br[1], code);
1761 x86_patch (br[4], code);
1764 x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1);
1766 if (tree->flags & MONO_INST_INIT) {
1768 if (tree->dreg != X86_EAX && sreg != X86_EAX) {
1769 x86_push_reg (code, X86_EAX);
1772 if (tree->dreg != X86_ECX && sreg != X86_ECX) {
1773 x86_push_reg (code, X86_ECX);
1776 if (tree->dreg != X86_EDI && sreg != X86_EDI) {
1777 x86_push_reg (code, X86_EDI);
1781 x86_shift_reg_imm (code, X86_SHR, sreg, 2);
1782 if (sreg != X86_ECX)
1783 x86_mov_reg_reg (code, X86_ECX, sreg, 4);
1784 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1786 x86_lea_membase (code, X86_EDI, X86_ESP, offset);
1788 x86_prefix (code, X86_REP_PREFIX);
1791 if (tree->dreg != X86_EDI && sreg != X86_EDI)
1792 x86_pop_reg (code, X86_EDI);
1793 if (tree->dreg != X86_ECX && sreg != X86_ECX)
1794 x86_pop_reg (code, X86_ECX);
1795 if (tree->dreg != X86_EAX && sreg != X86_EAX)
1796 x86_pop_reg (code, X86_EAX);
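/*
 * C sketch of the page-at-a-time probing emitted above (illustration only; the
 * function name and the volatile-pointer trick are ours, 0x1000 matches the
 * emitted page size). Touching each page keeps the guard page moving, so the OS
 * commits the stack incrementally instead of faulting on one large ESP jump.
 */
static G_GNUC_UNUSED void
stack_probe_sketch (size_t size)
{
	volatile char probe = 0;
	volatile char *sp = &probe;

	while (size > 0x1000) {
		sp -= 0x1000;
		(void)*sp;	/* touch the page inside the current guard region */
		size -= 0x1000;
	}
	sp -= size;
	(void)*sp;
}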
1803 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
1808 /* Move return value to the target register */
1809 switch (ins->opcode) {
1812 case OP_CALL_MEMBASE:
1813 if (ins->dreg != X86_EAX)
1814 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
1818 case OP_VCALL_MEMBASE:
1821 case OP_VCALL2_MEMBASE:
1822 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
1823 if (cinfo->ret.storage == ArgValuetypeInReg) {
1824 /* Pop the destination address from the stack */
1825 x86_pop_reg (code, X86_ECX);
1827 for (quad = 0; quad < 2; quad ++) {
1828 switch (cinfo->ret.pair_storage [quad]) {
1830 g_assert (cinfo->ret.pair_regs [quad] != X86_ECX);
1831 x86_mov_membase_reg (code, X86_ECX, (quad * sizeof (gpointer)), cinfo->ret.pair_regs [quad], sizeof (gpointer));
1836 g_assert_not_reached ();
1842 MonoCallInst *call = (MonoCallInst*)ins;
1843 if (call->method && !mono_method_signature (call->method)->ret->byref && mono_method_signature (call->method)->ret->type == MONO_TYPE_R4) {
1844 /* Avoid some precision issues by saving/reloading the return value */
1845 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1846 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
1847 x86_fld_membase (code, X86_ESP, 0, FALSE);
1848 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1860 * mono_x86_emit_tls_get:
1861 * @code: buffer to store code to
1862 * @dreg: hard register where to place the result
1863 * @tls_offset: offset info
1865 * mono_x86_emit_tls_get emits in @code the native code that puts in
1866 * the dreg register the item in the thread local storage identified
1869 * Returns: a pointer to the end of the stored code
1872 mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
1874 #ifdef PLATFORM_WIN32
1876 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
1877 * Journal and/or a disassembly of the TlsGet () function.
1879 g_assert (tls_offset < 64);
1880 x86_prefix (code, X86_FS_PREFIX);
1881 x86_mov_reg_mem (code, dreg, 0x18, 4);
1882 /* Dunno what this does but TlsGetValue () contains it */
1883 x86_alu_membase_imm (code, X86_AND, dreg, 0x34, 0);
1884 x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
1886 if (optimize_for_xen) {
1887 x86_prefix (code, X86_GS_PREFIX);
1888 x86_mov_reg_mem (code, dreg, 0, 4);
1889 x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4);
1891 x86_prefix (code, X86_GS_PREFIX);
1892 x86_mov_reg_mem (code, dreg, tls_offset, 4);
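/*
 * What the emitted code amounts to, in pseudo-C (an illustrative sketch; the TEB
 * field names come from public documentation, not from this file):
 *
 *   Windows:  teb   = fs:[0x18];                                   // TEB self pointer
 *             value = *(gpointer*)(teb + 3600 + tls_offset * 4);   // TlsSlots array
 *             (the AND at offset 0x34 most likely clears TEB->LastErrorValue,
 *              mirroring what TlsGetValue () itself does on success)
 *   Linux:    value = *(gpointer*)(gs:[0] + tls_offset)            // via the thread pointer, Xen-friendly
 *             or a direct gs:[tls_offset] load when not optimizing for Xen.
 */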
1899 * emit_load_volatile_arguments:
1901 * Load volatile arguments from the stack to the original input registers.
1902 * Required before a tail call.
1905 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
1907 MonoMethod *method = cfg->method;
1908 MonoMethodSignature *sig;
1913 /* FIXME: Generate intermediate code instead */
1915 sig = mono_method_signature (method);
1917 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1919 /* This is the opposite of the code in emit_prolog */
1921 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1922 ArgInfo *ainfo = cinfo->args + i;
1924 inst = cfg->args [i];
1926 if (sig->hasthis && (i == 0))
1927 arg_type = &mono_defaults.object_class->byval_arg;
1929 arg_type = sig->params [i - sig->hasthis];
1932 * On x86, the arguments are either in their original stack locations, or in
1935 if (inst->opcode == OP_REGVAR) {
1936 g_assert (ainfo->storage == ArgOnStack);
1938 x86_mov_membase_reg (code, X86_EBP, inst->inst_offset, inst->dreg, 4);
1945 #define REAL_PRINT_REG(text,reg) \
1946 mono_assert (reg >= 0); \
1947 x86_push_reg (code, X86_EAX); \
1948 x86_push_reg (code, X86_EDX); \
1949 x86_push_reg (code, X86_ECX); \
1950 x86_push_reg (code, reg); \
1951 x86_push_imm (code, reg); \
1952 x86_push_imm (code, text " %d %p\n"); \
1953 x86_mov_reg_imm (code, X86_EAX, printf); \
1954 x86_call_reg (code, X86_EAX); \
1955 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3*4); \
1956 x86_pop_reg (code, X86_ECX); \
1957 x86_pop_reg (code, X86_EDX); \
1958 x86_pop_reg (code, X86_EAX);
1960 /* benchmark and set based on cpu */
1961 #define LOOP_ALIGNMENT 8
1962 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
1967 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1972 guint8 *code = cfg->native_code + cfg->code_len;
1975 if (cfg->opt & MONO_OPT_LOOP) {
1976 int pad, align = LOOP_ALIGNMENT;
1977 /* set alignment depending on cpu */
1978 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
1980 /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
1981 x86_padding (code, pad);
1982 cfg->code_len += pad;
1983 bb->native_offset = cfg->code_len;
1987 if (cfg->verbose_level > 2)
1988 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
1990 cpos = bb->max_offset;
1992 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1993 MonoProfileCoverageInfo *cov = cfg->coverage_info;
1994 g_assert (!cfg->compile_aot);
1997 cov->data [bb->dfn].cil_code = bb->cil_code;
1998 /* this is not thread safe, but good enough */
1999 x86_inc_mem (code, &cov->data [bb->dfn].count);
2002 offset = code - cfg->native_code;
2004 mono_debug_open_block (cfg, bb, offset);
2006 MONO_BB_FOR_EACH_INS (bb, ins) {
2007 offset = code - cfg->native_code;
2009 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2011 if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
2012 cfg->code_size *= 2;
2013 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2014 code = cfg->native_code + offset;
2015 mono_jit_stats.code_reallocs++;
2018 if (cfg->debug_info)
2019 mono_debug_record_line_number (cfg, ins, offset);
2021 switch (ins->opcode) {
2023 x86_mul_reg (code, ins->sreg2, TRUE);
2026 x86_mul_reg (code, ins->sreg2, FALSE);
2028 case OP_X86_SETEQ_MEMBASE:
2029 case OP_X86_SETNE_MEMBASE:
2030 x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ? X86_CC_EQ : X86_CC_NE,
2031 ins->inst_basereg, ins->inst_offset, TRUE);
2033 case OP_STOREI1_MEMBASE_IMM:
2034 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
2036 case OP_STOREI2_MEMBASE_IMM:
2037 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
2039 case OP_STORE_MEMBASE_IMM:
2040 case OP_STOREI4_MEMBASE_IMM:
2041 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
2043 case OP_STOREI1_MEMBASE_REG:
2044 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
2046 case OP_STOREI2_MEMBASE_REG:
2047 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
2049 case OP_STORE_MEMBASE_REG:
2050 case OP_STOREI4_MEMBASE_REG:
2051 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
2053 case OP_STORE_MEM_IMM:
2054 x86_mov_mem_imm (code, ins->inst_p0, ins->inst_c0, 4);
2057 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2061 /* These are created by the cprop pass so they use inst_imm as the source */
2062 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2065 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE);
2068 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE);
2070 case OP_LOAD_MEMBASE:
2071 case OP_LOADI4_MEMBASE:
2072 case OP_LOADU4_MEMBASE:
2073 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
2075 case OP_LOADU1_MEMBASE:
2076 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
2078 case OP_LOADI1_MEMBASE:
2079 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
2081 case OP_LOADU2_MEMBASE:
2082 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
2084 case OP_LOADI2_MEMBASE:
2085 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
2087 case OP_ICONV_TO_I1:
2089 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2091 case OP_ICONV_TO_I2:
2093 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2095 case OP_ICONV_TO_U1:
2096 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
2098 case OP_ICONV_TO_U2:
2099 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
2103 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
2105 case OP_COMPARE_IMM:
2106 case OP_ICOMPARE_IMM:
2107 x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
2109 case OP_X86_COMPARE_MEMBASE_REG:
2110 x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2112 case OP_X86_COMPARE_MEMBASE_IMM:
2113 x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2115 case OP_X86_COMPARE_MEMBASE8_IMM:
2116 x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2118 case OP_X86_COMPARE_REG_MEMBASE:
2119 x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
2121 case OP_X86_COMPARE_MEM_IMM:
2122 x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
2124 case OP_X86_TEST_NULL:
2125 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
2127 case OP_X86_ADD_MEMBASE_IMM:
2128 x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2130 case OP_X86_ADD_REG_MEMBASE:
2131 x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
2133 case OP_X86_SUB_MEMBASE_IMM:
2134 x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2136 case OP_X86_SUB_REG_MEMBASE:
2137 x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
2139 case OP_X86_AND_MEMBASE_IMM:
2140 x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2142 case OP_X86_OR_MEMBASE_IMM:
2143 x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2145 case OP_X86_XOR_MEMBASE_IMM:
2146 x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2148 case OP_X86_ADD_MEMBASE_REG:
2149 x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2151 case OP_X86_SUB_MEMBASE_REG:
2152 x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2154 case OP_X86_AND_MEMBASE_REG:
2155 x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2157 case OP_X86_OR_MEMBASE_REG:
2158 x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2160 case OP_X86_XOR_MEMBASE_REG:
2161 x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2163 case OP_X86_INC_MEMBASE:
2164 x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
2166 case OP_X86_INC_REG:
2167 x86_inc_reg (code, ins->dreg);
2169 case OP_X86_DEC_MEMBASE:
2170 x86_dec_membase (code, ins->inst_basereg, ins->inst_offset);
2172 case OP_X86_DEC_REG:
2173 x86_dec_reg (code, ins->dreg);
2175 case OP_X86_MUL_REG_MEMBASE:
2176 x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
2178 case OP_X86_AND_REG_MEMBASE:
2179 x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset);
2181 case OP_X86_OR_REG_MEMBASE:
2182 x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset);
2184 case OP_X86_XOR_REG_MEMBASE:
2185 x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset);
2188 x86_breakpoint (code);
2190 case OP_RELAXED_NOP:
2191 x86_prefix (code, X86_REP_PREFIX);
2199 case OP_DUMMY_STORE:
2200 case OP_NOT_REACHED:
2206 x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
2210 x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
2215 x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
2219 x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
2224 x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
2228 x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
2233 x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
2237 x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
2240 x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
2244 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
2249 * The code is the same for div/rem; the allocator will allocate dreg
2250 * to EAX/EDX as appropriate.
2252 if (ins->sreg2 == X86_EDX) {
2253 /* cdq clobbers this */
2254 x86_push_reg (code, ins->sreg2);
2256 x86_div_membase (code, X86_ESP, 0, TRUE);
2257 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2260 x86_div_reg (code, ins->sreg2, TRUE);
2265 if (ins->sreg2 == X86_EDX) {
2266 x86_push_reg (code, ins->sreg2);
2267 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2268 x86_div_membase (code, X86_ESP, 0, FALSE);
2269 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2271 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2272 x86_div_reg (code, ins->sreg2, FALSE);
2276 x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2278 x86_div_reg (code, ins->sreg2, TRUE);
2281 int power = mono_is_power_of_two (ins->inst_imm);
2283 g_assert (ins->sreg1 == X86_EAX);
2284 g_assert (ins->dreg == X86_EAX);
2285 g_assert (power >= 0);
2288 /* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
2290 x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
2292 * If the dividend is >= 0, this does nothing. If it is negative, it
2293 * transforms %eax=0 into %eax=0, and %eax=1 into %eax=-1.
2295 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
2296 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
2298 /* Based on gcc code */
2300 /* Add compensation for negative dividends */
2302 x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
2303 x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
2304 /* Compute remainder */
2305 x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
2306 /* Remove compensation */
2307 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
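/*
 * C model of the remainder-by-power-of-two sequences above (an illustrative
 * sketch, function name ours). The bias (1 << power) - 1 is added for negative
 * dividends so that the AND yields a remainder with the sign of the dividend,
 * as the CLI rem semantics require.
 *
 *   static gint32
 *   irem_pow2_model (gint32 dividend, int power)
 *   {
 *           guint32 mask = (1u << power) - 1;
 *           guint32 bias = dividend < 0 ? mask : 0;    // cdq; shr edx, 32 - power
 *           return (gint32)((((guint32)dividend + bias) & mask) - bias);
 *   }
 *
 *   e.g. irem_pow2_model (-5, 2) == ((-5 + 3) & 3) - 3 == 2 - 3 == -1, matching -5 % 4.
 */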
2312 x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
2316 x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
2319 x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
2323 x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
2326 g_assert (ins->sreg2 == X86_ECX);
2327 x86_shift_reg (code, X86_SHL, ins->dreg);
2330 g_assert (ins->sreg2 == X86_ECX);
2331 x86_shift_reg (code, X86_SAR, ins->dreg);
2335 x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
2338 case OP_ISHR_UN_IMM:
2339 x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
2342 g_assert (ins->sreg2 == X86_ECX);
2343 x86_shift_reg (code, X86_SHR, ins->dreg);
2347 x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
2350 guint8 *jump_to_end;
2352 /* handle shifts below 32 bits */
2353 x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
2354 x86_shift_reg (code, X86_SHL, ins->sreg1);
2356 x86_test_reg_imm (code, X86_ECX, 32);
2357 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
2359 /* handle shifts over 31 bits */
2360 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2361 x86_clear_reg (code, ins->sreg1);
2363 x86_patch (jump_to_end, code);
2367 guint8 *jump_to_end;
2369 /* handle shifts below 32 bits */
2370 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2371 x86_shift_reg (code, X86_SAR, ins->backend.reg3);
2373 x86_test_reg_imm (code, X86_ECX, 32);
2374 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2376 /* handle shifts over 31 bits */
2377 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2378 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);
2380 x86_patch (jump_to_end, code);
2384 guint8 *jump_to_end;
2386 /* handle shifts below 32 bits */
2387 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2388 x86_shift_reg (code, X86_SHR, ins->backend.reg3);
2390 x86_test_reg_imm (code, X86_ECX, 32);
2391 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2393 /* handle shifts over 31 bits */
2394 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2395 x86_clear_reg (code, ins->backend.reg3);
2397 x86_patch (jump_to_end, code);
2401 if (ins->inst_imm >= 32) {
2402 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2403 x86_clear_reg (code, ins->sreg1);
2404 x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
2406 x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
2407 x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
2411 if (ins->inst_imm >= 32) {
2412 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2413 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
2414 x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
2416 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2417 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
2420 case OP_LSHR_UN_IMM:
2421 if (ins->inst_imm >= 32) {
2422 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2423 x86_clear_reg (code, ins->backend.reg3);
2424 x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
2426 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2427 x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
2431 x86_not_reg (code, ins->sreg1);
2434 x86_neg_reg (code, ins->sreg1);
2438 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2442 switch (ins->inst_imm) {
2446 if (ins->dreg != ins->sreg1)
2447 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2448 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2451 /* LEA r1, [r2 + r2*2] */
2452 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2455 /* LEA r1, [r2 + r2*4] */
2456 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2459 /* LEA r1, [r2 + r2*2] */
2461 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2462 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2465 /* LEA r1, [r2 + r2*8] */
2466 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
2469 /* LEA r1, [r2 + r2*4] */
2471 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2472 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2475 /* LEA r1, [r2 + r2*2] */
2477 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2478 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2481 /* LEA r1, [r2 + r2*4] */
2482 /* LEA r1, [r1 + r1*4] */
2483 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2484 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2487 /* LEA r1, [r2 + r2*4] */
2489 /* LEA r1, [r1 + r1*4] */
2490 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2491 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2492 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2495 x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
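/*
 * Rough summary of the OP_MUL_IMM strength reduction above: multiplications
 * by 3, 5 and 9 become a single lea; 6, 10, 12, 25 and 100 combine lea with
 * an add, shl or second lea; anything else falls back to imul reg, reg, imm.
 */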
2500 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2501 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2503 case OP_IMUL_OVF_UN: {
2504 /* the mul operation and the exception check should most likely be split */
2505 int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
2506 /*g_assert (ins->sreg2 == X86_EAX);
2507 g_assert (ins->dreg == X86_EAX);*/
2508 if (ins->sreg2 == X86_EAX) {
2509 non_eax_reg = ins->sreg1;
2510 } else if (ins->sreg1 == X86_EAX) {
2511 non_eax_reg = ins->sreg2;
2513 /* no need to save since we're going to store to it anyway */
2514 if (ins->dreg != X86_EAX) {
2516 x86_push_reg (code, X86_EAX);
2518 x86_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
2519 non_eax_reg = ins->sreg2;
2521 if (ins->dreg == X86_EDX) {
2524 x86_push_reg (code, X86_EAX);
2526 } else if (ins->dreg != X86_EAX) {
2528 x86_push_reg (code, X86_EDX);
2530 x86_mul_reg (code, non_eax_reg, FALSE);
2531 /* save before the check since pop and mov don't change the flags */
2532 if (ins->dreg != X86_EAX)
2533 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
2535 x86_pop_reg (code, X86_EDX);
2537 x86_pop_reg (code, X86_EAX);
2538 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2542 x86_mov_reg_imm (code, ins->dreg, ins->inst_c0);
2545 g_assert_not_reached ();
2546 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2547 x86_mov_reg_imm (code, ins->dreg, 0);
2550 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2551 x86_mov_reg_imm (code, ins->dreg, 0);
2553 case OP_LOAD_GOTADDR:
2554 x86_call_imm (code, 0);
2556 * The patch needs to point to the pop, since the GOT offset needs
2557 * to be added to that address.
2559 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
2560 x86_pop_reg (code, ins->dreg);
2561 x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
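/*
 * PIC GOT address computation: "call 0; pop %dreg" materializes the current
 * EIP, and the 0xf0f0f0f0 immediate is only a placeholder that the
 * MONO_PATCH_INFO_GOT_OFFSET patch later rewrites with the displacement from
 * the popped address to the GOT.
 */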
2564 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2565 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4);
2567 case OP_X86_PUSH_GOT_ENTRY:
2568 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2569 x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0);
2572 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2576 * Note: this 'frame destruction' logic is useful for tail calls, too.
2577 * Keep in sync with the code in emit_epilog.
2581 /* FIXME: no tracing support... */
2582 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2583 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
2584 /* reset offset to make max_len work */
2585 offset = code - cfg->native_code;
2587 g_assert (!cfg->method->save_lmf);
2589 code = emit_load_volatile_arguments (cfg, code);
2591 if (cfg->used_int_regs & (1 << X86_EBX))
2593 if (cfg->used_int_regs & (1 << X86_EDI))
2595 if (cfg->used_int_regs & (1 << X86_ESI))
2598 x86_lea_membase (code, X86_ESP, X86_EBP, pos);
2600 if (cfg->used_int_regs & (1 << X86_ESI))
2601 x86_pop_reg (code, X86_ESI);
2602 if (cfg->used_int_regs & (1 << X86_EDI))
2603 x86_pop_reg (code, X86_EDI);
2604 if (cfg->used_int_regs & (1 << X86_EBX))
2605 x86_pop_reg (code, X86_EBX);
2607 /* restore ESP/EBP */
2609 offset = code - cfg->native_code;
2610 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2611 x86_jump32 (code, 0);
2613 cfg->disable_aot = TRUE;
2617 /* ensure ins->sreg1 is not NULL
2618 * note that cmp DWORD PTR [eax], eax is one byte shorter than
2619 * cmp DWORD PTR [eax], 0
2621 x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1);
2624 int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX;
2625 x86_push_reg (code, hreg);
2626 x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie);
2627 x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4);
2628 x86_pop_reg (code, hreg);
2637 call = (MonoCallInst*)ins;
2638 if (ins->flags & MONO_INST_HAS_METHOD)
2639 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2641 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2642 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2643 /* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
2644 * bytes to pop, we want to use pops. GCC does this (note it won't happen
2645 * for P4 or i686 because gcc will avoid using pop push at all). But we aren't
2646 * smart enough to do that optimization yet.
2648 * It turns out that on my P4, doing two pops for 8 bytes on the stack makes
2649 * mcs bootstrap slow down. However, doing 1 pop for 4 bytes creates a small,
2650 * but noticeable, speedup (most likely from locality benefits). People with other processors should
2651 * check on theirs to see what happens.
2653 if (call->stack_usage == 4) {
2654 /* we want to use registers that won't get used soon, so use
2655 * ecx, as eax will get allocated first. edx is used by long calls,
2656 * so we can't use that.
2659 x86_pop_reg (code, X86_ECX);
2661 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2664 code = emit_move_return_value (cfg, ins, code);
2670 case OP_VOIDCALL_REG:
2672 call = (MonoCallInst*)ins;
2673 x86_call_reg (code, ins->sreg1);
2674 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2675 if (call->stack_usage == 4)
2676 x86_pop_reg (code, X86_ECX);
2678 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2680 code = emit_move_return_value (cfg, ins, code);
2682 case OP_FCALL_MEMBASE:
2683 case OP_LCALL_MEMBASE:
2684 case OP_VCALL_MEMBASE:
2685 case OP_VCALL2_MEMBASE:
2686 case OP_VOIDCALL_MEMBASE:
2687 case OP_CALL_MEMBASE:
2688 call = (MonoCallInst*)ins;
2689 x86_call_membase (code, ins->sreg1, ins->inst_offset);
2690 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2691 if (call->stack_usage == 4)
2692 x86_pop_reg (code, X86_ECX);
2694 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2696 code = emit_move_return_value (cfg, ins, code);
2699 x86_push_reg (code, ins->sreg1);
2701 case OP_X86_PUSH_IMM:
2702 x86_push_imm (code, ins->inst_imm);
2704 case OP_X86_PUSH_MEMBASE:
2705 x86_push_membase (code, ins->inst_basereg, ins->inst_offset);
2707 case OP_X86_PUSH_OBJ:
2708 x86_alu_reg_imm (code, X86_SUB, X86_ESP, ins->inst_imm);
2709 x86_push_reg (code, X86_EDI);
2710 x86_push_reg (code, X86_ESI);
2711 x86_push_reg (code, X86_ECX);
2712 if (ins->inst_offset)
2713 x86_lea_membase (code, X86_ESI, ins->inst_basereg, ins->inst_offset);
2715 x86_mov_reg_reg (code, X86_ESI, ins->inst_basereg, 4);
2716 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
2717 x86_mov_reg_imm (code, X86_ECX, (ins->inst_imm >> 2));
2719 x86_prefix (code, X86_REP_PREFIX);
2721 x86_pop_reg (code, X86_ECX);
2722 x86_pop_reg (code, X86_ESI);
2723 x86_pop_reg (code, X86_EDI);
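/*
 * OP_X86_PUSH_OBJ pushes a valuetype by value: reserve inst_imm bytes, save
 * EDI/ESI/ECX, point ESI at the source and EDI at the reserved slot (ESP + 12
 * skips the three saved registers), then copy inst_imm >> 2 dwords with a
 * rep-prefixed movsd before restoring the registers.
 */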
2726 x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
2728 case OP_X86_LEA_MEMBASE:
2729 x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
2732 x86_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
2735 /* keep alignment */
2736 x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
2737 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2738 code = mono_emit_stack_alloc (code, ins);
2739 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2741 case OP_LOCALLOC_IMM: {
2742 guint32 size = ins->inst_imm;
2743 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2745 if (ins->flags & MONO_INST_INIT) {
2746 /* FIXME: Optimize this */
2747 x86_mov_reg_imm (code, ins->dreg, size);
2748 ins->sreg1 = ins->dreg;
2750 code = mono_emit_stack_alloc (code, ins);
2751 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2753 x86_alu_reg_imm (code, X86_SUB, X86_ESP, size);
2754 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2759 x86_push_reg (code, ins->sreg1);
2760 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2761 (gpointer)"mono_arch_throw_exception");
2765 x86_push_reg (code, ins->sreg1);
2766 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2767 (gpointer)"mono_arch_rethrow_exception");
2770 case OP_CALL_HANDLER:
2771 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2772 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2773 x86_call_imm (code, 0);
2774 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2776 case OP_START_HANDLER: {
2777 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2778 x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4);
2781 case OP_ENDFINALLY: {
2782 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2783 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2787 case OP_ENDFILTER: {
2788 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2789 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2790 /* The local allocator will put the result into EAX */
2796 ins->inst_c0 = code - cfg->native_code;
2799 if (ins->flags & MONO_INST_BRLABEL) {
2800 if (ins->inst_i0->inst_c0) {
2801 x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2803 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2804 if ((cfg->opt & MONO_OPT_BRANCH) &&
2805 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
2806 x86_jump8 (code, 0);
2808 x86_jump32 (code, 0);
2811 if (ins->inst_target_bb->native_offset) {
2812 x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
2814 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2815 if ((cfg->opt & MONO_OPT_BRANCH) &&
2816 x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
2817 x86_jump8 (code, 0);
2819 x86_jump32 (code, 0);
2824 x86_jump_reg (code, ins->sreg1);
2837 x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
2838 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
2840 case OP_COND_EXC_EQ:
2841 case OP_COND_EXC_NE_UN:
2842 case OP_COND_EXC_LT:
2843 case OP_COND_EXC_LT_UN:
2844 case OP_COND_EXC_GT:
2845 case OP_COND_EXC_GT_UN:
2846 case OP_COND_EXC_GE:
2847 case OP_COND_EXC_GE_UN:
2848 case OP_COND_EXC_LE:
2849 case OP_COND_EXC_LE_UN:
2850 case OP_COND_EXC_IEQ:
2851 case OP_COND_EXC_INE_UN:
2852 case OP_COND_EXC_ILT:
2853 case OP_COND_EXC_ILT_UN:
2854 case OP_COND_EXC_IGT:
2855 case OP_COND_EXC_IGT_UN:
2856 case OP_COND_EXC_IGE:
2857 case OP_COND_EXC_IGE_UN:
2858 case OP_COND_EXC_ILE:
2859 case OP_COND_EXC_ILE_UN:
2860 EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
2862 case OP_COND_EXC_OV:
2863 case OP_COND_EXC_NO:
2865 case OP_COND_EXC_NC:
2866 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
2868 case OP_COND_EXC_IOV:
2869 case OP_COND_EXC_INO:
2870 case OP_COND_EXC_IC:
2871 case OP_COND_EXC_INC:
2872 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), ins->inst_p1);
2884 EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
2892 case OP_CMOV_INE_UN:
2893 case OP_CMOV_IGE_UN:
2894 case OP_CMOV_IGT_UN:
2895 case OP_CMOV_ILE_UN:
2896 case OP_CMOV_ILT_UN:
2897 g_assert (ins->dreg == ins->sreg1);
2898 x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
2901 /* floating point opcodes */
2903 double d = *(double *)ins->inst_p0;
2905 if ((d == 0.0) && (mono_signbit (d) == 0)) {
2907 } else if (d == 1.0) {
2910 if (cfg->compile_aot) {
2911 guint32 *val = (guint32*)&d;
2912 x86_push_imm (code, val [1]);
2913 x86_push_imm (code, val [0]);
2914 x86_fld_membase (code, X86_ESP, 0, TRUE);
2915 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
2918 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0);
2919 x86_fld (code, NULL, TRUE);
2925 float f = *(float *)ins->inst_p0;
2927 if ((f == 0.0) && (mono_signbit (f) == 0)) {
2929 } else if (f == 1.0) {
2932 if (cfg->compile_aot) {
2933 guint32 val = *(guint32*)&f;
2934 x86_push_imm (code, val);
2935 x86_fld_membase (code, X86_ESP, 0, FALSE);
2936 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2939 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0);
2940 x86_fld (code, NULL, FALSE);
2945 case OP_STORER8_MEMBASE_REG:
2946 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
2948 case OP_LOADR8_SPILL_MEMBASE:
2949 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2952 case OP_LOADR8_MEMBASE:
2953 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2955 case OP_STORER4_MEMBASE_REG:
2956 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
2958 case OP_LOADR4_MEMBASE:
2959 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
2961 case OP_ICONV_TO_R4: /* FIXME: change precision */
2962 case OP_ICONV_TO_R8:
2963 x86_push_reg (code, ins->sreg1);
2964 x86_fild_membase (code, X86_ESP, 0, FALSE);
2965 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2967 case OP_ICONV_TO_R_UN:
2968 x86_push_imm (code, 0);
2969 x86_push_reg (code, ins->sreg1);
2970 x86_fild_membase (code, X86_ESP, 0, TRUE);
2971 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
2973 case OP_X86_FP_LOAD_I8:
2974 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2976 case OP_X86_FP_LOAD_I4:
2977 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
2979 case OP_FCONV_TO_R4:
2980 /* FIXME: nothing to do ?? */
2982 case OP_FCONV_TO_I1:
2983 code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE);
2985 case OP_FCONV_TO_U1:
2986 code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE);
2988 case OP_FCONV_TO_I2:
2989 code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE);
2991 case OP_FCONV_TO_U2:
2992 code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE);
2994 case OP_FCONV_TO_I4:
2996 code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
2998 case OP_FCONV_TO_I8:
2999 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
3000 x86_fnstcw_membase(code, X86_ESP, 0);
3001 x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2);
3002 x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00);
3003 x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2);
3004 x86_fldcw_membase (code, X86_ESP, 2);
3005 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
3006 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
3007 x86_pop_reg (code, ins->dreg);
3008 x86_pop_reg (code, ins->backend.reg3);
3009 x86_fldcw_membase (code, X86_ESP, 0);
3010 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
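/*
 * The sequence above converts with truncation by temporarily switching the
 * x87 rounding mode: the control word is saved on the stack, its RC bits
 * (0xc00) are set to "round toward zero", fistp stores the 64-bit integer,
 * the two halves are popped into dreg (low word) and backend.reg3 (high
 * word), and the original control word is restored.
 */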
3012 case OP_LCONV_TO_R8_2:
3013 x86_push_reg (code, ins->sreg2);
3014 x86_push_reg (code, ins->sreg1);
3015 x86_fild_membase (code, X86_ESP, 0, TRUE);
3016 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3018 case OP_LCONV_TO_R4_2:
3019 x86_push_reg (code, ins->sreg2);
3020 x86_push_reg (code, ins->sreg1);
3021 x86_fild_membase (code, X86_ESP, 0, TRUE);
3022 /* Change precision */
3023 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
3024 x86_fld_membase (code, X86_ESP, 0, FALSE);
3025 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3027 case OP_LCONV_TO_R_UN:
3028 case OP_LCONV_TO_R_UN_2: {
3029 static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
3032 /* load 64bit integer to FP stack */
3033 x86_push_imm (code, 0);
3034 x86_push_reg (code, ins->sreg2);
3035 x86_push_reg (code, ins->sreg1);
3036 x86_fild_membase (code, X86_ESP, 0, TRUE);
3037 /* store as 80bit FP value */
3038 x86_fst80_membase (code, X86_ESP, 0);
3040 /* test if lreg is negative */
3041 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3042 br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
3044 /* add correction constant mn */
3045 x86_fld80_mem (code, mn);
3046 x86_fld80_membase (code, X86_ESP, 0);
3047 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3048 x86_fst80_membase (code, X86_ESP, 0);
3050 x86_patch (br, code);
3052 x86_fld80_membase (code, X86_ESP, 0);
3053 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12);
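/*
 * mn[] appears to be 2^64 encoded as an 80-bit extended double (significand
 * 0x8000000000000000, biased exponent 0x403f). fild treats the 64-bit source
 * as signed, so when the high word's sign bit is set the constant is added to
 * turn the signed result back into the intended unsigned value.
 */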
3057 case OP_LCONV_TO_OVF_I:
3058 case OP_LCONV_TO_OVF_I4_2: {
3059 guint8 *br [3], *label [1];
3063 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3065 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
3067 /* If the low word top bit is set, see if we are negative */
3068 br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
3069 /* We are not negative (no top bit set), check that our top word is zero */
3070 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3071 br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
3074 /* throw exception */
3075 tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
3077 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
3078 if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
3079 x86_jump8 (code, 0);
3081 x86_jump32 (code, 0);
3083 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
3084 x86_jump32 (code, 0);
3088 x86_patch (br [0], code);
3089 /* our top bit is set, check that top word is 0xffffffff */
3090 x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
3092 x86_patch (br [1], code);
3093 /* nope, emit exception */
3094 br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
3095 x86_patch (br [2], label [0]);
3097 if (ins->dreg != ins->sreg1)
3098 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
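/*
 * The checks above accept a 64-bit value only if its high word is the sign
 * extension of the low word: 0x00000000 when the low word is non-negative,
 * 0xffffffff when it is negative. Everything else takes the OverflowException
 * path emitted earlier.
 */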
3102 /* Not needed on the fp stack */
3105 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3108 x86_fp_op_reg (code, X86_FSUB, 1, TRUE);
3111 x86_fp_op_reg (code, X86_FMUL, 1, TRUE);
3114 x86_fp_op_reg (code, X86_FDIV, 1, TRUE);
3122 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3127 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3134 * it really doesn't make sense to inline all this code,
3135 * it's here just to show that things may not be as simple as they appear.
3138 guchar *check_pos, *end_tan, *pop_jump;
3139 x86_push_reg (code, X86_EAX);
3142 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3144 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3145 x86_fstp (code, 0); /* pop the 1.0 */
3147 x86_jump8 (code, 0);
3149 x86_fp_op (code, X86_FADD, 0);
3153 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3155 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3158 x86_patch (pop_jump, code);
3159 x86_fstp (code, 0); /* pop the 1.0 */
3160 x86_patch (check_pos, code);
3161 x86_patch (end_tan, code);
3163 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3164 x86_pop_reg (code, X86_EAX);
3171 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3180 g_assert (cfg->opt & MONO_OPT_CMOV);
3181 g_assert (ins->dreg == ins->sreg1);
3182 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3183 x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
3186 g_assert (cfg->opt & MONO_OPT_CMOV);
3187 g_assert (ins->dreg == ins->sreg1);
3188 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3189 x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
3192 g_assert (cfg->opt & MONO_OPT_CMOV);
3193 g_assert (ins->dreg == ins->sreg1);
3194 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3195 x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
3198 g_assert (cfg->opt & MONO_OPT_CMOV);
3199 g_assert (ins->dreg == ins->sreg1);
3200 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3201 x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
3207 x86_fxch (code, ins->inst_imm);
3212 x86_push_reg (code, X86_EAX);
3213 /* we need to exchange ST(0) with ST(1) */
3216 /* this requires a loop, because fprem sometimes
3217 * returns a partial remainder */
3219 /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
3220 /* x86_fprem1 (code); */
3223 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
3225 x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
3230 x86_pop_reg (code, X86_EAX);
3234 if (cfg->opt & MONO_OPT_FCMOV) {
3235 x86_fcomip (code, 1);
3239 /* this overwrites EAX */
3240 EMIT_FPCOMPARE(code);
3241 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3244 if (cfg->opt & MONO_OPT_FCMOV) {
3245 /* zeroing the register at the start results in
3246 * shorter and faster code (we can also remove the widening op)
3248 guchar *unordered_check;
3249 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3250 x86_fcomip (code, 1);
3252 unordered_check = code;
3253 x86_branch8 (code, X86_CC_P, 0, FALSE);
3254 x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
3255 x86_patch (unordered_check, code);
3258 if (ins->dreg != X86_EAX)
3259 x86_push_reg (code, X86_EAX);
3261 EMIT_FPCOMPARE(code);
3262 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3263 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3264 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3265 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3267 if (ins->dreg != X86_EAX)
3268 x86_pop_reg (code, X86_EAX);
3272 if (cfg->opt & MONO_OPT_FCMOV) {
3273 /* zeroing the register at the start results in
3274 * shorter and faster code (we can also remove the widening op)
3276 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3277 x86_fcomip (code, 1);
3279 if (ins->opcode == OP_FCLT_UN) {
3280 guchar *unordered_check = code;
3281 guchar *jump_to_end;
3282 x86_branch8 (code, X86_CC_P, 0, FALSE);
3283 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3285 x86_jump8 (code, 0);
3286 x86_patch (unordered_check, code);
3287 x86_inc_reg (code, ins->dreg);
3288 x86_patch (jump_to_end, code);
3290 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3294 if (ins->dreg != X86_EAX)
3295 x86_push_reg (code, X86_EAX);
3297 EMIT_FPCOMPARE(code);
3298 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3299 if (ins->opcode == OP_FCLT_UN) {
3300 guchar *is_not_zero_check, *end_jump;
3301 is_not_zero_check = code;
3302 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3304 x86_jump8 (code, 0);
3305 x86_patch (is_not_zero_check, code);
3306 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3308 x86_patch (end_jump, code);
3310 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3311 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3313 if (ins->dreg != X86_EAX)
3314 x86_pop_reg (code, X86_EAX);
3318 if (cfg->opt & MONO_OPT_FCMOV) {
3319 /* zeroing the register at the start results in
3320 * shorter and faster code (we can also remove the widening op)
3322 guchar *unordered_check;
3323 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3324 x86_fcomip (code, 1);
3326 if (ins->opcode == OP_FCGT) {
3327 unordered_check = code;
3328 x86_branch8 (code, X86_CC_P, 0, FALSE);
3329 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3330 x86_patch (unordered_check, code);
3332 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3336 if (ins->dreg != X86_EAX)
3337 x86_push_reg (code, X86_EAX);
3339 EMIT_FPCOMPARE(code);
3340 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3341 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3342 if (ins->opcode == OP_FCGT_UN) {
3343 guchar *is_not_zero_check, *end_jump;
3344 is_not_zero_check = code;
3345 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3347 x86_jump8 (code, 0);
3348 x86_patch (is_not_zero_check, code);
3349 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3351 x86_patch (end_jump, code);
3353 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3354 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3356 if (ins->dreg != X86_EAX)
3357 x86_pop_reg (code, X86_EAX);
3360 if (cfg->opt & MONO_OPT_FCMOV) {
3361 guchar *jump = code;
3362 x86_branch8 (code, X86_CC_P, 0, TRUE);
3363 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3364 x86_patch (jump, code);
3367 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3368 EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
3371 /* Branch if C013 != 100 */
3372 if (cfg->opt & MONO_OPT_FCMOV) {
3373 /* branch if !ZF or (PF|CF) */
3374 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3375 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3376 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
3379 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3380 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3383 if (cfg->opt & MONO_OPT_FCMOV) {
3384 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3387 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3390 if (cfg->opt & MONO_OPT_FCMOV) {
3391 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3392 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3395 if (ins->opcode == OP_FBLT_UN) {
3396 guchar *is_not_zero_check, *end_jump;
3397 is_not_zero_check = code;
3398 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3400 x86_jump8 (code, 0);
3401 x86_patch (is_not_zero_check, code);
3402 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3404 x86_patch (end_jump, code);
3406 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3410 if (cfg->opt & MONO_OPT_FCMOV) {
3411 if (ins->opcode == OP_FBGT) {
3414 /* skip branch if C1=1 */
3416 x86_branch8 (code, X86_CC_P, 0, FALSE);
3417 /* branch if (C0 | C3) = 1 */
3418 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3419 x86_patch (br1, code);
3421 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3425 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3426 if (ins->opcode == OP_FBGT_UN) {
3427 guchar *is_not_zero_check, *end_jump;
3428 is_not_zero_check = code;
3429 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3431 x86_jump8 (code, 0);
3432 x86_patch (is_not_zero_check, code);
3433 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3435 x86_patch (end_jump, code);
3437 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3440 /* Branch if C013 == 100 or 001 */
3441 if (cfg->opt & MONO_OPT_FCMOV) {
3444 /* skip branch if C1=1 */
3446 x86_branch8 (code, X86_CC_P, 0, FALSE);
3447 /* branch if (C0 | C3) = 1 */
3448 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
3449 x86_patch (br1, code);
3452 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3453 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3454 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3455 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3458 /* Branch if C013 == 000 */
3459 if (cfg->opt & MONO_OPT_FCMOV) {
3460 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
3463 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3466 /* Branch if C013=000 or 100 */
3467 if (cfg->opt & MONO_OPT_FCMOV) {
3470 /* skip branch if C1=1 */
3472 x86_branch8 (code, X86_CC_P, 0, FALSE);
3473 /* branch if C0=0 */
3474 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
3475 x86_patch (br1, code);
3478 x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
3479 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
3480 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3483 /* Branch if C013 != 001 */
3484 if (cfg->opt & MONO_OPT_FCMOV) {
3485 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3486 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
3489 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3490 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3494 x86_push_reg (code, X86_EAX);
3497 x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
3498 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3499 x86_pop_reg (code, X86_EAX);
3501 /* Have to clean up the fp stack before throwing the exception */
3503 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3506 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
3508 x86_patch (br1, code);
3512 code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset);
3515 case OP_MEMORY_BARRIER: {
3516 /* Not needed on x86 */
3519 case OP_ATOMIC_ADD_I4: {
3520 int dreg = ins->dreg;
3522 if (dreg == ins->inst_basereg) {
3523 x86_push_reg (code, ins->sreg2);
3527 if (dreg != ins->sreg2)
3528 x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);
3530 x86_prefix (code, X86_LOCK_PREFIX);
3531 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
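/*
 * lock xadd atomically adds dreg to the memory word and leaves the previous
 * value in dreg, so OP_ATOMIC_ADD_I4 yields the old value; the _NEW variant
 * below re-adds sreg2 afterwards to return the new one.
 */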
3533 if (dreg != ins->dreg) {
3534 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3535 x86_pop_reg (code, dreg);
3540 case OP_ATOMIC_ADD_NEW_I4: {
3541 int dreg = ins->dreg;
3543 /* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
3544 if (ins->sreg2 == dreg) {
3545 if (dreg == X86_EBX) {
3547 if (ins->inst_basereg == X86_EDI)
3551 if (ins->inst_basereg == X86_EBX)
3554 } else if (ins->inst_basereg == dreg) {
3555 if (dreg == X86_EBX) {
3557 if (ins->sreg2 == X86_EDI)
3561 if (ins->sreg2 == X86_EBX)
3566 if (dreg != ins->dreg) {
3567 x86_push_reg (code, dreg);
3570 x86_mov_reg_reg (code, dreg, ins->sreg2, 4);
3571 x86_prefix (code, X86_LOCK_PREFIX);
3572 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
3573 /* dreg contains the old value, add with sreg2 value */
3574 x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);
3576 if (ins->dreg != dreg) {
3577 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3578 x86_pop_reg (code, dreg);
3583 case OP_ATOMIC_EXCHANGE_I4:
3584 case OP_ATOMIC_CAS_IMM_I4: {
3586 int sreg2 = ins->sreg2;
3587 int breg = ins->inst_basereg;
3589 /* cmpxchg uses eax as comparand, need to make sure we can use it
3590 * hack to overcome limits in x86 reg allocator
3591 * (req: dreg == eax and sreg2 != eax and breg != eax)
3593 g_assert (ins->dreg == X86_EAX);
3595 /* We need the EAX reg for the cmpxchg */
3596 if (ins->sreg2 == X86_EAX) {
3597 x86_push_reg (code, X86_EDX);
3598 x86_mov_reg_reg (code, X86_EDX, X86_EAX, 4);
3602 if (breg == X86_EAX) {
3603 x86_push_reg (code, X86_ESI);
3604 x86_mov_reg_reg (code, X86_ESI, X86_EAX, 4);
3608 if (ins->opcode == OP_ATOMIC_CAS_IMM_I4) {
3609 x86_mov_reg_imm (code, X86_EAX, ins->backend.data);
3611 x86_prefix (code, X86_LOCK_PREFIX);
3612 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3614 x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);
3616 br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
3617 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3618 br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
3619 x86_patch (br [1], br [0]);
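/*
 * Exchange is emulated with a compare-and-swap loop: load the current value
 * into %eax, then lock cmpxchg the new value in. If another thread raced, ZF
 * is clear, cmpxchg has already reloaded %eax with the current memory value,
 * and the branch loops back to retry the cmpxchg.
 */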
3622 if (breg != ins->inst_basereg)
3623 x86_pop_reg (code, X86_ESI);
3625 if (ins->sreg2 != sreg2)
3626 x86_pop_reg (code, X86_EDX);
3630 #ifdef MONO_ARCH_SIMD_INTRINSICS
3632 x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3635 x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3638 x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3641 x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3644 x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3647 x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3650 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3651 x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3654 x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3657 x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3660 x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3663 x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3666 x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
3669 x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1);
3672 x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1);
3675 x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3678 x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3681 x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3684 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1);
3687 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1);
3690 case OP_PSHUFLEW_HIGH:
3691 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3692 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1);
3694 case OP_PSHUFLEW_LOW:
3695 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3696 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0);
3699 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3700 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0);
3704 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3707 x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3710 x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3713 x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3716 x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3719 x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3722 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3723 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3726 x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3729 x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3732 x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3735 x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3738 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3741 x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3744 x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3747 x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1);
3750 case OP_EXTRACT_MASK:
3751 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1);
3755 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2);
3758 x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2);
3761 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2);
3765 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2);
3768 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2);
3771 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2);
3774 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2);
3778 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2);
3781 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2);
3784 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2);
3787 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2);
3791 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2);
3794 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2);
3797 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2);
3801 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2);
3804 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2);
3807 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2);
3811 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2);
3814 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2);
3818 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2);
3821 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2);
3824 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUD, ins->sreg1, ins->sreg2);
3828 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2);
3831 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2);
3834 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2);
3838 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2);
3841 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2);
3844 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2);
3847 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2);
3851 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2);
3854 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2);
3857 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2);
3860 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2);
3863 case OP_PSUM_ABS_DIFF:
3864 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2);
3867 case OP_UNPACK_LOWB:
3868 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2);
3870 case OP_UNPACK_LOWW:
3871 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2);
3873 case OP_UNPACK_LOWD:
3874 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2);
3876 case OP_UNPACK_LOWQ:
3877 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2);
3879 case OP_UNPACK_LOWPS:
3880 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
3882 case OP_UNPACK_LOWPD:
3883 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
3886 case OP_UNPACK_HIGHB:
3887 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2);
3889 case OP_UNPACK_HIGHW:
3890 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2);
3892 case OP_UNPACK_HIGHD:
3893 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2);
3895 case OP_UNPACK_HIGHQ:
3896 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2);
3898 case OP_UNPACK_HIGHPS:
3899 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
3901 case OP_UNPACK_HIGHPD:
3902 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
3906 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2);
3909 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2);
3912 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2);
3915 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2);
3918 case OP_PADDB_SAT_UN:
3919 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2);
3921 case OP_PSUBB_SAT_UN:
3922 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2);
3924 case OP_PADDW_SAT_UN:
3925 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2);
3927 case OP_PSUBW_SAT_UN:
3928 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2);
3932 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2);
3935 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2);
3938 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2);
3941 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2);
3945 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2);
3948 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2);
3951 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2);
3953 case OP_PMULW_HIGH_UN:
3954 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2);
3957 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2);
3961 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm);
3964 x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2);
3968 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm);
3971 x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2);
3975 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm);
3978 x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2);
3982 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm);
3985 x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2);
3989 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm);
3992 x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2);
3996 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm);
3999 x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2);
4003 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm);
4006 x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2);
4010 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm);
4013 x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2);
4017 x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
4020 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4024 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4026 x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
4027 x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I1, FALSE);
4031 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4033 x86_shift_reg_imm (code, X86_SHR, ins->dreg, 16);
4034 x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I2, TRUE);
4038 x86_sse_alu_pd_membase_reg (code, X86_SSE_MOVHPD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
4040 x86_sse_alu_sd_membase_reg (code, X86_SSE_MOVSD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
4041 x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE);
4045 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->sreg1, ins->sreg2, ins->inst_c0);
4047 case OP_EXTRACTX_U2:
4048 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0);
4050 case OP_INSERTX_U1_SLOW:
4051 /* sreg1 is the extracted ireg (scratch)
4052  * sreg2 is the ireg to be inserted (scratch)
4053  * dreg is the xreg that receives the value */
4055 /*clear the bits from the extracted word*/
4056 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
4057 /*shift the value to insert if needed*/
4058 if (ins->inst_c0 & 1)
4059 x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8);
4060 /*join them together*/
4061 x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
4062 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
4064 case OP_INSERTX_I4_SLOW:
4065 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
4066 x86_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
4067 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
4070 case OP_INSERTX_R4_SLOW:
4071 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
4072 /*TODO if inst_c0 == 0 use movss*/
4073 x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 0, ins->inst_c0 * 2);
4074 x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 2, ins->inst_c0 * 2 + 1);
4076 case OP_INSERTX_R8_SLOW:
4077 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
4079 x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVHPD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4081 x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVSD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4084 case OP_STOREX_MEMBASE_REG:
4085 case OP_STOREX_MEMBASE:
4086 x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4088 case OP_LOADX_MEMBASE:
4089 x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4091 case OP_LOADX_ALIGNED_MEMBASE:
4092 x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4094 case OP_STOREX_ALIGNED_MEMBASE_REG:
4095 x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4097 case OP_STOREX_NTA_MEMBASE_REG:
4098 x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset);
4100 case OP_PREFETCH_MEMBASE:
4101 x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
4105 /*FIXME the peephole pass should have killed this*/
4106 if (ins->dreg != ins->sreg1)
4107 x86_movaps_reg_reg (code, ins->dreg, ins->sreg1);
4110 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
4112 case OP_ICONV_TO_R8_RAW:
4113 x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
4114 x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
4117 case OP_FCONV_TO_R8_X:
4118 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
4119 x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4122 case OP_XCONV_R8_TO_I4:
4123 x86_cvttsd2si (code, ins->dreg, ins->sreg1);
4124 switch (ins->backend.source_opcode) {
4125 case OP_FCONV_TO_I1:
4126 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
4128 case OP_FCONV_TO_U1:
4129 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4131 case OP_FCONV_TO_I2:
4132 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
4134 case OP_FCONV_TO_U2:
4135 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
4141 /*FIXME this causes a partial register stall, maybe it would not be that bad to use shift + mask + or*/
4142 /*The +4 is to get a mov ?h, ?l over the same reg.*/
4143 x86_mov_reg_reg (code, ins->dreg + 4, ins->dreg, 1);
4144 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0);
4145 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1);
4146 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
4149 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0);
4150 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1);
4151 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
4154 x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
4155 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
4158 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
4159 x86_movd_xreg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4160 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
4163 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
4164 x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4165 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0x44);
4169 g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
4170 g_assert_not_reached ();
4173 if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
4174 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4175 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4176 g_assert_not_reached ();
4182 cfg->code_len = code - cfg->native_code;
4185 #endif /* DISABLE_JIT */
4188 mono_arch_register_lowlevel_calls (void)
4193 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4195 MonoJumpInfo *patch_info;
4196 gboolean compile_aot = !run_cctors;
4198 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4199 unsigned char *ip = patch_info->ip.i + code;
4200 const unsigned char *target;
4202 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4205 switch (patch_info->type) {
4206 case MONO_PATCH_INFO_BB:
4207 case MONO_PATCH_INFO_LABEL:
4210 /* No need to patch these */
4215 switch (patch_info->type) {
4216 case MONO_PATCH_INFO_IP:
4217 *((gconstpointer *)(ip)) = target;
4219 case MONO_PATCH_INFO_CLASS_INIT: {
4221 /* Might already have been changed to a nop */
4222 x86_call_code (code, 0);
4223 x86_patch (ip, target);
4226 case MONO_PATCH_INFO_ABS:
4227 case MONO_PATCH_INFO_METHOD:
4228 case MONO_PATCH_INFO_METHOD_JUMP:
4229 case MONO_PATCH_INFO_INTERNAL_METHOD:
4230 case MONO_PATCH_INFO_BB:
4231 case MONO_PATCH_INFO_LABEL:
4232 case MONO_PATCH_INFO_RGCTX_FETCH:
4233 case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
4234 case MONO_PATCH_INFO_MONITOR_ENTER:
4235 case MONO_PATCH_INFO_MONITOR_EXIT:
4236 x86_patch (ip, target);
4238 case MONO_PATCH_INFO_NONE:
4241 guint32 offset = mono_arch_get_patch_offset (ip);
4242 *((gconstpointer *)(ip + offset)) = target;
4250 mono_arch_emit_prolog (MonoCompile *cfg)
4252 MonoMethod *method = cfg->method;
4254 MonoMethodSignature *sig;
4256 int alloc_size, pos, max_offset, i, cfa_offset;
4259 cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
4261 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4262 cfg->code_size += 512;
4264 code = cfg->native_code = g_malloc (cfg->code_size);
4266 /* Offset between ESP and the CFA */
4270 cfa_offset = sizeof (gpointer);
4271 mono_emit_unwind_op_def_cfa (cfg, code, X86_ESP, sizeof (gpointer));
4272 // IP saved at CFA - 4
4273 /* There is no IP reg on x86 */
4274 mono_emit_unwind_op_offset (cfg, code, X86_NREG, -cfa_offset);
4276 x86_push_reg (code, X86_EBP);
4277 cfa_offset += sizeof (gpointer);
4278 mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
4279 mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
4280 x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
4281 mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
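/*
 * Standard frame setup with matching unwind info: the CFA starts out at
 * %esp + 4 (just above the return address), is adjusted as %ebp is pushed,
 * and is then rebased onto %ebp so later stack adjustments need no further
 * unwind ops.
 */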
4283 alloc_size = cfg->stack_offset;
4286 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4287 /* Might need to attach the thread to the JIT or change the domain for the callback */
4288 if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
4289 guint8 *buf, *no_domain_branch;
4291 code = mono_x86_emit_tls_get (code, X86_EAX, appdomain_tls_offset);
4292 x86_alu_reg_imm (code, X86_CMP, X86_EAX, GPOINTER_TO_UINT (cfg->domain));
4293 no_domain_branch = code;
4294 x86_branch8 (code, X86_CC_NE, 0, 0);
4295 code = mono_x86_emit_tls_get ( code, X86_EAX, lmf_tls_offset);
4296 x86_test_reg_reg (code, X86_EAX, X86_EAX);
4298 x86_branch8 (code, X86_CC_NE, 0, 0);
4299 x86_patch (no_domain_branch, code);
4300 x86_push_imm (code, cfg->domain);
4301 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4302 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4303 x86_patch (buf, code);
4304 #ifdef PLATFORM_WIN32
4305 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4306 /* FIXME: Add a separate key for LMF to avoid this */
4307 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4311 g_assert (!cfg->compile_aot);
4312 x86_push_imm (code, cfg->domain);
4313 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4314 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4318 if (method->save_lmf) {
4319 pos += sizeof (MonoLMF);
4321 /* save the current IP */
4322 mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4323 x86_push_imm_template (code);
4324 cfa_offset += sizeof (gpointer);
4326 /* save all caller saved regs */
4327 x86_push_reg (code, X86_EBP);
4328 cfa_offset += sizeof (gpointer);
4329 x86_push_reg (code, X86_ESI);
4330 cfa_offset += sizeof (gpointer);
4331 mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
4332 x86_push_reg (code, X86_EDI);
4333 cfa_offset += sizeof (gpointer);
4334 mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
4335 x86_push_reg (code, X86_EBX);
4336 cfa_offset += sizeof (gpointer);
4337 mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);
4339 if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
4341 * Optimized version which uses the mono_lmf TLS variable instead of indirection
4342 * through the mono_lmf_addr TLS variable.
4344 /* %eax = previous_lmf */
4345 x86_prefix (code, X86_GS_PREFIX);
4346 x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
4347 /* skip esp + method_info + lmf */
4348 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
4349 /* push previous_lmf */
4350 x86_push_reg (code, X86_EAX);
4352 x86_prefix (code, X86_GS_PREFIX);
4353 x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
4355 /* get the address of lmf for the current thread */
4357 * This is performance critical so we try to use some tricks to make
4361 if (lmf_addr_tls_offset != -1) {
4362 /* Load lmf quickly using the GS register */
4363 code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
4364 #ifdef PLATFORM_WIN32
4365 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4366 /* FIXME: Add a separate key for LMF to avoid this */
4367 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4370 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
4373 /* Skip esp + method info */
4374 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
4377 x86_push_reg (code, X86_EAX);
4378 /* push *lmf (previous_lmf) */
4379 x86_push_membase (code, X86_EAX, 0);
4381 x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
4385 if (cfg->used_int_regs & (1 << X86_EBX)) {
4386 x86_push_reg (code, X86_EBX);
4388 cfa_offset += sizeof (gpointer);
4389 mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);
4392 if (cfg->used_int_regs & (1 << X86_EDI)) {
4393 x86_push_reg (code, X86_EDI);
4395 cfa_offset += sizeof (gpointer);
4396 mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
4399 if (cfg->used_int_regs & (1 << X86_ESI)) {
4400 x86_push_reg (code, X86_ESI);
4402 cfa_offset += sizeof (gpointer);
4403 mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
4409 /* the original alloc_size is aligned, but %ebp and the return address have also been pushed, so realign to account for them */
4410 if (mono_do_x86_stack_align) {
4411 int tot = alloc_size + pos + 4 + 4; /* ret ip + ebp */
4412 tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
4413 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
4417 /* See mono_emit_stack_alloc */
4418 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
4419 guint32 remaining_size = alloc_size;
4420 while (remaining_size >= 0x1000) {
4421 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
4422 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
4423 remaining_size -= 0x1000;
4426 x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
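/*
 * Large frames are grown one page (0x1000 bytes) at a time, touching the new
 * top of stack after each step, so the OS guard page (on Windows) or the
 * altstack SIGSEGV handler can commit stack memory incrementally instead of
 * faulting on one large sub.
 */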
4428 x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
4432 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED ||
4433 cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) {
4434 x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
4437 #if DEBUG_STACK_ALIGNMENT
4438 /* check the stack is aligned */
4439 if (method->wrapper_type == MONO_WRAPPER_NONE) {
4440 x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
4441 x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
4442 x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
4443 x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
4444 x86_breakpoint (code);
4448 /* compute max_offset in order to use short forward jumps */
4450 if (cfg->opt & MONO_OPT_BRANCH) {
4451 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4453 bb->max_offset = max_offset;
4455 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4457 /* max alignment for loops */
4458 if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4459 max_offset += LOOP_ALIGNMENT;
4461 MONO_BB_FOR_EACH_INS (bb, ins) {
4462 if (ins->opcode == OP_LABEL)
4463 ins->inst_c1 = max_offset;
4465 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
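/*
 * max_offset is a conservative upper bound on each basic block's native
 * offset, built from the per-opcode maximum lengths in ins_get_spec (),
 * so later branch emission can safely choose the short 8 bit form for
 * forward jumps whose target is known to be close enough.
 */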
4470 /* store runtime generic context */
4471 if (cfg->rgctx_var) {
4472 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP);
4474 x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4);
4477 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4478 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4480 /* load arguments allocated to registers from the stack */
4481 sig = mono_method_signature (method);
4484 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4485 inst = cfg->args [pos];
4486 if (inst->opcode == OP_REGVAR) {
4487 x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
4488 if (cfg->verbose_level > 2)
4489 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4494 cfg->code_len = code - cfg->native_code;
4496 g_assert (cfg->code_len < cfg->code_size);
4502 mono_arch_emit_epilog (MonoCompile *cfg)
4504 MonoMethod *method = cfg->method;
4505 MonoMethodSignature *sig = mono_method_signature (method);
4507 guint32 stack_to_pop;
4509 int max_epilog_size = 16;
4512 if (cfg->method->save_lmf)
4513 max_epilog_size += 128;
4515 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4516 cfg->code_size *= 2;
4517 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4518 mono_jit_stats.code_reallocs++;
4521 code = cfg->native_code + cfg->code_len;
4523 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4524 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4526 /* the code restoring the registers must be kept in sync with OP_JMP */
4529 if (method->save_lmf) {
4530 gint32 prev_lmf_reg;
4531 gint32 lmf_offset = -sizeof (MonoLMF);
4533 /* check if we need to restore protection of the stack after a stack overflow */
4534 if (mono_get_jit_tls_offset () != -1) {
4536 code = mono_x86_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
4537 /* we load the value in a separate instruction: this mechanism may be
4538 * used later as a safer way to do thread interruption
4540 x86_mov_reg_membase (code, X86_ECX, X86_ECX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 4);
4541 x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
4543 x86_branch8 (code, X86_CC_Z, 0, FALSE);
4544 /* note that the call trampoline will preserve eax/edx */
4545 x86_call_reg (code, X86_ECX);
4546 x86_patch (patch, code);
4548 /* FIXME: maybe save the jit tls in the prolog */
4550 if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
4552 * Optimized version which uses the mono_lmf TLS variable instead of indirection
4553 * through the mono_lmf_addr TLS variable.
4555 /* reg = previous_lmf */
4556 x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
4558 /* lmf = previous_lmf */
4559 x86_prefix (code, X86_GS_PREFIX);
4560 x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
4562 /* Find a spare register */
4563 switch (mini_type_get_underlying_type (cfg->generic_sharing_context, sig->ret)->type) {
4566 prev_lmf_reg = X86_EDI;
4567 cfg->used_int_regs |= (1 << X86_EDI);
4570 prev_lmf_reg = X86_EDX;
4574 /* reg = previous_lmf */
4575 x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
4578 x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
4580 /* *(lmf) = previous_lmf */
4581 x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
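/*
 * The LMF pushed in the prolog is now unlinked from the per-thread list;
 * the callee saved registers are restored below from their copies inside
 * the MonoLMF rather than from ordinary spill slots.
 */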
4584 /* restore caller saved regs */
4585 if (cfg->used_int_regs & (1 << X86_EBX)) {
4586 x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
4589 if (cfg->used_int_regs & (1 << X86_EDI)) {
4590 x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
4592 if (cfg->used_int_regs & (1 << X86_ESI)) {
4593 x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
4596 /* EBP is restored by LEAVE */
4598 if (cfg->used_int_regs & (1 << X86_EBX)) {
4601 if (cfg->used_int_regs & (1 << X86_EDI)) {
4604 if (cfg->used_int_regs & (1 << X86_ESI)) {
4609 x86_lea_membase (code, X86_ESP, X86_EBP, pos);
4611 if (cfg->used_int_regs & (1 << X86_ESI)) {
4612 x86_pop_reg (code, X86_ESI);
4614 if (cfg->used_int_regs & (1 << X86_EDI)) {
4615 x86_pop_reg (code, X86_EDI);
4617 if (cfg->used_int_regs & (1 << X86_EBX)) {
4618 x86_pop_reg (code, X86_EBX);
4622 /* Load returned vtypes into registers if needed */
4623 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
4624 if (cinfo->ret.storage == ArgValuetypeInReg) {
4625 for (quad = 0; quad < 2; quad ++) {
4626 switch (cinfo->ret.pair_storage [quad]) {
4628 x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), 4);
4630 case ArgOnFloatFpStack:
4631 x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), FALSE);
4633 case ArgOnDoubleFpStack:
4634 x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), TRUE);
4639 g_assert_not_reached ();
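/*
 * For stdcall signatures, and for valuetype returns whose hidden buffer
 * pointer was passed on the stack, the callee has to pop stack space on
 * return, so the epilog ends with "ret imm16" instead of a plain ret.
 */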
4646 if (CALLCONV_IS_STDCALL (sig)) {
4647 MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
4649 stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
4650 } else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret) && (cinfo->ret.storage == ArgOnStack))
4656 x86_ret_imm (code, stack_to_pop);
4660 cfg->code_len = code - cfg->native_code;
4662 g_assert (cfg->code_len < cfg->code_size);
4666 mono_arch_emit_exceptions (MonoCompile *cfg)
4668 MonoJumpInfo *patch_info;
4671 MonoClass *exc_classes [16];
4672 guint8 *exc_throw_start [16], *exc_throw_end [16];
4676 /* Compute needed space */
4677 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4678 if (patch_info->type == MONO_PATCH_INFO_EXC)
4683 * make sure we have enough space for exceptions
4684 * 16 is the size of two push_imm instructions and a call
4686 if (cfg->compile_aot)
4687 code_size = exc_count * 32;
4689 code_size = exc_count * 16;
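/*
 * Each throw sequence emitted below is roughly:
 *   push <distance from the faulting IP to the end of the sequence>
 *   push <exception class token>
 *   call mono_arch_throw_corlib_exception
 * (the throw helper presumably uses the pushed distance to recover the
 * faulting IP from the call's return address). Sequences for the same
 * exception class are shared: later throw sites only push their own
 * distance and jump into the already emitted sequence.
 */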
4691 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4692 cfg->code_size *= 2;
4693 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4694 mono_jit_stats.code_reallocs++;
4697 code = cfg->native_code + cfg->code_len;
4700 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4701 switch (patch_info->type) {
4702 case MONO_PATCH_INFO_EXC: {
4703 MonoClass *exc_class;
4707 x86_patch (patch_info->ip.i + cfg->native_code, code);
4709 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4710 g_assert (exc_class);
4711 throw_ip = patch_info->ip.i;
4713 /* Find a throw sequence for the same exception class */
4714 for (i = 0; i < nthrows; ++i)
4715 if (exc_classes [i] == exc_class)
4718 x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip);
4719 x86_jump_code (code, exc_throw_start [i]);
4720 patch_info->type = MONO_PATCH_INFO_NONE;
4725 /* Compute size of code following the push <OFFSET> */
4728 if ((code - cfg->native_code) - throw_ip < 126 - size) {
4729 /* Use the shorter form */
4731 x86_push_imm (code, 0);
4735 x86_push_imm (code, 0xf0f0f0f0);
4740 exc_classes [nthrows] = exc_class;
4741 exc_throw_start [nthrows] = code;
4744 x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
4745 patch_info->data.name = "mono_arch_throw_corlib_exception";
4746 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4747 patch_info->ip.i = code - cfg->native_code;
4748 x86_call_code (code, 0);
4749 x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
4754 exc_throw_end [nthrows] = code;
4766 cfg->code_len = code - cfg->native_code;
4768 g_assert (cfg->code_len < cfg->code_size);
4772 mono_arch_flush_icache (guint8 *code, gint size)
4778 mono_arch_flush_register_windows (void)
4783 mono_arch_is_inst_imm (gint64 imm)
4789 * Support for fast access to the thread-local lmf structure using the GS
4790 * segment register on NPTL + kernel 2.6.x.
4793 static gboolean tls_offset_inited = FALSE;
4796 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4798 if (!tls_offset_inited) {
4799 if (!getenv ("MONO_NO_TLS")) {
4800 #ifdef PLATFORM_WIN32
4802 * We need to init this multiple times, since when we are first called, the key might not
4803 * be initialized yet.
4805 appdomain_tls_offset = mono_domain_get_tls_key ();
4806 lmf_tls_offset = mono_get_jit_tls_key ();
4807 thread_tls_offset = mono_thread_get_tls_key ();
4809 /* Only 64 tls entries can be accessed using inline code */
4810 if (appdomain_tls_offset >= 64)
4811 appdomain_tls_offset = -1;
4812 if (lmf_tls_offset >= 64)
4813 lmf_tls_offset = -1;
4814 if (thread_tls_offset >= 64)
4815 thread_tls_offset = -1;
4818 optimize_for_xen = access ("/proc/xen", F_OK) == 0;
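/*
 * Under Xen, GS segment based TLS accesses are much slower (segment
 * handling presumably traps into the hypervisor), so the fast TLS paths
 * are disabled when /proc/xen exists.
 */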
4820 tls_offset_inited = TRUE;
4821 appdomain_tls_offset = mono_domain_get_tls_offset ();
4822 lmf_tls_offset = mono_get_lmf_tls_offset ();
4823 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4824 thread_tls_offset = mono_thread_get_tls_offset ();
4831 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4835 #ifdef MONO_ARCH_HAVE_IMT
4837 // Linear handler, the bsearch head compare is shorter
4838 //[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
4839 //[1 + 1] x86_branch8(inst,cond,imm,is_signed)
4840 // x86_patch(ins,target)
4841 //[1 + 5] x86_jump_mem(inst,mem)
4844 #define BR_SMALL_SIZE 2
4845 #define BR_LARGE_SIZE 5
4846 #define JUMP_IMM_SIZE 6
4847 #define ENABLE_WRONG_METHOD_CHECK 0
4850 imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
4852 int i, distance = 0;
4853 for (i = start; i < target; ++i)
4854 distance += imt_entries [i]->chunk_size;
4859 * LOCKING: called with the domain lock held
4862 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4863 gpointer fail_tramp)
4867 guint8 *code, *start;
4869 for (i = 0; i < count; ++i) {
4870 MonoIMTCheckItem *item = imt_entries [i];
4871 if (item->is_equals) {
4872 if (item->check_target_idx) {
4873 if (!item->compare_done)
4874 item->chunk_size += CMP_SIZE;
4875 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
4878 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2;
4880 item->chunk_size += JUMP_IMM_SIZE;
4881 #if ENABLE_WRONG_METHOD_CHECK
4882 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
4887 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
4888 imt_entries [item->check_target_idx]->compare_done = TRUE;
4890 size += item->chunk_size;
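/*
 * Two-pass emission: the loop above computed a conservative chunk_size for
 * every IMT entry, which fixes the total thunk size and lets the second
 * pass pick between 8 bit and 32 bit branch forms; the inter-entry
 * branches are patched at the very end, once all code_target addresses
 * are known.
 */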
4893 code = mono_method_alloc_generic_virtual_thunk (domain, size);
4895 code = mono_code_manager_reserve (domain->code_mp, size);
4897 for (i = 0; i < count; ++i) {
4898 MonoIMTCheckItem *item = imt_entries [i];
4899 item->code_target = code;
4900 if (item->is_equals) {
4901 if (item->check_target_idx) {
4902 if (!item->compare_done)
4903 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
4904 item->jmp_code = code;
4905 x86_branch8 (code, X86_CC_NE, 0, FALSE);
4907 x86_jump_code (code, item->value.target_code);
4909 x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
4912 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
4913 item->jmp_code = code;
4914 x86_branch8 (code, X86_CC_NE, 0, FALSE);
4915 x86_jump_code (code, item->value.target_code);
4916 x86_patch (item->jmp_code, code);
4917 x86_jump_code (code, fail_tramp);
4918 item->jmp_code = NULL;
4920 /* enable the commented code to assert on wrong method */
4921 #if ENABLE_WRONG_METHOD_CHECK
4922 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
4923 item->jmp_code = code;
4924 x86_branch8 (code, X86_CC_NE, 0, FALSE);
4926 x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
4927 #if ENABLE_WRONG_METHOD_CHECK
4928 x86_patch (item->jmp_code, code);
4929 x86_breakpoint (code);
4930 item->jmp_code = NULL;
4935 x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
4936 item->jmp_code = code;
4937 if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
4938 x86_branch8 (code, X86_CC_GE, 0, FALSE);
4940 x86_branch32 (code, X86_CC_GE, 0, FALSE);
4943 /* patch the branches to get to the target items */
4944 for (i = 0; i < count; ++i) {
4945 MonoIMTCheckItem *item = imt_entries [i];
4946 if (item->jmp_code) {
4947 if (item->check_target_idx) {
4948 x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4954 mono_stats.imt_thunks_size += code - start;
4955 g_assert (code - start <= size);
4960 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4962 return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
4966 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4968 MonoMethodSignature *sig = mono_method_signature (method);
4969 CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
4970 int this_argument_offset;
4971 MonoObject *this_argument;
4974 * this is the offset of the this arg from esp as saved at the start of
4975 * mono_arch_create_trampoline_code () in tramp-x86.c.
4977 this_argument_offset = 5;
4978 if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
4979 this_argument_offset++;
4981 this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
4984 return this_argument;
4989 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
4991 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
4995 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4997 MonoInst *ins = NULL;
5000 if (cmethod->klass == mono_defaults.math_class) {
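/*
 * These System.Math methods are mapped to dedicated unary float opcodes
 * (selected in the branches below), so the call can be replaced by a
 * short inlined x87 sequence.
 */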
5001 if (strcmp (cmethod->name, "Sin") == 0) {
5003 } else if (strcmp (cmethod->name, "Cos") == 0) {
5005 } else if (strcmp (cmethod->name, "Tan") == 0) {
5007 } else if (strcmp (cmethod->name, "Atan") == 0) {
5009 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5011 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5013 } else if (strcmp (cmethod->name, "Round") == 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
5018 MONO_INST_NEW (cfg, ins, opcode);
5019 ins->type = STACK_R8;
5020 ins->dreg = mono_alloc_freg (cfg);
5021 ins->sreg1 = args [0]->dreg;
5022 MONO_ADD_INS (cfg->cbb, ins);
5025 if (cfg->opt & MONO_OPT_CMOV) {
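/*
 * With conditional moves enabled, 32 bit Math.Min/Math.Max can be lowered
 * to a compare followed by a cmov, avoiding a branch.
 */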
5028 if (strcmp (cmethod->name, "Min") == 0) {
5029 if (fsig->params [0]->type == MONO_TYPE_I4)
5031 } else if (strcmp (cmethod->name, "Max") == 0) {
5032 if (fsig->params [0]->type == MONO_TYPE_I4)
5037 MONO_INST_NEW (cfg, ins, opcode);
5038 ins->type = STACK_I4;
5039 ins->dreg = mono_alloc_ireg (cfg);
5040 ins->sreg1 = args [0]->dreg;
5041 ins->sreg2 = args [1]->dreg;
5042 MONO_ADD_INS (cfg->cbb, ins);
5047 /* OP_FREM is not IEEE compatible */
5048 else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
5049 MONO_INST_NEW (cfg, ins, OP_FREM);
5050 ins->inst_i0 = args [0];
5051 ins->inst_i1 = args [1];
5060 mono_arch_print_tree (MonoInst *tree, int arity)
5065 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5071 if (appdomain_tls_offset == -1)
5074 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5075 ins->inst_offset = appdomain_tls_offset;
5079 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
5083 if (thread_tls_offset == -1)
5086 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5087 ins->inst_offset = thread_tls_offset;
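/*
 * mono_arch_get_patch_offset:
 *
 * Returns the offset, within the instruction sequence starting at CODE, of
 * the 32 bit immediate or displacement that needs to be patched.
 */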
5092 mono_arch_get_patch_offset (guint8 *code)
5094 if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2))
5096 else if ((code [0] == 0xba))
5098 else if ((code [0] == 0x68))
5101 else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6))
5102 /* push <OFFSET>(<REG>) */
5104 else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2))
5105 /* call *<OFFSET>(<REG>) */
5107 else if ((code [0] == 0xdd) || (code [0] == 0xd9))
5110 else if ((code [0] == 0x58) && (code [1] == 0x05))
5111 /* pop %eax; add <OFFSET>, %eax */
5113 else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81))
5114 /* pop <REG>; add <OFFSET>, <REG> */
5116 else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8))
5117 /* mov <REG>, imm */
5120 g_assert_not_reached ();
5126 * mono_breakpoint_clean_code:
5128 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
5129 * breakpoints in the original code, they are removed in the copy.
5131 * Returns TRUE if no sw breakpoint was present.
5134 mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
5137 gboolean can_write = TRUE;
5139 * If method_start is non-NULL we need to perform bound checks, since accessing memory
5140 * at code - offset could go before the start of the method and end up in a different
5141 * page of memory that is not mapped, or read incorrect data anyway. We zero-fill the bytes that fall outside the method.
5144 if (!method_start || code - offset >= method_start) {
5145 memcpy (buf, code - offset, size);
5147 int diff = code - method_start;
5148 memset (buf, 0, size);
5149 memcpy (buf + offset - diff, method_start, diff + size - offset);
5152 for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
5153 int idx = mono_breakpoint_info_index [i];
5157 ptr = mono_breakpoint_info [idx].address;
5158 if (ptr >= code && ptr < code + size) {
5159 guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
5161 /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
5162 buf [ptr - code] = saved_byte;
5169 mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
5175 mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
5180 /* go to the start of the call instruction
5182 * address_byte = (m << 6) | (o << 3) | reg
5183 * call opcode: 0xff address_byte displacement
5185 * 0xff m=2,o=2 imm32
5190 * A given byte sequence can match more than one case here, so we have to be
5191 * really careful about the ordering of the cases. Longer sequences should be checked first.
5194 if ((code [-2] == 0x8b) && (x86_modrm_mod (code [-1]) == 0x2) && (code [4] == 0xff) && (x86_modrm_reg (code [5]) == 0x2) && (x86_modrm_mod (code [5]) == 0x0)) {
5196 * This is an interface call
5197 * 8b 80 0c e8 ff ff mov 0xffffe80c(%eax),%eax
5198 * ff 10 call *(%eax)
5200 reg = x86_modrm_rm (code [5]);
5202 #ifdef MONO_ARCH_HAVE_IMT
5203 } else if ((code [-2] == 0xba) && (code [3] == 0xff) && (x86_modrm_mod (code [4]) == 1) && (x86_modrm_reg (code [4]) == 2) && ((signed char)code [5] < 0)) {
5204 /* IMT-based interface calls: with MONO_ARCH_IMT_REG == edx
5205 * ba 14 f8 28 08 mov $0x828f814,%edx
5206 * ff 50 fc call *0xfffffffc(%eax)
5208 reg = code [4] & 0x07;
5209 disp = (signed char)code [5];
5211 } else if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
5212 reg = code [4] & 0x07;
5213 disp = (signed char)code [5];
5215 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
5216 reg = code [1] & 0x07;
5217 disp = *((gint32*)(code + 2));
5218 } else if ((code [1] == 0xe8)) {
5220 } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
5222 * This is an interface call
5223 * 8b 40 30 mov 0x30(%eax),%eax
5224 * ff 10 call *(%eax)
5227 reg = code [5] & 0x07;
5233 *displacement = disp;
5238 mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
5242 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
5245 return (gpointer*)((char*)vt + displacement);
5249 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
5250 gssize *regs, guint8 *code)
5252 guint32 esp = regs [X86_ESP];
5257 gsctx = mono_get_generic_context_from_code (code);
5258 cinfo = get_call_info (gsctx, NULL, sig, FALSE);
5261 * The stack looks like:
5264 * <possible vtype return address>
5266 * <4 pointers pushed by mono_arch_create_trampoline_code ()>
5268 res = (((MonoObject**)esp) [5 + (cinfo->args [0].offset / 4)]);
5273 #define MAX_ARCH_DELEGATE_PARAMS 10
5276 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
5278 guint8 *code, *start;
5280 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
5283 /* FIXME: Support more cases */
5284 if (MONO_TYPE_ISSTRUCT (sig->ret))
5288 * The stack contains:
5294 static guint8* cached = NULL;
5298 start = code = mono_global_codeman_reserve (64);
5300 /* Replace the this argument with the target */
5301 x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
5302 x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
5303 x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
5304 x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
5306 g_assert ((code - start) < 64);
5308 mono_debug_add_delegate_trampoline (start, code - start);
5310 mono_memory_barrier ();
5314 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
5316 /* 8 for mov_reg and jump, plus 8 for each parameter */
5317 int code_reserve = 8 + (sig->param_count * 8);
5319 for (i = 0; i < sig->param_count; ++i)
5320 if (!mono_is_regsize_var (sig->params [i]))
5323 code = cache [sig->param_count];
5328 * The stack contains:
5329 * <args in reverse order>
5334 * <args in reverse order>
5337 * without unbalancing the stack.
5338 * So move each arg up a spot in the stack (overwriting un-needed 'this' arg)
5339 * and leaving original spot of first arg as placeholder in stack so
5340 * when callee pops stack everything works.
5343 start = code = mono_global_codeman_reserve (code_reserve);
5345 /* store delegate for access to method_ptr */
5346 x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
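/*
 * Shift every argument down one slot: arg0 overwrites the delegate's
 * 'this' at esp+4, arg1 overwrites arg0's old slot, and so on; the
 * topmost original slot keeps its stale value as a placeholder so the
 * stack stays balanced (see the comment above). Roughly, the emitted
 * code is:
 *   mov 0x8(%esp), %eax ;  mov %eax, 0x4(%esp)
 *   mov 0xc(%esp), %eax ;  mov %eax, 0x8(%esp)
 *   ...
 */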
5349 for (i = 0; i < sig->param_count; ++i) {
5350 x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
5351 x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
5354 x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
5356 g_assert ((code - start) < code_reserve);
5358 mono_debug_add_delegate_trampoline (start, code - start);
5360 mono_memory_barrier ();
5362 cache [sig->param_count] = start;
5369 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5372 case X86_ECX: return (gpointer)ctx->ecx;
5373 case X86_EDX: return (gpointer)ctx->edx;
5374 case X86_EBP: return (gpointer)ctx->ebp;
5375 case X86_ESP: return (gpointer)ctx->esp;
5376 default: return ((gpointer)(&ctx->eax)[reg]);
5380 #ifdef MONO_ARCH_SIMD_INTRINSICS
5383 get_float_to_x_spill_area (MonoCompile *cfg)
5385 if (!cfg->fconv_to_r8_x_var) {
5386 cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
5387 cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
5389 return cfg->fconv_to_r8_x_var;
5393 * Convert all fconv opts that MONO_OPT_SSE2 would get wrong.
5396 mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
5399 int dreg, src_opcode;
5401 if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD))
5404 switch (src_opcode = ins->opcode) {
5405 case OP_FCONV_TO_I1:
5406 case OP_FCONV_TO_U1:
5407 case OP_FCONV_TO_I2:
5408 case OP_FCONV_TO_U2:
5409 case OP_FCONV_TO_I4:
5416 /* dreg is the IREG and sreg1 is the FREG */
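/*
 * The conversion is split in two: a new OP_FCONV_TO_R8_X moves the value
 * into an XMM register (spilling through the variable created by
 * get_float_to_x_spill_area ()), and the original instruction is rewritten
 * below into OP_XCONV_R8_TO_I4, which performs the actual float->int
 * conversion; the original opcode is remembered in backend.source_opcode
 * so the right width and signedness can be applied.
 */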
5417 MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
5418 fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
5419 fconv->sreg1 = ins->sreg1;
5420 fconv->dreg = mono_alloc_ireg (cfg);
5421 fconv->type = STACK_VTYPE;
5422 fconv->backend.spill_var = get_float_to_x_spill_area (cfg);
5424 mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);
5428 ins->opcode = OP_XCONV_R8_TO_I4;
5430 ins->klass = mono_defaults.int32_class;
5431 ins->sreg1 = fconv->dreg;
5433 ins->type = STACK_I4;
5434 ins->backend.source_opcode = src_opcode;
5438 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
5442 if (!(cfg->opt & MONO_OPT_SIMD))
5445 /*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */
5446 switch (long_ins->opcode) {
5448 vreg = long_ins->sreg1;
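/*
 * Decompose a 64 bit element extract into two 32 bit OP_EXTRACT_I4 ops:
 * OP_PSHUFLED brings the wanted dwords into lane 0 when needed, and the
 * low and high halves are written to the two vregs that make up the long
 * destination (dreg + 1 and dreg + 2).
 */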
5450 if (long_ins->inst_c0) {
5451 MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
5452 ins->klass = long_ins->klass;
5453 ins->sreg1 = long_ins->sreg1;
5455 ins->type = STACK_VTYPE;
5456 ins->dreg = vreg = alloc_ireg (cfg);
5457 MONO_ADD_INS (cfg->cbb, ins);
5460 MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
5461 ins->klass = mono_defaults.int32_class;
5463 ins->type = STACK_I4;
5464 ins->dreg = long_ins->dreg + 1;
5465 MONO_ADD_INS (cfg->cbb, ins);
5467 MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
5468 ins->klass = long_ins->klass;
5469 ins->sreg1 = long_ins->sreg1;
5470 ins->inst_c0 = long_ins->inst_c0 ? 3 : 1;
5471 ins->type = STACK_VTYPE;
5472 ins->dreg = vreg = alloc_ireg (cfg);
5473 MONO_ADD_INS (cfg->cbb, ins);
5475 MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
5476 ins->klass = mono_defaults.int32_class;
5478 ins->type = STACK_I4;
5479 ins->dreg = long_ins->dreg + 2;
5480 MONO_ADD_INS (cfg->cbb, ins);
5482 long_ins->opcode = OP_NOP;
5484 case OP_INSERTX_I8_SLOW:
5485 MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
5486 ins->dreg = long_ins->dreg;
5487 ins->sreg1 = long_ins->dreg;
5488 ins->sreg2 = long_ins->sreg2 + 1;
5489 ins->inst_c0 = long_ins->inst_c0 * 2;
5490 MONO_ADD_INS (cfg->cbb, ins);
5492 MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
5493 ins->dreg = long_ins->dreg;
5494 ins->sreg1 = long_ins->dreg;
5495 ins->sreg2 = long_ins->sreg2 + 2;
5496 ins->inst_c0 = long_ins->inst_c0 * 2 + 1;
5497 MONO_ADD_INS (cfg->cbb, ins);
5499 long_ins->opcode = OP_NOP;
5502 MONO_INST_NEW (cfg, ins, OP_ICONV_TO_X);
5503 ins->dreg = long_ins->dreg;
5504 ins->sreg1 = long_ins->sreg1 + 1;
5505 ins->klass = long_ins->klass;
5506 ins->type = STACK_VTYPE;
5507 MONO_ADD_INS (cfg->cbb, ins);
5509 MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
5510 ins->dreg = long_ins->dreg;
5511 ins->sreg1 = long_ins->dreg;
5512 ins->sreg2 = long_ins->sreg1 + 2;
5514 ins->klass = long_ins->klass;
5515 ins->type = STACK_VTYPE;
5516 MONO_ADD_INS (cfg->cbb, ins);
5518 MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
5519 ins->dreg = long_ins->dreg;
5520 ins->sreg1 = long_ins->dreg;
5521 ins->inst_c0 = 0x44; /*Magic number for swizzling (X,Y,X,Y)*/
5522 ins->klass = long_ins->klass;
5523 ins->type = STACK_VTYPE;
5524 MONO_ADD_INS (cfg->cbb, ins);
5526 long_ins->opcode = OP_NOP;