2 * mini-x86.c: x86 backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/threads.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/mono-debug.h>
23 #include <mono/utils/mono-math.h>
24 #include <mono/utils/mono-counters.h>
31 /* On Windows, these hold the key returned by TlsAlloc () */
32 static gint lmf_tls_offset = -1;
33 static gint lmf_addr_tls_offset = -1;
34 static gint appdomain_tls_offset = -1;
35 static gint thread_tls_offset = -1;
38 static gboolean optimize_for_xen = TRUE;
40 #define optimize_for_xen 0
44 static gboolean is_win32 = TRUE;
46 static gboolean is_win32 = FALSE;
49 /* This mutex protects architecture specific caches */
50 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
51 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
52 static CRITICAL_SECTION mini_arch_mutex;
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
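/*
 * Worked example (an illustrative sketch, not part of the original source):
 * ALIGN_TO rounds a value up to the next multiple of a power-of-two
 * alignment by adding align - 1 and masking off the low bits. The helper
 * below is hypothetical and exists only to show the arithmetic.
 */
static G_GNUC_UNUSED void
align_to_example (void)
{
	g_assert (ALIGN_TO (13, 4) == 16);	/* (13 + 3) & ~3 == 16 */
	g_assert (ALIGN_TO (16, 4) == 16);	/* already aligned, unchanged */
	g_assert (ALIGN_TO (1, 8) == 8);
}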
59 /* Under Windows, the default pinvoke calling convention is stdcall */
60 #define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
62 #define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
66 mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
69 mono_arch_regname (int reg)
72 case X86_EAX: return "%eax";
73 case X86_EBX: return "%ebx";
74 case X86_ECX: return "%ecx";
75 case X86_EDX: return "%edx";
76 case X86_ESP: return "%esp";
77 case X86_EBP: return "%ebp";
78 case X86_EDI: return "%edi";
79 case X86_ESI: return "%esi";
85 mono_arch_fregname (int reg)
110 mono_arch_xregname (int reg)
151 /* Only if storage == ArgValuetypeInReg */
152 ArgStorage pair_storage [2];
161 gboolean need_stack_align;
162 guint32 stack_align_amount;
170 #define FLOAT_PARAM_REGS 0
172 static X86_Reg_No param_regs [] = { 0 };
174 #if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
175 #define SMALL_STRUCTS_IN_REGS
176 static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
180 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
182 ainfo->offset = *stack_size;
184 if (*gr >= PARAM_REGS) {
185 ainfo->storage = ArgOnStack;
186 (*stack_size) += sizeof (gpointer);
189 ainfo->storage = ArgInIReg;
190 ainfo->reg = param_regs [*gr];
196 add_general_pair (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
198 ainfo->offset = *stack_size;
200 g_assert (PARAM_REGS == 0);
202 ainfo->storage = ArgOnStack;
203 (*stack_size) += sizeof (gpointer) * 2;
207 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
209 ainfo->offset = *stack_size;
211 if (*gr >= FLOAT_PARAM_REGS) {
212 ainfo->storage = ArgOnStack;
213 (*stack_size) += is_double ? 8 : 4;
216 /* A double register */
218 ainfo->storage = ArgInDoubleSSEReg;
220 ainfo->storage = ArgInFloatSSEReg;
228 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
230 guint32 *gr, guint32 *fr, guint32 *stack_size)
235 klass = mono_class_from_mono_type (type);
236 size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
238 #ifdef SMALL_STRUCTS_IN_REGS
239 if (sig->pinvoke && is_return) {
240 MonoMarshalType *info;
243 * the exact rules are not very well documented, but the code below seems to match the
244 * code generated by gcc 3.3.3 -mno-cygwin.
246 info = mono_marshal_load_type_info (klass);
249 ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
251 /* Special case structs with only a float member */
252 if ((info->native_size == 8) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R8)) {
253 ainfo->storage = ArgValuetypeInReg;
254 ainfo->pair_storage [0] = ArgOnDoubleFpStack;
257 if ((info->native_size == 4) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R4)) {
258 ainfo->storage = ArgValuetypeInReg;
259 ainfo->pair_storage [0] = ArgOnFloatFpStack;
262 if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
263 ainfo->storage = ArgValuetypeInReg;
264 ainfo->pair_storage [0] = ArgInIReg;
265 ainfo->pair_regs [0] = return_regs [0];
266 if (info->native_size > 4) {
267 ainfo->pair_storage [1] = ArgInIReg;
268 ainfo->pair_regs [1] = return_regs [1];
275 ainfo->offset = *stack_size;
276 ainfo->storage = ArgOnStack;
277 *stack_size += ALIGN_TO (size, sizeof (gpointer));
283 * Obtain information about a call according to the calling convention.
284 * For x86 ELF, see the "System V Application Binary Interface Intel386
285 * Architecture Processor Supplement, Fourth Edition" document for more
287 * For x86 win32, see ???.
290 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
294 int n = sig->hasthis + sig->param_count;
295 guint32 stack_size = 0;
299 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
301 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
308 ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
309 switch (ret_type->type) {
310 case MONO_TYPE_BOOLEAN:
321 case MONO_TYPE_FNPTR:
322 case MONO_TYPE_CLASS:
323 case MONO_TYPE_OBJECT:
324 case MONO_TYPE_SZARRAY:
325 case MONO_TYPE_ARRAY:
326 case MONO_TYPE_STRING:
327 cinfo->ret.storage = ArgInIReg;
328 cinfo->ret.reg = X86_EAX;
332 cinfo->ret.storage = ArgInIReg;
333 cinfo->ret.reg = X86_EAX;
336 cinfo->ret.storage = ArgOnFloatFpStack;
339 cinfo->ret.storage = ArgOnDoubleFpStack;
341 case MONO_TYPE_GENERICINST:
342 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
343 cinfo->ret.storage = ArgInIReg;
344 cinfo->ret.reg = X86_EAX;
348 case MONO_TYPE_VALUETYPE: {
349 guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
351 add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
352 if (cinfo->ret.storage == ArgOnStack)
353 /* The caller passes the address where the value is stored */
354 add_general (&gr, &stack_size, &cinfo->ret);
357 case MONO_TYPE_TYPEDBYREF:
358 /* Same as a valuetype with size 12 (a MonoTypedRef is three pointers on x86) */
359 add_general (&gr, &stack_size, &cinfo->ret);
363 cinfo->ret.storage = ArgNone;
366 g_error ("Can't handle as return value 0x%x", sig->ret->type);
372 add_general (&gr, &stack_size, cinfo->args + 0);
374 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
376 fr = FLOAT_PARAM_REGS;
378 /* Emit the signature cookie just before the implicit arguments */
379 add_general (&gr, &stack_size, &cinfo->sig_cookie);
382 for (i = 0; i < sig->param_count; ++i) {
383 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
386 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
387 /* We always pass the sig cookie on the stack for simplicity */
389 * Prevent implicit arguments + the sig cookie from being passed
393 fr = FLOAT_PARAM_REGS;
395 /* Emit the signature cookie just before the implicit arguments */
396 add_general (&gr, &stack_size, &cinfo->sig_cookie);
399 if (sig->params [i]->byref) {
400 add_general (&gr, &stack_size, ainfo);
403 ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
404 switch (ptype->type) {
405 case MONO_TYPE_BOOLEAN:
408 add_general (&gr, &stack_size, ainfo);
413 add_general (&gr, &stack_size, ainfo);
417 add_general (&gr, &stack_size, ainfo);
422 case MONO_TYPE_FNPTR:
423 case MONO_TYPE_CLASS:
424 case MONO_TYPE_OBJECT:
425 case MONO_TYPE_STRING:
426 case MONO_TYPE_SZARRAY:
427 case MONO_TYPE_ARRAY:
428 add_general (&gr, &stack_size, ainfo);
430 case MONO_TYPE_GENERICINST:
431 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
432 add_general (&gr, &stack_size, ainfo);
436 case MONO_TYPE_VALUETYPE:
437 add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
439 case MONO_TYPE_TYPEDBYREF:
440 stack_size += sizeof (MonoTypedRef);
441 ainfo->storage = ArgOnStack;
445 add_general_pair (&gr, &stack_size, ainfo);
448 add_float (&fr, &stack_size, ainfo, FALSE);
451 add_float (&fr, &stack_size, ainfo, TRUE);
454 g_error ("unexpected type 0x%x", ptype->type);
455 g_assert_not_reached ();
459 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
461 fr = FLOAT_PARAM_REGS;
463 /* Emit the signature cookie just before the implicit arguments */
464 add_general (&gr, &stack_size, &cinfo->sig_cookie);
467 if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) {
468 cinfo->need_stack_align = TRUE;
469 cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT);
470 stack_size += cinfo->stack_align_amount;
473 cinfo->stack_usage = stack_size;
474 cinfo->reg_usage = gr;
475 cinfo->freg_usage = fr;
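/*
 * Illustrative example (a sketch, not from the original source): for a cdecl
 * signature like int f (int a, double b), PARAM_REGS is 0 on x86, so both
 * arguments get ArgOnStack: a at offset 0 (4 bytes) and b at offset 4
 * (8 bytes), giving stack_usage == 12 plus whatever stack_align_amount
 * padding is needed to reach MONO_ARCH_FRAME_ALIGNMENT. The hypothetical
 * helper below shows the malloc-based variant (mp == NULL), matching how
 * mono_arch_get_argument_info () calls get_call_info ().
 */
static G_GNUC_UNUSED guint32
call_stack_usage_example (MonoMethodSignature *sig)
{
	CallInfo *cinfo = get_call_info (NULL, NULL, sig, FALSE);
	guint32 usage = cinfo->stack_usage;

	g_free (cinfo);
	return usage;
}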
480 * mono_arch_get_argument_info:
481 * @csig: a method signature
482 * @param_count: the number of parameters to consider
483 * @arg_info: an array to store the result infos
485 * Gathers information on parameters such as size, alignment and
486 * padding. arg_info should be large enough to hold param_count + 1 entries.
488 * Returns the size of the argument area on the stack.
491 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
493 int k, args_size = 0;
499 cinfo = get_call_info (NULL, NULL, csig, FALSE);
501 if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
502 args_size += sizeof (gpointer);
506 arg_info [0].offset = offset;
509 args_size += sizeof (gpointer);
513 arg_info [0].size = args_size;
515 for (k = 0; k < param_count; k++) {
516 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
518 /* ignore alignment for now */
521 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
522 arg_info [k].pad = pad;
524 arg_info [k + 1].pad = 0;
525 arg_info [k + 1].size = size;
527 arg_info [k + 1].offset = offset;
531 if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL (csig))
532 align = MONO_ARCH_FRAME_ALIGNMENT;
535 args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
536 arg_info [k].pad = pad;
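/*
 * Usage sketch (hypothetical helper, not part of the original source):
 * callers must provide param_count + 1 entries; entry 0 describes the
 * implicit arguments and entry k + 1 describes parameter k.
 */
static G_GNUC_UNUSED int
argument_area_size_example (MonoMethodSignature *sig)
{
	MonoJitArgumentInfo *arg_info = g_new0 (MonoJitArgumentInfo, sig->param_count + 1);
	int size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);

	g_free (arg_info);
	return size;
}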
543 static const guchar cpuid_impl [] = {
544 0x55, /* push %ebp */
545 0x89, 0xe5, /* mov %esp,%ebp */
546 0x53, /* push %ebx */
547 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
548 0x0f, 0xa2, /* cpuid */
549 0x50, /* push %eax */
550 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
551 0x89, 0x18, /* mov %ebx,(%eax) */
552 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
553 0x89, 0x08, /* mov %ecx,(%eax) */
554 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
555 0x89, 0x10, /* mov %edx,(%eax) */
557 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
558 0x89, 0x02, /* mov %eax,(%edx) */
564 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
567 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
571 __asm__ __volatile__ (
574 "movl %%eax, %%edx\n"
575 "xorl $0x200000, %%eax\n"
580 "xorl %%edx, %%eax\n"
581 "andl $0x200000, %%eax\n"
603 /* Have to use the code manager to get around WinXP DEP */
604 static CpuidFunc func = NULL;
607 ptr = mono_global_codeman_reserve (sizeof (cpuid_impl));
608 memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
609 func = (CpuidFunc)ptr;
611 func (id, p_eax, p_ebx, p_ecx, p_edx);
614 * We use this approach because of issues with gcc and pic code, see:
615 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
616 __asm__ __volatile__ ("cpuid"
617 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
626 * Initialize the cpu to execute managed code.
629 mono_arch_cpu_init (void)
631 /* spec compliance requires running with double precision */
635 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
636 fpcw &= ~X86_FPCW_PRECC_MASK;
637 fpcw |= X86_FPCW_PREC_DOUBLE;
638 __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
639 __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
641 _control87 (_PC_53, MCW_PC);
646 * Initialize architecture specific code.
649 mono_arch_init (void)
651 InitializeCriticalSection (&mini_arch_mutex);
655 * Cleanup architecture specific code.
658 mono_arch_cleanup (void)
660 DeleteCriticalSection (&mini_arch_mutex);
664 * This function returns the optimizations supported on this cpu.
667 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
669 int eax, ebx, ecx, edx;
673 /* Feature Flags function, flags returned in EDX. */
674 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
675 if (edx & (1 << 15)) {
676 opts |= MONO_OPT_CMOV;
678 opts |= MONO_OPT_FCMOV;
680 *exclude_mask |= MONO_OPT_FCMOV;
682 *exclude_mask |= MONO_OPT_CMOV;
684 opts |= MONO_OPT_SSE2;
686 *exclude_mask |= MONO_OPT_SSE2;
688 #ifdef MONO_ARCH_SIMD_INTRINSICS
689 /*SIMD intrinsics require at least SSE2.*/
690 if (!(opts & MONO_OPT_SSE2))
691 *exclude_mask |= MONO_OPT_SIMD;
698 * This function tests which SSE versions the cpu supports.
700 * Returns a bitmask corresponding to all supported versions.
702 * TODO detect other versions like SSE4a.
705 mono_arch_cpu_enumerate_simd_versions (void)
707 int eax, ebx, ecx, edx;
708 guint32 sse_opts = 0;
710 if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
712 sse_opts |= 1 << SIMD_VERSION_SSE1;
714 sse_opts |= 1 << SIMD_VERSION_SSE2;
716 sse_opts |= 1 << SIMD_VERSION_SSE3;
718 sse_opts |= 1 << SIMD_VERSION_SSSE3;
720 sse_opts |= 1 << SIMD_VERSION_SSE41;
722 sse_opts |= 1 << SIMD_VERSION_SSE42;
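/*
 * Illustrative example (not from the original source): a cpu supporting
 * SSE1 through SSSE3 but nothing newer would yield the mask
 * (1 << SIMD_VERSION_SSE1) | (1 << SIMD_VERSION_SSE2) |
 * (1 << SIMD_VERSION_SSE3) | (1 << SIMD_VERSION_SSSE3).
 */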
728 * Determine whether the trap whose info is in SIGINFO is caused by
732 mono_arch_is_int_overflow (void *sigctx, void *info)
737 mono_arch_sigctx_to_monoctx (sigctx, &ctx);
739 ip = (guint8*)ctx.eip;
741 if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) {
745 switch (x86_modrm_rm (ip [1])) {
765 g_assert_not_reached ();
777 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
782 for (i = 0; i < cfg->num_varinfo; i++) {
783 MonoInst *ins = cfg->varinfo [i];
784 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
787 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
790 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
791 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
794 /* we don't allocate I1 to registers because there is no simple way to sign extend
795 * 8bit quantities in caller saved registers on x86 */
796 if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
797 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
798 g_assert (i == vmv->idx);
799 vars = g_list_prepend (vars, vmv);
803 vars = mono_varlist_sort (cfg, vars, 0);
809 mono_arch_get_global_int_regs (MonoCompile *cfg)
813 /* we can use 3 registers for global allocation */
814 regs = g_list_prepend (regs, (gpointer)X86_EBX);
815 regs = g_list_prepend (regs, (gpointer)X86_ESI);
816 regs = g_list_prepend (regs, (gpointer)X86_EDI);
822 * mono_arch_regalloc_cost:
824 * Return the cost, in number of memory references, of the action of
825 * allocating the variable VMV into a register during global register
829 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
831 MonoInst *ins = cfg->varinfo [vmv->idx];
833 if (cfg->method->save_lmf)
834 /* The register is already saved */
835 return (ins->opcode == OP_ARG) ? 1 : 0;
837 /* push+pop+possible load if it is an argument */
838 return (ins->opcode == OP_ARG) ? 3 : 2;
842 * Set var information according to the calling convention. X86 version.
843 * The locals var stuff should most likely be split out into another method.
846 mono_arch_allocate_vars (MonoCompile *cfg)
848 MonoMethodSignature *sig;
849 MonoMethodHeader *header;
851 guint32 locals_stack_size, locals_stack_align;
856 header = mono_method_get_header (cfg->method);
857 sig = mono_method_signature (cfg->method);
859 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
861 cfg->frame_reg = X86_EBP;
864 /* Reserve space to save LMF and caller saved registers */
866 if (cfg->method->save_lmf) {
867 offset += sizeof (MonoLMF);
869 if (cfg->used_int_regs & (1 << X86_EBX)) {
873 if (cfg->used_int_regs & (1 << X86_EDI)) {
877 if (cfg->used_int_regs & (1 << X86_ESI)) {
882 switch (cinfo->ret.storage) {
883 case ArgValuetypeInReg:
884 /* Allocate a local to hold the result; the epilog will copy it to the correct place */
886 cfg->ret->opcode = OP_REGOFFSET;
887 cfg->ret->inst_basereg = X86_EBP;
888 cfg->ret->inst_offset = - offset;
894 /* Allocate locals */
895 offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
896 if (locals_stack_align) {
897 offset += (locals_stack_align - 1);
898 offset &= ~(locals_stack_align - 1);
901 * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
902 * have locals larger than 8 bytes we need to make sure that
903 * they have the appropriate offset.
905 if (MONO_ARCH_FRAME_ALIGNMENT > 8 && locals_stack_align > 8)
906 offset += MONO_ARCH_FRAME_ALIGNMENT - sizeof (gpointer) * 2;
907 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
908 if (offsets [i] != -1) {
909 MonoInst *inst = cfg->varinfo [i];
910 inst->opcode = OP_REGOFFSET;
911 inst->inst_basereg = X86_EBP;
912 inst->inst_offset = - (offset + offsets [i]);
913 //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
916 offset += locals_stack_size;
920 * Allocate arguments+return value
923 switch (cinfo->ret.storage) {
925 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
927 * In the new IR, the cfg->vret_addr variable represents the
928 * vtype return value.
930 cfg->vret_addr->opcode = OP_REGOFFSET;
931 cfg->vret_addr->inst_basereg = cfg->frame_reg;
932 cfg->vret_addr->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
933 if (G_UNLIKELY (cfg->verbose_level > 1)) {
934 printf ("vret_addr =");
935 mono_print_ins (cfg->vret_addr);
938 cfg->ret->opcode = OP_REGOFFSET;
939 cfg->ret->inst_basereg = X86_EBP;
940 cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
943 case ArgValuetypeInReg:
946 cfg->ret->opcode = OP_REGVAR;
947 cfg->ret->inst_c0 = cinfo->ret.reg;
948 cfg->ret->dreg = cinfo->ret.reg;
951 case ArgOnFloatFpStack:
952 case ArgOnDoubleFpStack:
955 g_assert_not_reached ();
958 if (sig->call_convention == MONO_CALL_VARARG) {
959 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
960 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
963 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
964 ArgInfo *ainfo = &cinfo->args [i];
965 inst = cfg->args [i];
966 if (inst->opcode != OP_REGVAR) {
967 inst->opcode = OP_REGOFFSET;
968 inst->inst_basereg = X86_EBP;
970 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
973 offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
974 offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
976 cfg->stack_offset = offset;
980 mono_arch_create_vars (MonoCompile *cfg)
982 MonoMethodSignature *sig;
985 sig = mono_method_signature (cfg->method);
987 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
989 if (cinfo->ret.storage == ArgValuetypeInReg)
990 cfg->ret_var_is_local = TRUE;
991 if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig->ret)) {
992 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
997 * It is expensive to adjust esp for each individual fp argument pushed on the stack
998 * so we try to do it just once when we have multiple fp arguments in a row.
999 * We don't use this mechanism generally because for int arguments the generated code
1000 * is slightly bigger and new generation cpus optimize away the dependency chains
1001 * created by push instructions on the esp value.
1002 * fp_arg_setup is the first argument in the execution sequence where the esp register
1005 static G_GNUC_UNUSED int
1006 collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup)
1011 for (; start_arg < sig->param_count; ++start_arg) {
1012 t = mini_type_get_underlying_type (NULL, sig->params [start_arg]);
1013 if (!t->byref && t->type == MONO_TYPE_R8) {
1014 fp_space += sizeof (double);
1015 *fp_arg_setup = start_arg;
1024 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1026 MonoMethodSignature *tmp_sig;
1028 /* FIXME: Add support for signature tokens to AOT */
1029 cfg->disable_aot = TRUE;
1032 * mono_ArgIterator_Setup assumes the signature cookie is
1033 * passed first and all the arguments which were before it are
1034 * passed on the stack after the signature. So compensate by
1035 * passing a different signature.
1037 tmp_sig = mono_metadata_signature_dup (call->signature);
1038 tmp_sig->param_count -= call->signature->sentinelpos;
1039 tmp_sig->sentinelpos = 0;
1040 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_X86_PUSH_IMM, -1, -1, tmp_sig);
1046 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1049 MonoMethodSignature *sig;
1052 int sentinelpos = 0;
1054 sig = call->signature;
1055 n = sig->param_count + sig->hasthis;
1057 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1059 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1060 sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
1062 if (cinfo->need_stack_align) {
1063 MONO_INST_NEW (cfg, arg, OP_SUB_IMM);
1064 arg->dreg = X86_ESP;
1065 arg->sreg1 = X86_ESP;
1066 arg->inst_imm = cinfo->stack_align_amount;
1067 MONO_ADD_INS (cfg->cbb, arg);
1070 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1073 if (cinfo->ret.storage == ArgValuetypeInReg) {
1074 if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
1076 * Tell the JIT to use a more efficient calling convention: call using
1077 * OP_CALL, compute the result location after the call, and save the
1080 call->vret_in_reg = TRUE;
1083 * The valuetype is in EAX:EDX after the call and needs to be copied to
1084 * the stack. Save the address here, so the call instruction can
1087 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1088 vtarg->sreg1 = call->vret_var->dreg;
1089 MONO_ADD_INS (cfg->cbb, vtarg);
1094 /* Handle the case where there are no implicit arguments */
1095 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
1096 emit_sig_cookie (cfg, call, cinfo);
1099 /* Arguments are pushed in the reverse order */
1100 for (i = n - 1; i >= 0; i --) {
1101 ArgInfo *ainfo = cinfo->args + i;
1104 if (i >= sig->hasthis)
1105 t = sig->params [i - sig->hasthis];
1107 t = &mono_defaults.int_class->byval_arg;
1108 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1110 MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
1112 in = call->args [i];
1113 arg->cil_code = in->cil_code;
1114 arg->sreg1 = in->dreg;
1115 arg->type = in->type;
1117 g_assert (in->dreg != -1);
1119 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
1123 g_assert (in->klass);
1125 if (t->type == MONO_TYPE_TYPEDBYREF) {
1126 size = sizeof (MonoTypedRef);
1127 align = sizeof (gpointer);
1130 size = mini_type_stack_size_full (cfg->generic_sharing_context, &in->klass->byval_arg, &align, sig->pinvoke);
1134 arg->opcode = OP_OUTARG_VT;
1135 arg->sreg1 = in->dreg;
1136 arg->klass = in->klass;
1137 arg->backend.size = size;
1139 MONO_ADD_INS (cfg->cbb, arg);
1143 switch (ainfo->storage) {
1145 arg->opcode = OP_X86_PUSH;
1147 if (t->type == MONO_TYPE_R4) {
1148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 4);
1149 arg->opcode = OP_STORER4_MEMBASE_REG;
1150 arg->inst_destbasereg = X86_ESP;
1151 arg->inst_offset = 0;
1152 } else if (t->type == MONO_TYPE_R8) {
1153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
1154 arg->opcode = OP_STORER8_MEMBASE_REG;
1155 arg->inst_destbasereg = X86_ESP;
1156 arg->inst_offset = 0;
1157 } else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) {
1159 MONO_EMIT_NEW_UNALU (cfg, OP_X86_PUSH, -1, in->dreg + 2);
1164 g_assert_not_reached ();
1167 MONO_ADD_INS (cfg->cbb, arg);
1170 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
1171 /* Emit the signature cookie just before the implicit arguments */
1172 emit_sig_cookie (cfg, call, cinfo);
1176 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1179 if (cinfo->ret.storage == ArgValuetypeInReg) {
1182 else if (cinfo->ret.storage == ArgInIReg) {
1184 /* The return address is passed in a register */
1185 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1186 vtarg->sreg1 = call->inst.dreg;
1187 vtarg->dreg = mono_alloc_ireg (cfg);
1188 MONO_ADD_INS (cfg->cbb, vtarg);
1190 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1193 MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
1194 vtarg->type = STACK_MP;
1195 vtarg->sreg1 = call->vret_var->dreg;
1196 MONO_ADD_INS (cfg->cbb, vtarg);
1199 /* if the function returns a struct, the called method already does a ret $0x4 */
1200 cinfo->stack_usage -= 4;
1203 call->stack_usage = cinfo->stack_usage;
1207 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1210 int size = ins->backend.size;
1213 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
1214 arg->sreg1 = src->dreg;
1216 MONO_ADD_INS (cfg->cbb, arg);
1217 } else if (size <= 20) {
1218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 4));
1219 mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
1221 MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
1222 arg->inst_basereg = src->dreg;
1223 arg->inst_offset = 0;
1224 arg->inst_imm = size;
1226 MONO_ADD_INS (cfg->cbb, arg);
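/*
 * Summary of the three cases above (illustrative): a value of at most 4
 * bytes is pushed straight from memory, anything up to 20 bytes is copied
 * with an inline memcpy after adjusting %esp, and larger valuetypes go
 * through OP_X86_PUSH_OBJ, which expands to a rep movs style copy.
 */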
1231 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1233 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1236 if (ret->type == MONO_TYPE_R4) {
1239 } else if (ret->type == MONO_TYPE_R8) {
1242 } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1243 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, val->dreg + 1);
1244 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, val->dreg + 2);
1249 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1253 * Allow tracing to work with this interface (with an optional argument)
1256 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1260 g_assert (MONO_ARCH_FRAME_ALIGNMENT >= 8);
1261 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 8);
1263 /* if some args are passed in registers, we need to save them here */
1264 x86_push_reg (code, X86_EBP);
1266 if (cfg->compile_aot) {
1267 x86_push_imm (code, cfg->method);
1268 x86_mov_reg_imm (code, X86_EAX, func);
1269 x86_call_reg (code, X86_EAX);
1271 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
1272 x86_push_imm (code, cfg->method);
1273 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1274 x86_call_code (code, 0);
1276 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT);
1290 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1293 int arg_size = 0, save_mode = SAVE_NONE;
1294 MonoMethod *method = cfg->method;
1296 switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
1297 case MONO_TYPE_VOID:
1298 /* special case string .ctor icall */
1299 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1300 save_mode = SAVE_EAX;
1302 save_mode = SAVE_NONE;
1306 save_mode = SAVE_EAX_EDX;
1310 save_mode = SAVE_FP;
1312 case MONO_TYPE_GENERICINST:
1313 if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
1314 save_mode = SAVE_EAX;
1318 case MONO_TYPE_VALUETYPE:
1319 save_mode = SAVE_STRUCT;
1322 save_mode = SAVE_EAX;
1326 switch (save_mode) {
1328 x86_push_reg (code, X86_EDX);
1329 x86_push_reg (code, X86_EAX);
1330 if (enable_arguments) {
1331 x86_push_reg (code, X86_EDX);
1332 x86_push_reg (code, X86_EAX);
1337 x86_push_reg (code, X86_EAX);
1338 if (enable_arguments) {
1339 x86_push_reg (code, X86_EAX);
1344 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1345 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1346 if (enable_arguments) {
1347 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1348 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1353 if (enable_arguments) {
1354 x86_push_membase (code, X86_EBP, 8);
1363 if (cfg->compile_aot) {
1364 x86_push_imm (code, method);
1365 x86_mov_reg_imm (code, X86_EAX, func);
1366 x86_call_reg (code, X86_EAX);
1368 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
1369 x86_push_imm (code, method);
1370 mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
1371 x86_call_code (code, 0);
1373 x86_alu_reg_imm (code, X86_ADD, X86_ESP, arg_size + 4);
1375 switch (save_mode) {
1377 x86_pop_reg (code, X86_EAX);
1378 x86_pop_reg (code, X86_EDX);
1381 x86_pop_reg (code, X86_EAX);
1384 x86_fld_membase (code, X86_ESP, 0, TRUE);
1385 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1395 #define EMIT_COND_BRANCH(ins,cond,sign) \
1396 if (ins->flags & MONO_INST_BRLABEL) { \
1397 if (ins->inst_i0->inst_c0) { \
1398 x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
1400 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1401 if ((cfg->opt & MONO_OPT_BRANCH) && \
1402 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
1403 x86_branch8 (code, cond, 0, sign); \
1405 x86_branch32 (code, cond, 0, sign); \
1408 if (ins->inst_true_bb->native_offset) { \
1409 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1411 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1412 if ((cfg->opt & MONO_OPT_BRANCH) && \
1413 x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1414 x86_branch8 (code, cond, 0, sign); \
1416 x86_branch32 (code, cond, 0, sign); \
1421 * Emit an exception if the condition fails and,
1422 * if possible, branch directly to the target
1424 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
1426 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1427 if (tins == NULL) { \
1428 mono_add_patch_info (cfg, code - cfg->native_code, \
1429 MONO_PATCH_INFO_EXC, exc_name); \
1430 x86_branch32 (code, cond, 0, signed); \
1432 EMIT_COND_BRANCH (tins, cond, signed); \
1436 #define EMIT_FPCOMPARE(code) do { \
1437 x86_fcompp (code); \
1438 x86_fnstsw (code); \
1443 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1445 mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1446 x86_call_code (code, 0);
1451 #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
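/*
 * Example (illustrative): the peephole pass below may rewrite "mov reg, 0"
 * into "xor reg, reg" only when the next instruction satisfies
 * INST_IGNORES_CFLAGS, since xor clobbers eflags while the adc/sbb family
 * consumes the carry flag produced by a preceding instruction.
 */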
1454 * mono_peephole_pass_1:
1456 * Perform peephole opts which should/can be performed before local regalloc
1459 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1463 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1464 MonoInst *last_ins = ins->prev;
1466 switch (ins->opcode) {
1469 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1471 * X86_LEA is like ADD, but doesn't have the
1472 * sreg1==dreg restriction.
1474 ins->opcode = OP_X86_LEA_MEMBASE;
1475 ins->inst_basereg = ins->sreg1;
1476 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1477 ins->opcode = OP_X86_INC_REG;
1481 if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
1482 ins->opcode = OP_X86_LEA_MEMBASE;
1483 ins->inst_basereg = ins->sreg1;
1484 ins->inst_imm = -ins->inst_imm;
1485 } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1486 ins->opcode = OP_X86_DEC_REG;
1488 case OP_COMPARE_IMM:
1489 case OP_ICOMPARE_IMM:
1490 /* OP_COMPARE_IMM (reg, 0)
1492 * OP_X86_TEST_NULL (reg)
1495 ins->opcode = OP_X86_TEST_NULL;
1497 case OP_X86_COMPARE_MEMBASE_IMM:
1499 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1500 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1502 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1503 * OP_COMPARE_IMM reg, imm
1505 * Note: if imm == 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1507 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1508 ins->inst_basereg == last_ins->inst_destbasereg &&
1509 ins->inst_offset == last_ins->inst_offset) {
1510 ins->opcode = OP_COMPARE_IMM;
1511 ins->sreg1 = last_ins->sreg1;
1513 /* check if we can replace cmp reg,0 with test null */
1515 ins->opcode = OP_X86_TEST_NULL;
1519 case OP_X86_PUSH_MEMBASE:
1520 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
1521 last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1522 ins->inst_basereg == last_ins->inst_destbasereg &&
1523 ins->inst_offset == last_ins->inst_offset) {
1524 ins->opcode = OP_X86_PUSH;
1525 ins->sreg1 = last_ins->sreg1;
1530 mono_peephole_ins (bb, ins);
1535 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1539 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1540 switch (ins->opcode) {
1542 /* reg = 0 -> XOR (reg, reg) */
1543 /* XOR sets cflags on x86, so we can't always do it */
1544 if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
1547 ins->opcode = OP_IXOR;
1548 ins->sreg1 = ins->dreg;
1549 ins->sreg2 = ins->dreg;
1552 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
1553 * since it takes 3 bytes instead of 7.
1555 for (ins2 = ins->next; ins2; ins2 = ins2->next) {
1556 if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1557 ins2->opcode = OP_STORE_MEMBASE_REG;
1558 ins2->sreg1 = ins->dreg;
1560 else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
1561 ins2->opcode = OP_STOREI4_MEMBASE_REG;
1562 ins2->sreg1 = ins->dreg;
1564 else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
1565 /* Continue iteration */
1574 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1575 ins->opcode = OP_X86_INC_REG;
1579 if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
1580 ins->opcode = OP_X86_DEC_REG;
1584 mono_peephole_ins (bb, ins);
1589 * mono_arch_lowering_pass:
1591 * Converts complex opcodes into simpler ones so that each IR instruction
1592 * corresponds to one machine instruction.
1595 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1597 MonoInst *ins, *next;
1600 * FIXME: Need to add more instructions, but the current machine
1601 * description can't model some parts of the composite instructions like
1604 MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
1605 switch (ins->opcode) {
1608 case OP_IDIV_UN_IMM:
1609 case OP_IREM_UN_IMM:
1611 * Keep the cases where we can generate optimized code; otherwise convert
1612 * to the non-imm variant.
1614 if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0)
1616 mono_decompose_op_imm (cfg, bb, ins);
1623 bb->max_vreg = cfg->next_vreg;
1627 branch_cc_table [] = {
1628 X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1629 X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1630 X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1633 /* Maps CMP_... constants to X86_CC_... constants */
1636 X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
1637 X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
1641 cc_signed_table [] = {
1642 TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
1643 FALSE, FALSE, FALSE, FALSE
1646 static unsigned char*
1647 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
1649 #define XMM_TEMP_REG 0
1650 /* This SSE2 optimization must not be done with OPT_SIMD in place, as it clobbers xmm0. */
1651 /* The xmm pass decomposes OP_FCONV_ ops anyway. */
1652 if (cfg->opt & MONO_OPT_SSE2 && size < 8 && !(cfg->opt & MONO_OPT_SIMD)) {
1653 /* optimize by assigning a local var for this use so we avoid
1654 * the stack manipulations */
1655 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1656 x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
1657 x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
1658 x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
1659 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
1661 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1663 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
1666 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
1667 x86_fnstcw_membase(code, X86_ESP, 0);
1668 x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
1669 x86_alu_reg_imm (code, X86_OR, dreg, 0xc00);
1670 x86_mov_membase_reg (code, X86_ESP, 2, dreg, 2);
1671 x86_fldcw_membase (code, X86_ESP, 2);
1673 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1674 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
1675 x86_pop_reg (code, dreg);
1676 /* FIXME: need the high register
1677 * x86_pop_reg (code, dreg_high);
1680 x86_push_reg (code, X86_EAX); // SP = SP - 4
1681 x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
1682 x86_pop_reg (code, dreg);
1684 x86_fldcw_membase (code, X86_ESP, 0);
1685 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
1688 x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
1690 x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
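/*
 * Note on the x87 path above (illustrative): the FPU control word is saved
 * to the stack, its rounding-control bits are forced to 11b (the or with
 * 0xc00), i.e. truncate toward zero as CIL conversions require, the fist
 * stores the integer, and the original control word is restored afterwards.
 */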
1694 static unsigned char*
1695 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
1697 int sreg = tree->sreg1;
1698 int need_touch = FALSE;
1700 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
1709 * If requested stack size is larger than one page,
1710 * perform stack-touch operation
1713 * Generate stack probe code.
1714 * Under Windows, it is necessary to allocate one page at a time,
1715 * "touching" stack after each successful sub-allocation. This is
1716 * because of the way stack growth is implemented - there is a
1717 * guard page before the lowest stack page that is currently committed.
1718 * Stack normally grows sequentially so OS traps access to the
1719 * guard page and commits more pages when needed.
1721 x86_test_reg_imm (code, sreg, ~0xFFF);
1722 br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1724 br[2] = code; /* loop */
1725 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
1726 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
1729 * By the end of the loop, sreg is smaller than 0x1000, so the init routine
1730 * that follows only initializes the last part of the area.
1732 /* Same as the init code below with size==0x1000 */
1733 if (tree->flags & MONO_INST_INIT) {
1734 x86_push_reg (code, X86_EAX);
1735 x86_push_reg (code, X86_ECX);
1736 x86_push_reg (code, X86_EDI);
1737 x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2));
1738 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1739 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
1741 x86_prefix (code, X86_REP_PREFIX);
1743 x86_pop_reg (code, X86_EDI);
1744 x86_pop_reg (code, X86_ECX);
1745 x86_pop_reg (code, X86_EAX);
1748 x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
1749 x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
1750 br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
1751 x86_patch (br[3], br[2]);
1752 x86_test_reg_reg (code, sreg, sreg);
1753 br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1754 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1756 br[1] = code; x86_jump8 (code, 0);
1758 x86_patch (br[0], code);
1759 x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
1760 x86_patch (br[1], code);
1761 x86_patch (br[4], code);
1764 x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1);
1766 if (tree->flags & MONO_INST_INIT) {
1768 if (tree->dreg != X86_EAX && sreg != X86_EAX) {
1769 x86_push_reg (code, X86_EAX);
1772 if (tree->dreg != X86_ECX && sreg != X86_ECX) {
1773 x86_push_reg (code, X86_ECX);
1776 if (tree->dreg != X86_EDI && sreg != X86_EDI) {
1777 x86_push_reg (code, X86_EDI);
1781 x86_shift_reg_imm (code, X86_SHR, sreg, 2);
1782 if (sreg != X86_ECX)
1783 x86_mov_reg_reg (code, X86_ECX, sreg, 4);
1784 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
1786 x86_lea_membase (code, X86_EDI, X86_ESP, offset);
1788 x86_prefix (code, X86_REP_PREFIX);
1791 if (tree->dreg != X86_EDI && sreg != X86_EDI)
1792 x86_pop_reg (code, X86_EDI);
1793 if (tree->dreg != X86_ECX && sreg != X86_ECX)
1794 x86_pop_reg (code, X86_ECX);
1795 if (tree->dreg != X86_EAX && sreg != X86_EAX)
1796 x86_pop_reg (code, X86_EAX);
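/*
 * Worked example for the probe loop above (illustrative): a 0x2500 byte
 * localloc iterates twice (0x2500 -> 0x1500 -> 0x500), touching the freshly
 * exposed stack page after each 0x1000 byte step so the guard page
 * mechanism can commit it, and the remaining 0x500 bytes are subtracted
 * after the loop.
 */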
1803 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
1808 /* Move return value to the target register */
1809 switch (ins->opcode) {
1812 case OP_CALL_MEMBASE:
1813 if (ins->dreg != X86_EAX)
1814 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
1818 case OP_VCALL_MEMBASE:
1821 case OP_VCALL2_MEMBASE:
1822 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
1823 if (cinfo->ret.storage == ArgValuetypeInReg) {
1824 /* Pop the destination address from the stack */
1825 x86_pop_reg (code, X86_ECX);
1827 for (quad = 0; quad < 2; quad ++) {
1828 switch (cinfo->ret.pair_storage [quad]) {
1830 g_assert (cinfo->ret.pair_regs [quad] != X86_ECX);
1831 x86_mov_membase_reg (code, X86_ECX, (quad * sizeof (gpointer)), cinfo->ret.pair_regs [quad], sizeof (gpointer));
1836 g_assert_not_reached ();
1842 MonoCallInst *call = (MonoCallInst*)ins;
1843 if (call->method && !mono_method_signature (call->method)->ret->byref && mono_method_signature (call->method)->ret->type == MONO_TYPE_R4) {
1844 /* Avoid some precision issues by saving/reloading the return value */
1845 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
1846 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
1847 x86_fld_membase (code, X86_ESP, 0, FALSE);
1848 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
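/*
 * Illustrative note: a method whose managed signature returns R4 must not
 * leak the extra x87 precision to its caller, so the value above takes a
 * round trip through a 4 byte stack slot, which rounds it to single
 * precision.
 */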
1860 * mono_x86_emit_tls_get:
1861 * @code: buffer to store code to
1862 * @dreg: hard register in which to place the result
1863 * @tls_offset: the TLS slot offset
1865 * mono_x86_emit_tls_get emits in @code the native code that loads into
1866 * the @dreg register the thread local storage item identified
1869 * Returns: a pointer to the end of the stored code
1872 mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
1874 #ifdef PLATFORM_WIN32
1876 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
1877 * Journal and/or a disassembly of the TlsGet () function.
1879 g_assert (tls_offset < 64);
1880 x86_prefix (code, X86_FS_PREFIX);
1881 x86_mov_reg_mem (code, dreg, 0x18, 4);
1882 /* Dunno what this does but TlsGetValue () contains it */
1883 x86_alu_membase_imm (code, X86_AND, dreg, 0x34, 0);
1884 x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
1886 if (optimize_for_xen) {
1887 x86_prefix (code, X86_GS_PREFIX);
1888 x86_mov_reg_mem (code, dreg, 0, 4);
1889 x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4);
1891 x86_prefix (code, X86_GS_PREFIX);
1892 x86_mov_reg_mem (code, dreg, tls_offset, 4);
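/*
 * Usage sketch (hypothetical helper, not part of the original source):
 * materialize the current thread's LMF pointer into %eax, assuming
 * lmf_tls_offset was filled in during runtime initialization.
 */
static G_GNUC_UNUSED guint8*
emit_load_lmf_example (guint8 *code)
{
	if (lmf_tls_offset != -1)
		code = mono_x86_emit_tls_get (code, X86_EAX, lmf_tls_offset);
	return code;
}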
1899 * emit_load_volatile_arguments:
1901 * Load volatile arguments from the stack to the original input registers.
1902 * Required before a tail call.
1905 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
1907 MonoMethod *method = cfg->method;
1908 MonoMethodSignature *sig;
1913 /* FIXME: Generate intermediate code instead */
1915 sig = mono_method_signature (method);
1917 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
1919 /* This is the opposite of the code in emit_prolog */
1921 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1922 ArgInfo *ainfo = cinfo->args + i;
1924 inst = cfg->args [i];
1926 if (sig->hasthis && (i == 0))
1927 arg_type = &mono_defaults.object_class->byval_arg;
1929 arg_type = sig->params [i - sig->hasthis];
1932 * On x86, the arguments are either in their original stack locations, or in
1935 if (inst->opcode == OP_REGVAR) {
1936 g_assert (ainfo->storage == ArgOnStack);
1938 x86_mov_membase_reg (code, X86_EBP, inst->inst_offset, inst->dreg, 4);
1945 #define REAL_PRINT_REG(text,reg) \
1946 mono_assert (reg >= 0); \
1947 x86_push_reg (code, X86_EAX); \
1948 x86_push_reg (code, X86_EDX); \
1949 x86_push_reg (code, X86_ECX); \
1950 x86_push_reg (code, reg); \
1951 x86_push_imm (code, reg); \
1952 x86_push_imm (code, text " %d %p\n"); \
1953 x86_mov_reg_imm (code, X86_EAX, printf); \
1954 x86_call_reg (code, X86_EAX); \
1955 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3*4); \
1956 x86_pop_reg (code, X86_ECX); \
1957 x86_pop_reg (code, X86_EDX); \
1958 x86_pop_reg (code, X86_EAX);
1960 /* benchmark and set based on cpu */
1961 #define LOOP_ALIGNMENT 8
1962 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
1965 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1970 guint8 *code = cfg->native_code + cfg->code_len;
1973 if (cfg->opt & MONO_OPT_LOOP) {
1974 int pad, align = LOOP_ALIGNMENT;
1975 /* set alignment depending on cpu */
1976 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
1978 /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
1979 x86_padding (code, pad);
1980 cfg->code_len += pad;
1981 bb->native_offset = cfg->code_len;
1985 if (cfg->verbose_level > 2)
1986 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
1988 cpos = bb->max_offset;
1990 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1991 MonoProfileCoverageInfo *cov = cfg->coverage_info;
1992 g_assert (!cfg->compile_aot);
1995 cov->data [bb->dfn].cil_code = bb->cil_code;
1996 /* this is not thread safe, but good enough */
1997 x86_inc_mem (code, &cov->data [bb->dfn].count);
2000 offset = code - cfg->native_code;
2002 mono_debug_open_block (cfg, bb, offset);
2004 MONO_BB_FOR_EACH_INS (bb, ins) {
2005 offset = code - cfg->native_code;
2007 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2009 if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
2010 cfg->code_size *= 2;
2011 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2012 code = cfg->native_code + offset;
2013 mono_jit_stats.code_reallocs++;
2016 if (cfg->debug_info)
2017 mono_debug_record_line_number (cfg, ins, offset);
2019 switch (ins->opcode) {
2021 x86_mul_reg (code, ins->sreg2, TRUE);
2024 x86_mul_reg (code, ins->sreg2, FALSE);
2026 case OP_X86_SETEQ_MEMBASE:
2027 case OP_X86_SETNE_MEMBASE:
2028 x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ? X86_CC_EQ : X86_CC_NE,
2029 ins->inst_basereg, ins->inst_offset, TRUE);
2031 case OP_STOREI1_MEMBASE_IMM:
2032 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
2034 case OP_STOREI2_MEMBASE_IMM:
2035 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
2037 case OP_STORE_MEMBASE_IMM:
2038 case OP_STOREI4_MEMBASE_IMM:
2039 x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
2041 case OP_STOREI1_MEMBASE_REG:
2042 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
2044 case OP_STOREI2_MEMBASE_REG:
2045 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
2047 case OP_STORE_MEMBASE_REG:
2048 case OP_STOREI4_MEMBASE_REG:
2049 x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
2051 case OP_STORE_MEM_IMM:
2052 x86_mov_mem_imm (code, ins->inst_p0, ins->inst_c0, 4);
2055 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2059 /* These are created by the cprop pass so they use inst_imm as the source */
2060 x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
2063 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE);
2066 x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE);
2068 case OP_LOAD_MEMBASE:
2069 case OP_LOADI4_MEMBASE:
2070 case OP_LOADU4_MEMBASE:
2071 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
2073 case OP_LOADU1_MEMBASE:
2074 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
2076 case OP_LOADI1_MEMBASE:
2077 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
2079 case OP_LOADU2_MEMBASE:
2080 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
2082 case OP_LOADI2_MEMBASE:
2083 x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
2085 case OP_ICONV_TO_I1:
2087 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2089 case OP_ICONV_TO_I2:
2091 x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2093 case OP_ICONV_TO_U1:
2094 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
2096 case OP_ICONV_TO_U2:
2097 x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
2101 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
2103 case OP_COMPARE_IMM:
2104 case OP_ICOMPARE_IMM:
2105 x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
2107 case OP_X86_COMPARE_MEMBASE_REG:
2108 x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2110 case OP_X86_COMPARE_MEMBASE_IMM:
2111 x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2113 case OP_X86_COMPARE_MEMBASE8_IMM:
2114 x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2116 case OP_X86_COMPARE_REG_MEMBASE:
2117 x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
2119 case OP_X86_COMPARE_MEM_IMM:
2120 x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
2122 case OP_X86_TEST_NULL:
2123 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
2125 case OP_X86_ADD_MEMBASE_IMM:
2126 x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2128 case OP_X86_ADD_REG_MEMBASE:
2129 x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
2131 case OP_X86_SUB_MEMBASE_IMM:
2132 x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2134 case OP_X86_SUB_REG_MEMBASE:
2135 x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
2137 case OP_X86_AND_MEMBASE_IMM:
2138 x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2140 case OP_X86_OR_MEMBASE_IMM:
2141 x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2143 case OP_X86_XOR_MEMBASE_IMM:
2144 x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2146 case OP_X86_ADD_MEMBASE_REG:
2147 x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2149 case OP_X86_SUB_MEMBASE_REG:
2150 x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2152 case OP_X86_AND_MEMBASE_REG:
2153 x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2155 case OP_X86_OR_MEMBASE_REG:
2156 x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2158 case OP_X86_XOR_MEMBASE_REG:
2159 x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2161 case OP_X86_INC_MEMBASE:
2162 x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
2164 case OP_X86_INC_REG:
2165 x86_inc_reg (code, ins->dreg);
2167 case OP_X86_DEC_MEMBASE:
2168 x86_dec_membase (code, ins->inst_basereg, ins->inst_offset);
2170 case OP_X86_DEC_REG:
2171 x86_dec_reg (code, ins->dreg);
2173 case OP_X86_MUL_REG_MEMBASE:
2174 x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
2176 case OP_X86_AND_REG_MEMBASE:
2177 x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset);
2179 case OP_X86_OR_REG_MEMBASE:
2180 x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset);
2182 case OP_X86_XOR_REG_MEMBASE:
2183 x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset);
2186 x86_breakpoint (code);
2188 case OP_RELAXED_NOP:
2189 x86_prefix (code, X86_REP_PREFIX);
2197 case OP_DUMMY_STORE:
2198 case OP_NOT_REACHED:
2204 x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
2208 x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
2213 x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
2217 x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
2222 x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
2226 x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
2231 x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
2235 x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
2238 x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
2242 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
2247 * The code is the same for div/rem; the allocator will allocate dreg
2248 * to EAX/EDX as appropriate.
2250 if (ins->sreg2 == X86_EDX) {
2251 /* cdq clobbers this */
2252 x86_push_reg (code, ins->sreg2);
2254 x86_div_membase (code, X86_ESP, 0, TRUE);
2255 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2258 x86_div_reg (code, ins->sreg2, TRUE);
2263 if (ins->sreg2 == X86_EDX) {
2264 x86_push_reg (code, ins->sreg2);
2265 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2266 x86_div_membase (code, X86_ESP, 0, FALSE);
2267 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2269 x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
2270 x86_div_reg (code, ins->sreg2, FALSE);
2274 x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2276 x86_div_reg (code, ins->sreg2, TRUE);
2279 int power = mono_is_power_of_two (ins->inst_imm);
2281 g_assert (ins->sreg1 == X86_EAX);
2282 g_assert (ins->dreg == X86_EAX);
2283 g_assert (power >= 0);
2286 /* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
2288 x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
2290 * If the dividend is >= 0, this does nothing. If it is negative, it
2291 * transforms %eax=0 into %eax=0, and %eax=1 into %eax=-1.
2293 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
2294 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
2296 /* Based on gcc code */
2298 /* Add compensation for negative dividends */
2300 x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
2301 x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
2302 /* Compute remainder */
2303 x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
2304 /* Remove compensation */
2305 x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
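/*
 * Worked example (illustrative): x % 8, i.e. power == 3, with %eax = -13.
 * The (elided) cdq sets %edx = -1; shr %edx, 29 leaves %edx = 7; the add
 * gives %eax = -6; and-ing with 7 yields 2; subtracting the compensation
 * gives 2 - 7 = -5, matching C semantics: -13 % 8 == -5.
 */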
2310 x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
2314 x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
2317 x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
2321 x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
2324 g_assert (ins->sreg2 == X86_ECX);
2325 x86_shift_reg (code, X86_SHL, ins->dreg);
2328 g_assert (ins->sreg2 == X86_ECX);
2329 x86_shift_reg (code, X86_SAR, ins->dreg);
2333 x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
2336 case OP_ISHR_UN_IMM:
2337 x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
2340 g_assert (ins->sreg2 == X86_ECX);
2341 x86_shift_reg (code, X86_SHR, ins->dreg);
2345 x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
2348 guint8 *jump_to_end;
2350 /* handle shifts below 32 bits */
2351 x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
2352 x86_shift_reg (code, X86_SHL, ins->sreg1);
2354 x86_test_reg_imm (code, X86_ECX, 32);
2355 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
2357 /* handle shifts over 31 bits */
2358 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2359 x86_clear_reg (code, ins->sreg1);
2361 x86_patch (jump_to_end, code);
2365 guint8 *jump_to_end;
2367 /* handle shifts below 32 bits */
2368 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2369 x86_shift_reg (code, X86_SAR, ins->backend.reg3);
2371 x86_test_reg_imm (code, X86_ECX, 32);
2372 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2374 /* handle shifts over 31 bits */
2375 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2376 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);
2378 x86_patch (jump_to_end, code);
2382 guint8 *jump_to_end;
2384 /* handle shifts below 32 bits */
2385 x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
2386 x86_shift_reg (code, X86_SHR, ins->backend.reg3);
2388 x86_test_reg_imm (code, X86_ECX, 32);
2389 jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
2391 /* handle shifts over 31 bits */
2392 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2393 x86_clear_reg (code, ins->backend.reg3);
2395 x86_patch (jump_to_end, code);
2399 if (ins->inst_imm >= 32) {
2400 x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
2401 x86_clear_reg (code, ins->sreg1);
2402 x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
2404 x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
2405 x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
2409 if (ins->inst_imm >= 32) {
2410 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2411 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
2412 x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
2414 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2415 x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
2418 case OP_LSHR_UN_IMM:
2419 if (ins->inst_imm >= 32) {
2420 x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
2421 x86_clear_reg (code, ins->backend.reg3);
2422 x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
2424 x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
2425 x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
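/*
 * A note on the immediate long-shift cases above: shld/shrd shift one
 * half of the pair while pulling the vacated bits in from the other
 * half, which yields a true 64-bit shift for counts below 32. For
 * counts of 32 or more, the code instead moves one half into the other,
 * fills the vacated half (with zeros or sign copies), and shifts the
 * result by count - 32.
 */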
2429 x86_not_reg (code, ins->sreg1);
2432 x86_neg_reg (code, ins->sreg1);
2436 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2440 switch (ins->inst_imm) {
2444 if (ins->dreg != ins->sreg1)
2445 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2446 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2449 /* LEA r1, [r2 + r2*2] */
2450 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2453 /* LEA r1, [r2 + r2*4] */
2454 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2457 /* LEA r1, [r2 + r2*2] */
2459 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2460 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2463 /* LEA r1, [r2 + r2*8] */
2464 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
2467 /* LEA r1, [r2 + r2*4] */
2469 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2470 x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2473 /* LEA r1, [r2 + r2*2] */
2475 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2476 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2479 /* LEA r1, [r2 + r2*4] */
2480 /* LEA r1, [r1 + r1*4] */
2481 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2482 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2485 /* LEA r1, [r2 + r2*4] */
2487 /* LEA r1, [r1 + r1*4] */
2488 x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2489 x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2490 x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2493 x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2498 x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2499 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2501 case OP_IMUL_OVF_UN: {
2502 /* the mul operation and the exception check should most likely be split */
2503 int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
2504 /*g_assert (ins->sreg2 == X86_EAX);
2505 g_assert (ins->dreg == X86_EAX);*/
2506 if (ins->sreg2 == X86_EAX) {
2507 non_eax_reg = ins->sreg1;
2508 } else if (ins->sreg1 == X86_EAX) {
2509 non_eax_reg = ins->sreg2;
2511 /* no need to save since we're going to store to it anyway */
2512 if (ins->dreg != X86_EAX) {
2514 x86_push_reg (code, X86_EAX);
2516 x86_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
2517 non_eax_reg = ins->sreg2;
2519 if (ins->dreg == X86_EDX) {
2522 x86_push_reg (code, X86_EAX);
2524 } else if (ins->dreg != X86_EAX) {
2526 x86_push_reg (code, X86_EDX);
2528 x86_mul_reg (code, non_eax_reg, FALSE);
2529 /* save before the check since pop and mov don't change the flags */
2530 if (ins->dreg != X86_EAX)
2531 x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
2533 x86_pop_reg (code, X86_EDX);
2535 x86_pop_reg (code, X86_EAX);
2536 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2540 x86_mov_reg_imm (code, ins->dreg, ins->inst_c0);
2543 g_assert_not_reached ();
2544 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2545 x86_mov_reg_imm (code, ins->dreg, 0);
2548 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2549 x86_mov_reg_imm (code, ins->dreg, 0);
2551 case OP_LOAD_GOTADDR:
2552 x86_call_imm (code, 0);
2554 * The patch needs to point to the pop, since the GOT offset needs
2555 * to be added to that address.
2557 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
2558 x86_pop_reg (code, ins->dreg);
2559 x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
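/*
 * Sketch of the emitted position-independent sequence (0xf0f0f0f0 is
 * just a placeholder, patched with the real GOT displacement later):
 *   call next        ; pushes the address of the next instruction
 * next:
 *   pop  %dreg       ; dreg = current eip
 *   add  $got_disp, %dreg
 */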
2562 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2563 x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4);
2565 case OP_X86_PUSH_GOT_ENTRY:
2566 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
2567 x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0);
2570 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
2574 * Note: this 'frame destruction' logic is useful for tail calls, too.
2575 * Keep in sync with the code in emit_epilog.
2579 /* FIXME: no tracing support... */
2580 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2581 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
2582 /* reset offset to make max_len work */
2583 offset = code - cfg->native_code;
2585 g_assert (!cfg->method->save_lmf);
2587 code = emit_load_volatile_arguments (cfg, code);
2589 if (cfg->used_int_regs & (1 << X86_EBX))
2591 if (cfg->used_int_regs & (1 << X86_EDI))
2593 if (cfg->used_int_regs & (1 << X86_ESI))
2596 x86_lea_membase (code, X86_ESP, X86_EBP, pos);
2598 if (cfg->used_int_regs & (1 << X86_ESI))
2599 x86_pop_reg (code, X86_ESI);
2600 if (cfg->used_int_regs & (1 << X86_EDI))
2601 x86_pop_reg (code, X86_EDI);
2602 if (cfg->used_int_regs & (1 << X86_EBX))
2603 x86_pop_reg (code, X86_EBX);
2605 /* restore ESP/EBP */
2607 offset = code - cfg->native_code;
2608 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2609 x86_jump32 (code, 0);
2611 cfg->disable_aot = TRUE;
2615 /* ensure ins->sreg1 is not NULL
2616 * note that cmp DWORD PTR [eax], eax is one byte shorter than
2617 * cmp DWORD PTR [eax], 0
2619 x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1);
2622 int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX;
2623 x86_push_reg (code, hreg);
2624 x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie);
2625 x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4);
2626 x86_pop_reg (code, hreg);
2635 call = (MonoCallInst*)ins;
2636 if (ins->flags & MONO_INST_HAS_METHOD)
2637 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2639 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2640 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2641 /* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
2642 * bytes to pop, we want to use pops. GCC does this (note it won't happen
2643 * for P4 or i686 because gcc will avoid using pop/push at all), but we aren't
2644 * smart enough to do that optimization yet.
2646 * It turns out that on my P4, doing two pops for 8 bytes on the stack makes
2647 * the mcs bootstrap slow down. However, doing 1 pop for 4 bytes creates a small
2648 * win (most likely from locality benefits). People with other processors should
2649 * check on theirs to see what happens.
2651 if (call->stack_usage == 4) {
2652 /* we want to use registers that won't get used soon, so use
2653 * ecx, as eax will get allocated first. edx is used by long calls,
2654 * so we can't use that.
2657 x86_pop_reg (code, X86_ECX);
2659 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2662 code = emit_move_return_value (cfg, ins, code);
2668 case OP_VOIDCALL_REG:
2670 call = (MonoCallInst*)ins;
2671 x86_call_reg (code, ins->sreg1);
2672 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2673 if (call->stack_usage == 4)
2674 x86_pop_reg (code, X86_ECX);
2676 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2678 code = emit_move_return_value (cfg, ins, code);
2680 case OP_FCALL_MEMBASE:
2681 case OP_LCALL_MEMBASE:
2682 case OP_VCALL_MEMBASE:
2683 case OP_VCALL2_MEMBASE:
2684 case OP_VOIDCALL_MEMBASE:
2685 case OP_CALL_MEMBASE:
2686 call = (MonoCallInst*)ins;
2687 x86_call_membase (code, ins->sreg1, ins->inst_offset);
2688 if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
2689 if (call->stack_usage == 4)
2690 x86_pop_reg (code, X86_ECX);
2692 x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
2694 code = emit_move_return_value (cfg, ins, code);
2697 x86_push_reg (code, ins->sreg1);
2699 case OP_X86_PUSH_IMM:
2700 x86_push_imm (code, ins->inst_imm);
2702 case OP_X86_PUSH_MEMBASE:
2703 x86_push_membase (code, ins->inst_basereg, ins->inst_offset);
2705 case OP_X86_PUSH_OBJ:
2706 x86_alu_reg_imm (code, X86_SUB, X86_ESP, ins->inst_imm);
2707 x86_push_reg (code, X86_EDI);
2708 x86_push_reg (code, X86_ESI);
2709 x86_push_reg (code, X86_ECX);
2710 if (ins->inst_offset)
2711 x86_lea_membase (code, X86_ESI, ins->inst_basereg, ins->inst_offset);
2713 x86_mov_reg_reg (code, X86_ESI, ins->inst_basereg, 4);
2714 x86_lea_membase (code, X86_EDI, X86_ESP, 12);
2715 x86_mov_reg_imm (code, X86_ECX, (ins->inst_imm >> 2));
2717 x86_prefix (code, X86_REP_PREFIX);
2719 x86_pop_reg (code, X86_ECX);
2720 x86_pop_reg (code, X86_ESI);
2721 x86_pop_reg (code, X86_EDI);
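/*
 * The sequence above reserves inst_imm bytes on the stack, then uses
 * rep movsd to copy inst_imm >> 2 dwords of the value type from
 * basereg + inst_offset into that space. EDI, ESI and ECX are saved
 * and restored around the copy because rep movsd clobbers all three.
 */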
2724 x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
2726 case OP_X86_LEA_MEMBASE:
2727 x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
2730 x86_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
2733 /* keep alignment */
2734 x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
2735 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2736 code = mono_emit_stack_alloc (code, ins);
2737 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2739 case OP_LOCALLOC_IMM: {
2740 guint32 size = ins->inst_imm;
2741 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
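/* the line above rounds size up to the next multiple of MONO_ARCH_FRAME_ALIGNMENT */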
2743 if (ins->flags & MONO_INST_INIT) {
2744 /* FIXME: Optimize this */
2745 x86_mov_reg_imm (code, ins->dreg, size);
2746 ins->sreg1 = ins->dreg;
2748 code = mono_emit_stack_alloc (code, ins);
2749 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2751 x86_alu_reg_imm (code, X86_SUB, X86_ESP, size);
2752 x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
2757 x86_push_reg (code, ins->sreg1);
2758 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2759 (gpointer)"mono_arch_throw_exception");
2763 x86_push_reg (code, ins->sreg1);
2764 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
2765 (gpointer)"mono_arch_rethrow_exception");
2768 case OP_CALL_HANDLER:
2769 x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2770 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2771 x86_call_imm (code, 0);
2772 x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
2774 case OP_START_HANDLER: {
2775 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2776 x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4);
2779 case OP_ENDFINALLY: {
2780 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2781 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2785 case OP_ENDFILTER: {
2786 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2787 x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
2788 /* The local allocator will put the result into EAX */
2794 ins->inst_c0 = code - cfg->native_code;
2797 if (ins->flags & MONO_INST_BRLABEL) {
2798 if (ins->inst_i0->inst_c0) {
2799 x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2801 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2802 if ((cfg->opt & MONO_OPT_BRANCH) &&
2803 x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
2804 x86_jump8 (code, 0);
2806 x86_jump32 (code, 0);
2809 if (ins->inst_target_bb->native_offset) {
2810 x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
2812 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2813 if ((cfg->opt & MONO_OPT_BRANCH) &&
2814 x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
2815 x86_jump8 (code, 0);
2817 x86_jump32 (code, 0);
2822 x86_jump_reg (code, ins->sreg1);
2835 x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
2836 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
2838 case OP_COND_EXC_EQ:
2839 case OP_COND_EXC_NE_UN:
2840 case OP_COND_EXC_LT:
2841 case OP_COND_EXC_LT_UN:
2842 case OP_COND_EXC_GT:
2843 case OP_COND_EXC_GT_UN:
2844 case OP_COND_EXC_GE:
2845 case OP_COND_EXC_GE_UN:
2846 case OP_COND_EXC_LE:
2847 case OP_COND_EXC_LE_UN:
2848 case OP_COND_EXC_IEQ:
2849 case OP_COND_EXC_INE_UN:
2850 case OP_COND_EXC_ILT:
2851 case OP_COND_EXC_ILT_UN:
2852 case OP_COND_EXC_IGT:
2853 case OP_COND_EXC_IGT_UN:
2854 case OP_COND_EXC_IGE:
2855 case OP_COND_EXC_IGE_UN:
2856 case OP_COND_EXC_ILE:
2857 case OP_COND_EXC_ILE_UN:
2858 EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
2860 case OP_COND_EXC_OV:
2861 case OP_COND_EXC_NO:
2863 case OP_COND_EXC_NC:
2864 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
2866 case OP_COND_EXC_IOV:
2867 case OP_COND_EXC_INO:
2868 case OP_COND_EXC_IC:
2869 case OP_COND_EXC_INC:
2870 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), ins->inst_p1);
2882 EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
2890 case OP_CMOV_INE_UN:
2891 case OP_CMOV_IGE_UN:
2892 case OP_CMOV_IGT_UN:
2893 case OP_CMOV_ILE_UN:
2894 case OP_CMOV_ILT_UN:
2895 g_assert (ins->dreg == ins->sreg1);
2896 x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
2899 /* floating point opcodes */
2901 double d = *(double *)ins->inst_p0;
2903 if ((d == 0.0) && (mono_signbit (d) == 0)) {
2905 } else if (d == 1.0) {
2908 if (cfg->compile_aot) {
2909 guint32 *val = (guint32*)&d;
2910 x86_push_imm (code, val [1]);
2911 x86_push_imm (code, val [0]);
2912 x86_fld_membase (code, X86_ESP, 0, TRUE);
2913 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
2916 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0);
2917 x86_fld (code, NULL, TRUE);
2923 float f = *(float *)ins->inst_p0;
2925 if ((f == 0.0) && (mono_signbit (f) == 0)) {
2927 } else if (f == 1.0) {
2930 if (cfg->compile_aot) {
2931 guint32 val = *(guint32*)&f;
2932 x86_push_imm (code, val);
2933 x86_fld_membase (code, X86_ESP, 0, FALSE);
2934 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
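/*
 * The x87 offers no way to load an arbitrary constant directly into a
 * register, so the AOT paths here push the raw IEEE-754 bit pattern
 * and reload the value through memory with fld.
 */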
2937 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0);
2938 x86_fld (code, NULL, FALSE);
2943 case OP_STORER8_MEMBASE_REG:
2944 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
2946 case OP_LOADR8_SPILL_MEMBASE:
2947 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2950 case OP_LOADR8_MEMBASE:
2951 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2953 case OP_STORER4_MEMBASE_REG:
2954 x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
2956 case OP_LOADR4_MEMBASE:
2957 x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
2959 case OP_ICONV_TO_R4: /* FIXME: change precision */
2960 case OP_ICONV_TO_R8:
2961 x86_push_reg (code, ins->sreg1);
2962 x86_fild_membase (code, X86_ESP, 0, FALSE);
2963 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
2965 case OP_ICONV_TO_R_UN:
2966 x86_push_imm (code, 0);
2967 x86_push_reg (code, ins->sreg1);
2968 x86_fild_membase (code, X86_ESP, 0, TRUE);
2969 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
2971 case OP_X86_FP_LOAD_I8:
2972 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
2974 case OP_X86_FP_LOAD_I4:
2975 x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
2977 case OP_FCONV_TO_R4:
2978 /* FIXME: nothing to do ?? */
2980 case OP_FCONV_TO_I1:
2981 code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE);
2983 case OP_FCONV_TO_U1:
2984 code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE);
2986 case OP_FCONV_TO_I2:
2987 code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE);
2989 case OP_FCONV_TO_U2:
2990 code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE);
2992 case OP_FCONV_TO_I4:
2994 code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
2996 case OP_FCONV_TO_I8:
2997 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
2998 x86_fnstcw_membase(code, X86_ESP, 0);
2999 x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2);
3000 x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00);
3001 x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2);
3002 x86_fldcw_membase (code, X86_ESP, 2);
3003 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
3004 x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
3005 x86_pop_reg (code, ins->dreg);
3006 x86_pop_reg (code, ins->backend.reg3);
3007 x86_fldcw_membase (code, X86_ESP, 0);
3008 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
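/*
 * Roughly what the block above does: fist honours the current FPU
 * rounding mode, so the control word is saved with fnstcw, the RC bits
 * (0xc00) are forced to round-toward-zero, the value is stored as a
 * 64-bit integer and popped into dreg/reg3, and the original control
 * word is restored afterwards.
 */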
3010 case OP_LCONV_TO_R8_2:
3011 x86_push_reg (code, ins->sreg2);
3012 x86_push_reg (code, ins->sreg1);
3013 x86_fild_membase (code, X86_ESP, 0, TRUE);
3014 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3016 case OP_LCONV_TO_R4_2:
3017 x86_push_reg (code, ins->sreg2);
3018 x86_push_reg (code, ins->sreg1);
3019 x86_fild_membase (code, X86_ESP, 0, TRUE);
3020 /* Change precision */
3021 x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
3022 x86_fld_membase (code, X86_ESP, 0, FALSE);
3023 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
3025 case OP_LCONV_TO_R_UN:
3026 case OP_LCONV_TO_R_UN_2: {
3027 static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
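/*
 * mn encodes 2^64 as an 80-bit extended double (significand
 * 0x8000000000000000, biased exponent 0x403f). Adding it below, when
 * the high word is negative, converts the signed value that fild
 * produced into the intended unsigned one.
 */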
3030 /* load 64bit integer to FP stack */
3031 x86_push_imm (code, 0);
3032 x86_push_reg (code, ins->sreg2);
3033 x86_push_reg (code, ins->sreg1);
3034 x86_fild_membase (code, X86_ESP, 0, TRUE);
3035 /* store as 80bit FP value */
3036 x86_fst80_membase (code, X86_ESP, 0);
3038 /* test if lreg is negative */
3039 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3040 br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
3042 /* add correction constant mn */
3043 x86_fld80_mem (code, mn);
3044 x86_fld80_membase (code, X86_ESP, 0);
3045 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3046 x86_fst80_membase (code, X86_ESP, 0);
3048 x86_patch (br, code);
3050 x86_fld80_membase (code, X86_ESP, 0);
3051 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12);
3055 case OP_LCONV_TO_OVF_I:
3056 case OP_LCONV_TO_OVF_I4_2: {
3057 guint8 *br [3], *label [1];
3061 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3063 x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
3065 /* If the low word top bit is set, see if we are negative */
3066 br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
3067 /* We are not negative (no top bit set), check that our top word is zero */
3068 x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
3069 br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
3072 /* throw exception */
3073 tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
3075 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
3076 if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
3077 x86_jump8 (code, 0);
3079 x86_jump32 (code, 0);
3081 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
3082 x86_jump32 (code, 0);
3086 x86_patch (br [0], code);
3087 /* our top bit is set, check that the top word is 0xffffffff */
3088 x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
3090 x86_patch (br [1], code);
3091 /* nope, emit exception */
3092 br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
3093 x86_patch (br [2], label [0]);
3095 if (ins->dreg != ins->sreg1)
3096 x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
3100 /* Not needed on the fp stack */
3103 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3106 x86_fp_op_reg (code, X86_FSUB, 1, TRUE);
3109 x86_fp_op_reg (code, X86_FMUL, 1, TRUE);
3112 x86_fp_op_reg (code, X86_FDIV, 1, TRUE);
3120 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3125 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3132 * it really doesn't make sense to inline all this code,
3133 * it's here just to show that things may not be as simple as they appear.
3136 guchar *check_pos, *end_tan, *pop_jump;
3137 x86_push_reg (code, X86_EAX);
3140 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3142 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3143 x86_fstp (code, 0); /* pop the 1.0 */
3145 x86_jump8 (code, 0);
3147 x86_fp_op (code, X86_FADD, 0);
3151 x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
3153 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3156 x86_patch (pop_jump, code);
3157 x86_fstp (code, 0); /* pop the 1.0 */
3158 x86_patch (check_pos, code);
3159 x86_patch (end_tan, code);
3161 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3162 x86_pop_reg (code, X86_EAX);
3169 x86_fp_op_reg (code, X86_FADD, 1, TRUE);
3178 g_assert (cfg->opt & MONO_OPT_CMOV);
3179 g_assert (ins->dreg == ins->sreg1);
3180 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3181 x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
3184 g_assert (cfg->opt & MONO_OPT_CMOV);
3185 g_assert (ins->dreg == ins->sreg1);
3186 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3187 x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
3190 g_assert (cfg->opt & MONO_OPT_CMOV);
3191 g_assert (ins->dreg == ins->sreg1);
3192 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3193 x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
3196 g_assert (cfg->opt & MONO_OPT_CMOV);
3197 g_assert (ins->dreg == ins->sreg1);
3198 x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3199 x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
3205 x86_fxch (code, ins->inst_imm);
3210 x86_push_reg (code, X86_EAX);
3211 /* we need to exchange ST(0) with ST(1) */
3214 /* this requires a loop, because fprem sometimes
3215 * returns a partial remainder */
3217 /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
3218 /* x86_fprem1 (code); */
3221 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
3223 x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
3228 x86_pop_reg (code, X86_EAX);
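/*
 * The loop above relies on the FPU C2 status flag: fprem may perform
 * only a partial reduction per iteration and sets C2 while the result
 * is incomplete, so the status word is copied into EAX (which is why
 * EAX is saved around the block) and fprem is retried until C2 is
 * clear.
 */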
3232 if (cfg->opt & MONO_OPT_FCMOV) {
3233 x86_fcomip (code, 1);
3237 /* this overwrites EAX */
3238 EMIT_FPCOMPARE(code);
3239 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3242 if (cfg->opt & MONO_OPT_FCMOV) {
3243 /* zeroing the register at the start results in
3244 * shorter and faster code (we can also remove the widening op)
3246 guchar *unordered_check;
3247 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3248 x86_fcomip (code, 1);
3250 unordered_check = code;
3251 x86_branch8 (code, X86_CC_P, 0, FALSE);
3252 x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
3253 x86_patch (unordered_check, code);
3256 if (ins->dreg != X86_EAX)
3257 x86_push_reg (code, X86_EAX);
3259 EMIT_FPCOMPARE(code);
3260 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3261 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3262 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3263 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3265 if (ins->dreg != X86_EAX)
3266 x86_pop_reg (code, X86_EAX);
3270 if (cfg->opt & MONO_OPT_FCMOV) {
3271 /* zeroing the register at the start results in
3272 * shorter and faster code (we can also remove the widening op)
3274 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3275 x86_fcomip (code, 1);
3277 if (ins->opcode == OP_FCLT_UN) {
3278 guchar *unordered_check = code;
3279 guchar *jump_to_end;
3280 x86_branch8 (code, X86_CC_P, 0, FALSE);
3281 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3283 x86_jump8 (code, 0);
3284 x86_patch (unordered_check, code);
3285 x86_inc_reg (code, ins->dreg);
3286 x86_patch (jump_to_end, code);
3288 x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3292 if (ins->dreg != X86_EAX)
3293 x86_push_reg (code, X86_EAX);
3295 EMIT_FPCOMPARE(code);
3296 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3297 if (ins->opcode == OP_FCLT_UN) {
3298 guchar *is_not_zero_check, *end_jump;
3299 is_not_zero_check = code;
3300 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3302 x86_jump8 (code, 0);
3303 x86_patch (is_not_zero_check, code);
3304 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3306 x86_patch (end_jump, code);
3308 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3309 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3311 if (ins->dreg != X86_EAX)
3312 x86_pop_reg (code, X86_EAX);
3316 if (cfg->opt & MONO_OPT_FCMOV) {
3317 /* zeroing the register at the start results in
3318 * shorter and faster code (we can also remove the widening op)
3320 guchar *unordered_check;
3321 x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3322 x86_fcomip (code, 1);
3324 if (ins->opcode == OP_FCGT) {
3325 unordered_check = code;
3326 x86_branch8 (code, X86_CC_P, 0, FALSE);
3327 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3328 x86_patch (unordered_check, code);
3330 x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3334 if (ins->dreg != X86_EAX)
3335 x86_push_reg (code, X86_EAX);
3337 EMIT_FPCOMPARE(code);
3338 x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
3339 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3340 if (ins->opcode == OP_FCGT_UN) {
3341 guchar *is_not_zero_check, *end_jump;
3342 is_not_zero_check = code;
3343 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3345 x86_jump8 (code, 0);
3346 x86_patch (is_not_zero_check, code);
3347 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3349 x86_patch (end_jump, code);
3351 x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3352 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3354 if (ins->dreg != X86_EAX)
3355 x86_pop_reg (code, X86_EAX);
3358 if (cfg->opt & MONO_OPT_FCMOV) {
3359 guchar *jump = code;
3360 x86_branch8 (code, X86_CC_P, 0, TRUE);
3361 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3362 x86_patch (jump, code);
3365 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
3366 EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
3369 /* Branch if C013 != 100 */
3370 if (cfg->opt & MONO_OPT_FCMOV) {
3371 /* branch if !ZF or (PF|CF) */
3372 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3373 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3374 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
3377 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3378 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3381 if (cfg->opt & MONO_OPT_FCMOV) {
3382 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3385 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3388 if (cfg->opt & MONO_OPT_FCMOV) {
3389 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3390 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3393 if (ins->opcode == OP_FBLT_UN) {
3394 guchar *is_not_zero_check, *end_jump;
3395 is_not_zero_check = code;
3396 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3398 x86_jump8 (code, 0);
3399 x86_patch (is_not_zero_check, code);
3400 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3402 x86_patch (end_jump, code);
3404 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3408 if (cfg->opt & MONO_OPT_FCMOV) {
3409 if (ins->opcode == OP_FBGT) {
3412 /* skip branch if C1=1 */
3414 x86_branch8 (code, X86_CC_P, 0, FALSE);
3415 /* branch if (C0 | C3) = 1 */
3416 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3417 x86_patch (br1, code);
3419 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3423 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3424 if (ins->opcode == OP_FBGT_UN) {
3425 guchar *is_not_zero_check, *end_jump;
3426 is_not_zero_check = code;
3427 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3429 x86_jump8 (code, 0);
3430 x86_patch (is_not_zero_check, code);
3431 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
3433 x86_patch (end_jump, code);
3435 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3438 /* Branch if C013 == 100 or 001 */
3439 if (cfg->opt & MONO_OPT_FCMOV) {
3442 /* skip branch if C1=1 */
3444 x86_branch8 (code, X86_CC_P, 0, FALSE);
3445 /* branch if (C0 | C3) = 1 */
3446 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
3447 x86_patch (br1, code);
3450 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3451 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3452 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
3453 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3456 /* Branch if C013 == 000 */
3457 if (cfg->opt & MONO_OPT_FCMOV) {
3458 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
3461 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3464 /* Branch if C013=000 or 100 */
3465 if (cfg->opt & MONO_OPT_FCMOV) {
3468 /* skip branch if C1=1 */
3470 x86_branch8 (code, X86_CC_P, 0, FALSE);
3471 /* branch if C0=0 */
3472 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
3473 x86_patch (br1, code);
3476 x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
3477 x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
3478 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3481 /* Branch if C013 != 001 */
3482 if (cfg->opt & MONO_OPT_FCMOV) {
3483 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3484 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
3487 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3488 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3492 x86_push_reg (code, X86_EAX);
3495 x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
3496 x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
3497 x86_pop_reg (code, X86_EAX);
3499 /* Have to clean up the fp stack before throwing the exception */
3501 x86_branch8 (code, X86_CC_NE, 0, FALSE);
3504 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
3506 x86_patch (br1, code);
3510 code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset);
3513 case OP_MEMORY_BARRIER: {
3514 /* Not needed on x86 */
3517 case OP_ATOMIC_ADD_I4: {
3518 int dreg = ins->dreg;
3520 if (dreg == ins->inst_basereg) {
3521 x86_push_reg (code, ins->sreg2);
3525 if (dreg != ins->sreg2)
3526 x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);
3528 x86_prefix (code, X86_LOCK_PREFIX);
3529 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
3531 if (dreg != ins->dreg) {
3532 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3533 x86_pop_reg (code, dreg);
3538 case OP_ATOMIC_ADD_NEW_I4: {
3539 int dreg = ins->dreg;
3541 /* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
3542 if (ins->sreg2 == dreg) {
3543 if (dreg == X86_EBX) {
3545 if (ins->inst_basereg == X86_EDI)
3549 if (ins->inst_basereg == X86_EBX)
3552 } else if (ins->inst_basereg == dreg) {
3553 if (dreg == X86_EBX) {
3555 if (ins->sreg2 == X86_EDI)
3559 if (ins->sreg2 == X86_EBX)
3564 if (dreg != ins->dreg) {
3565 x86_push_reg (code, dreg);
3568 x86_mov_reg_reg (code, dreg, ins->sreg2, 4);
3569 x86_prefix (code, X86_LOCK_PREFIX);
3570 x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
3571 /* dreg contains the old value; add the sreg2 value to it */
3572 x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);
3574 if (ins->dreg != dreg) {
3575 x86_mov_reg_reg (code, ins->dreg, dreg, 4);
3576 x86_pop_reg (code, dreg);
3581 case OP_ATOMIC_EXCHANGE_I4:
3582 case OP_ATOMIC_CAS_IMM_I4: {
3584 int sreg2 = ins->sreg2;
3585 int breg = ins->inst_basereg;
3587 /* cmpxchg uses eax as the comparand, need to make sure we can use it
3588 * hack to overcome limits in x86 reg allocator
3589 * (req: dreg == eax and sreg2 != eax and breg != eax)
3591 g_assert (ins->dreg == X86_EAX);
3593 /* We need the EAX reg for the cmpxchg */
3594 if (ins->sreg2 == X86_EAX) {
3595 x86_push_reg (code, X86_EDX);
3596 x86_mov_reg_reg (code, X86_EDX, X86_EAX, 4);
3600 if (breg == X86_EAX) {
3601 x86_push_reg (code, X86_ESI);
3602 x86_mov_reg_reg (code, X86_ESI, X86_EAX, 4);
3606 if (ins->opcode == OP_ATOMIC_CAS_IMM_I4) {
3607 x86_mov_reg_imm (code, X86_EAX, ins->backend.data);
3609 x86_prefix (code, X86_LOCK_PREFIX);
3610 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3612 x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);
3614 br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
3615 x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
3616 br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
3617 x86_patch (br [1], br [0]);
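/*
 * This is a classic compare-and-swap loop: EAX holds the value last
 * read from memory, lock cmpxchg stores sreg2 only if the location
 * still matches EAX (reloading EAX with the current contents when it
 * does not), and the jne retries until the exchange succeeds.
 */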
3620 if (breg != ins->inst_basereg)
3621 x86_pop_reg (code, X86_ESI);
3623 if (ins->sreg2 != sreg2)
3624 x86_pop_reg (code, X86_EDX);
3628 #ifdef MONO_ARCH_SIMD_INTRINSICS
3630 x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3633 x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3636 x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3639 x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3642 x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3645 x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3648 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3649 x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3652 x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3655 x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3658 x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3661 x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3664 x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
3667 x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1);
3670 x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1);
3673 x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3676 x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3679 x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3682 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1);
3685 x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1);
3688 case OP_PSHUFLEW_HIGH:
3689 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3690 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1);
3692 case OP_PSHUFLEW_LOW:
3693 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3694 x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0);
3697 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
3698 x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0);
3702 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
3705 x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
3708 x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
3711 x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
3714 x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
3717 x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
3720 g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
3721 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
3724 x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
3727 x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
3730 x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
3733 x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
3736 x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
3739 x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
3742 x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
3745 x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1);
3748 case OP_EXTRACT_MASK:
3749 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1);
3753 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2);
3756 x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2);
3759 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2);
3763 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2);
3766 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2);
3769 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2);
3772 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2);
3776 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2);
3779 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2);
3782 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2);
3785 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2);
3789 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2);
3792 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2);
3795 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2);
3799 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2);
3802 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2);
3805 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2);
3809 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2);
3812 x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2);
3816 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2);
3819 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2);
3822 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUD, ins->sreg1, ins->sreg2);
3826 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2);
3829 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2);
3832 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2);
3836 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2);
3839 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2);
3842 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2);
3845 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2);
3849 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2);
3852 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2);
3855 x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2);
3858 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2);
3861 case OP_PSUM_ABS_DIFF:
3862 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2);
3865 case OP_UNPACK_LOWB:
3866 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2);
3868 case OP_UNPACK_LOWW:
3869 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2);
3871 case OP_UNPACK_LOWD:
3872 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2);
3874 case OP_UNPACK_LOWQ:
3875 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2);
3877 case OP_UNPACK_LOWPS:
3878 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
3880 case OP_UNPACK_LOWPD:
3881 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
3884 case OP_UNPACK_HIGHB:
3885 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2);
3887 case OP_UNPACK_HIGHW:
3888 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2);
3890 case OP_UNPACK_HIGHD:
3891 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2);
3893 case OP_UNPACK_HIGHQ:
3894 x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2);
3896 case OP_UNPACK_HIGHPS:
3897 x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
3899 case OP_UNPACK_HIGHPD:
3900 x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
3904 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2);
3907 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2);
3910 x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2);
3913 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2);
3916 case OP_PADDB_SAT_UN:
3917 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2);
3919 case OP_PSUBB_SAT_UN:
3920 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2);
3922 case OP_PADDW_SAT_UN:
3923 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2);
3925 case OP_PSUBW_SAT_UN:
3926 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2);
3930 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2);
3933 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2);
3936 x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2);
3939 x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2);
3943 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2);
3946 x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2);
3949 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2);
3951 case OP_PMULW_HIGH_UN:
3952 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2);
3955 x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2);
3959 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm);
3962 x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2);
3966 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm);
3969 x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2);
3973 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm);
3976 x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2);
3980 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm);
3983 x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2);
3987 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm);
3990 x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2);
3994 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm);
3997 x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2);
4001 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm);
4004 x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2);
4008 x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm);
4011 x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2);
4015 x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
4018 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4022 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4024 x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
4025 x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I1, FALSE);
4029 x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
4031 x86_shift_reg_imm (code, X86_SHR, ins->dreg, 16);
4032 x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I2, TRUE);
4036 x86_sse_alu_pd_membase_reg (code, X86_SSE_MOVHPD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
4038 x86_sse_alu_sd_membase_reg (code, X86_SSE_MOVSD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
4039 x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE);
4043 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->sreg1, ins->sreg2, ins->inst_c0);
4045 case OP_EXTRACTX_U2:
4046 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0);
4048 case OP_INSERTX_U1_SLOW:
4049 /* sreg1 is the extracted ireg (scratch)
4050 * sreg2 is the ireg to be inserted (scratch)
4051 * dreg is the xreg to receive the value */
4053 /*clear the bits from the extracted word*/
4054 x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
4055 /*shift the value to insert if needed*/
4056 if (ins->inst_c0 & 1)
4057 x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8);
4058 /*join them together*/
4059 x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
4060 x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
4063 case OP_STOREX_MEMBASE_REG:
4064 case OP_STOREX_MEMBASE:
4065 x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4067 case OP_LOADX_MEMBASE:
4068 x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4070 case OP_LOADX_ALIGNED_MEMBASE:
4071 x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
4073 case OP_STOREX_ALIGNED_MEMBASE_REG:
4074 x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
4076 case OP_STOREX_NTA_MEMBASE_REG:
4077 x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset);
4079 case OP_PREFETCH_MEMBASE:
4080 x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
4084 /*FIXME the peephole pass should have killed this*/
4085 if (ins->dreg != ins->sreg1)
4086 x86_movaps_reg_reg (code, ins->dreg, ins->sreg1);
4089 x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
4091 case OP_ICONV_TO_R8_RAW:
4092 x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
4093 x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
4096 case OP_FCONV_TO_R8_X:
4097 x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
4098 x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
4101 case OP_XCONV_R8_TO_I4:
4102 x86_cvttsd2si (code, ins->dreg, ins->sreg1);
4103 switch (ins->backend.source_opcode) {
4104 case OP_FCONV_TO_I1:
4105 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
4107 case OP_FCONV_TO_U1:
4108 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4110 case OP_FCONV_TO_I2:
4111 x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
4113 case OP_FCONV_TO_U2:
4114 x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
4120 g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
4121 g_assert_not_reached ();
4124 if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
4125 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4126 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4127 g_assert_not_reached ();
4133 cfg->code_len = code - cfg->native_code;
4137 mono_arch_register_lowlevel_calls (void)
4142 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4144 MonoJumpInfo *patch_info;
4145 gboolean compile_aot = !run_cctors;
4147 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4148 unsigned char *ip = patch_info->ip.i + code;
4149 const unsigned char *target;
4151 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4154 switch (patch_info->type) {
4155 case MONO_PATCH_INFO_BB:
4156 case MONO_PATCH_INFO_LABEL:
4159 /* No need to patch these */
4164 switch (patch_info->type) {
4165 case MONO_PATCH_INFO_IP:
4166 *((gconstpointer *)(ip)) = target;
4168 case MONO_PATCH_INFO_CLASS_INIT: {
4170 /* Might already have been changed to a nop */
4171 x86_call_code (code, 0);
4172 x86_patch (ip, target);
4175 case MONO_PATCH_INFO_ABS:
4176 case MONO_PATCH_INFO_METHOD:
4177 case MONO_PATCH_INFO_METHOD_JUMP:
4178 case MONO_PATCH_INFO_INTERNAL_METHOD:
4179 case MONO_PATCH_INFO_BB:
4180 case MONO_PATCH_INFO_LABEL:
4181 case MONO_PATCH_INFO_RGCTX_FETCH:
4182 case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
4183 case MONO_PATCH_INFO_MONITOR_ENTER:
4184 case MONO_PATCH_INFO_MONITOR_EXIT:
4185 x86_patch (ip, target);
4187 case MONO_PATCH_INFO_NONE:
4190 guint32 offset = mono_arch_get_patch_offset (ip);
4191 *((gconstpointer *)(ip + offset)) = target;
4199 mono_arch_emit_prolog (MonoCompile *cfg)
4201 MonoMethod *method = cfg->method;
4203 MonoMethodSignature *sig;
4205 int alloc_size, pos, max_offset, i;
4208 cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
4210 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4211 cfg->code_size += 512;
4213 code = cfg->native_code = g_malloc (cfg->code_size);
4215 x86_push_reg (code, X86_EBP);
4216 x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
4218 alloc_size = cfg->stack_offset;
4221 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4222 /* Might need to attach the thread to the JIT or change the domain for the callback */
4223 if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
4224 guint8 *buf, *no_domain_branch;
4226 code = mono_x86_emit_tls_get (code, X86_EAX, appdomain_tls_offset);
4227 x86_alu_reg_imm (code, X86_CMP, X86_EAX, GPOINTER_TO_UINT (cfg->domain));
4228 no_domain_branch = code;
4229 x86_branch8 (code, X86_CC_NE, 0, 0);
4230 code = mono_x86_emit_tls_get ( code, X86_EAX, lmf_tls_offset);
4231 x86_test_reg_reg (code, X86_EAX, X86_EAX);
4233 x86_branch8 (code, X86_CC_NE, 0, 0);
4234 x86_patch (no_domain_branch, code);
4235 x86_push_imm (code, cfg->domain);
4236 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4237 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4238 x86_patch (buf, code);
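/*
 * In short: the block above compares the thread's cached appdomain
 * (read from TLS) against cfg->domain and checks that an LMF exists;
 * if the domain differs or no LMF is set up yet, it falls into the
 * mono_jit_thread_attach call, which attaches the thread and/or
 * switches it to the right domain.
 */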
4239 #ifdef PLATFORM_WIN32
4240 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4241 /* FIXME: Add a separate key for LMF to avoid this */
4242 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4246 g_assert (!cfg->compile_aot);
4247 x86_push_imm (code, cfg->domain);
4248 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
4249 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
4253 if (method->save_lmf) {
4254 pos += sizeof (MonoLMF);
4256 /* save the current IP */
4257 mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4258 x86_push_imm_template (code);
4260 /* save all caller saved regs */
4261 x86_push_reg (code, X86_EBP);
4262 x86_push_reg (code, X86_ESI);
4263 x86_push_reg (code, X86_EDI);
4264 x86_push_reg (code, X86_EBX);
4266 if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
4268 * Optimized version which uses the mono_lmf TLS variable instead of indirection
4269 * through the mono_lmf_addr TLS variable.
4271 /* %eax = previous_lmf */
4272 x86_prefix (code, X86_GS_PREFIX);
4273 x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
4274 /* skip esp + method_info + lmf */
4275 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
4276 /* push previous_lmf */
4277 x86_push_reg (code, X86_EAX);
4279 x86_prefix (code, X86_GS_PREFIX);
4280 x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
4282 /* get the address of lmf for the current thread */
4284 * This is performance critical so we try to use some tricks to make it fast.
4288 if (lmf_addr_tls_offset != -1) {
4289 /* Load the lmf quickly using the GS register */
4290 code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
4291 #ifdef PLATFORM_WIN32
4292 /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
4293 /* FIXME: Add a separate key for LMF to avoid this */
4294 x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
4297 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
4300 /* Skip esp + method info */
4301 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
4304 x86_push_reg (code, X86_EAX);
4305 /* push *lmf (previous_lmf) */
4306 x86_push_membase (code, X86_EAX, 0);
4308 x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
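/*
 * At this point ESP points at the MonoLMF being built on the stack:
 * previous_lmf and lmf_addr have just been pushed, and storing ESP
 * through lmf_addr links the new frame in at the head of the thread's
 * LMF chain.
 */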
4312 if (cfg->used_int_regs & (1 << X86_EBX)) {
4313 x86_push_reg (code, X86_EBX);
4317 if (cfg->used_int_regs & (1 << X86_EDI)) {
4318 x86_push_reg (code, X86_EDI);
4322 if (cfg->used_int_regs & (1 << X86_ESI)) {
4323 x86_push_reg (code, X86_ESI);
4330 /* the original alloc_size is already aligned: %ebp and the return ip have been pushed, so realign */
4331 if (mono_do_x86_stack_align) {
4332 int tot = alloc_size + pos + 4 + 4; /* ret ip + ebp */
4333 tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
4334 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
4338 /* See mono_emit_stack_alloc */
4339 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
4340 guint32 remaining_size = alloc_size;
4341 while (remaining_size >= 0x1000) {
4342 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
4343 x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
4344 remaining_size -= 0x1000;
4347 x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
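/*
 * Large frames are committed one page (0x1000 bytes) at a time, with
 * the dummy test touching each page so the OS guard page is hit in
 * order; a single big sub could jump past the guard page and fault on
 * an uncommitted address.
 */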
4349 x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
4353 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED ||
4354 cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) {
4355 x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
4358 #if DEBUG_STACK_ALIGNMENT
4359 /* check the stack is aligned */
4360 if (method->wrapper_type == MONO_WRAPPER_NONE) {
4361 x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
4362 x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
4363 x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
4364 x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
4365 x86_breakpoint (code);
4369 /* compute max_offset in order to use short forward jumps */
4371 if (cfg->opt & MONO_OPT_BRANCH) {
4372 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4374 bb->max_offset = max_offset;
4376 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4378 /* max alignment for loops */
4379 if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4380 max_offset += LOOP_ALIGNMENT;
4382 MONO_BB_FOR_EACH_INS (bb, ins) {
4383 if (ins->opcode == OP_LABEL)
4384 ins->inst_c1 = max_offset;
4386 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4391 /* store runtime generic context */
4392 if (cfg->rgctx_var) {
4393 g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP);
4395 x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4);
4398 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4399 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4401 /* load arguments allocated to registers from the stack */
4402 sig = mono_method_signature (method);
4405 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4406 inst = cfg->args [pos];
4407 if (inst->opcode == OP_REGVAR) {
4408 x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
4409 if (cfg->verbose_level > 2)
4410 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4415 cfg->code_len = code - cfg->native_code;
4417 g_assert (cfg->code_len < cfg->code_size);
4423 mono_arch_emit_epilog (MonoCompile *cfg)
4425 MonoMethod *method = cfg->method;
4426 MonoMethodSignature *sig = mono_method_signature (method);
4428 guint32 stack_to_pop;
4430 int max_epilog_size = 16;
4433 if (cfg->method->save_lmf)
4434 max_epilog_size += 128;
4436 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4437 cfg->code_size *= 2;
4438 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4439 mono_jit_stats.code_reallocs++;
4442 code = cfg->native_code + cfg->code_len;
4444 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4445 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4447 /* the code restoring the registers must be kept in sync with OP_JMP */
	if (method->save_lmf) {
		gint32 prev_lmf_reg;
		gint32 lmf_offset = -sizeof (MonoLMF);

		/* check if we need to restore protection of the stack after a stack overflow */
		if (mono_get_jit_tls_offset () != -1) {
			guint8 *patch;
			code = mono_x86_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
			/* we load the value in a separate instruction: this mechanism may be
			 * used later as a safer way to do thread interruption
			 */
			x86_mov_reg_membase (code, X86_ECX, X86_ECX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 4);
			x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
			patch = code;
			x86_branch8 (code, X86_CC_Z, 0, FALSE);
			/* note that the call trampoline will preserve eax/edx */
			x86_call_reg (code, X86_ECX);
			x86_patch (patch, code);
		}
		/* FIXME: maybe save the jit tls in the prolog */

		if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
			/*
			 * Optimized version which uses the mono_lmf TLS variable instead of indirection
			 * through the mono_lmf_addr TLS variable.
			 */
			/* reg = previous_lmf */
			x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);

			/* lmf = previous_lmf */
			x86_prefix (code, X86_GS_PREFIX);
			x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
		} else {
			/* Find a spare register */
			switch (mini_type_get_underlying_type (cfg->generic_sharing_context, sig->ret)->type) {
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
				prev_lmf_reg = X86_EDI;
				cfg->used_int_regs |= (1 << X86_EDI);
				break;
			default:
				prev_lmf_reg = X86_EDX;
				break;
			}

			/* reg = previous_lmf */
			x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);

			/* ecx = lmf->lmf_addr */
			x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);

			/* *(lmf) = previous_lmf */
			x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
		}
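		/*
		 * Editorial sketch of the two unlink sequences above (not part of the
		 * original source). Fast path, using a direct TLS slot for mono_lmf:
		 *
		 *     mov <lmf_offset+previous_lmf>(%ebp), %ecx
		 *     mov %ecx, %gs:<lmf_tls_offset>
		 *
		 * Generic path, through the saved lmf_addr:
		 *
		 *     mov <lmf_offset+previous_lmf>(%ebp), %<spare>
		 *     mov <lmf_offset+lmf_addr>(%ebp), %ecx
		 *     mov %<spare>, (%ecx)
		 *
		 * Both pop the current MonoLMF off the per-thread LMF chain.
		 */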
		/* restore caller saved regs */
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
		}
		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
		}

		/* EBP is restored by LEAVE */
	} else {
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			pos -= 4;
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			pos -= 4;
		}
		if (cfg->used_int_regs & (1 << X86_ESI)) {
			pos -= 4;
		}

		if (pos)
			x86_lea_membase (code, X86_ESP, X86_EBP, pos);

		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_pop_reg (code, X86_ESI);
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_pop_reg (code, X86_EDI);
		}
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_pop_reg (code, X86_EBX);
		}
	}
	/* Load returned vtypes into registers if needed */
	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
	if (cinfo->ret.storage == ArgValuetypeInReg) {
		for (quad = 0; quad < 2; quad ++) {
			switch (cinfo->ret.pair_storage [quad]) {
			case ArgInIReg:
				x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), 4);
				break;
			case ArgOnFloatFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), FALSE);
				break;
			case ArgOnDoubleFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), TRUE);
				break;
			case ArgNone:
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	x86_leave (code);

	if (CALLCONV_IS_STDCALL (sig)) {
		MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));

		stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
	} else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret) && (cinfo->ret.storage == ArgOnStack))
		stack_to_pop = 4;
	else
		stack_to_pop = 0;

	if (stack_to_pop)
		x86_ret_imm (code, stack_to_pop);
	else
		x86_ret (code);

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
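/*
 * Editorial example (not part of the original source): for a stdcall pinvoke
 * such as int f (int a, int b), the callee pops its own arguments, so the
 * epilog above computes stack_to_pop = 8 and emits "ret $8"; for the default
 * caller-cleanup convention it emits a plain "ret" and leaves argument
 * cleanup to the caller.
 */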
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int nthrows, i;
	guint8 *code;
	MonoClass *exc_classes [16];
	guint8 *exc_throw_start [16], *exc_throw_end [16];
	guint32 code_size;
	int exc_count = 0;

	/* Compute needed space */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC)
			exc_count++;
	}

	/*
	 * make sure we have enough space for exceptions
	 * 16 is the size of two push_imm instructions and a call
	 */
	if (cfg->compile_aot)
		code_size = exc_count * 32;
	else
		code_size = exc_count * 16;

	while (cfg->code_len + code_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	nthrows = 0;
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint8 *buf, *buf2;
			guint32 throw_ip;

			x86_patch (patch_info->ip.i + cfg->native_code, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);
			throw_ip = patch_info->ip.i;

			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;
			if (i < nthrows) {
				x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip);
				x86_jump_code (code, exc_throw_start [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			}
			else {
				guint32 size;

				/* Compute size of code following the push <OFFSET> */
				size = 5 + 5;

				if ((code - cfg->native_code) - throw_ip < 126 - size) {
					/* Use the shorter form */
					buf = buf2 = code;
					x86_push_imm (code, 0);
				}
				else {
					buf = code;
					x86_push_imm (code, 0xf0f0f0f0);
					buf2 = code;
				}

				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = code;
				}

				x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
				patch_info->data.name = "mono_arch_throw_corlib_exception";
				patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
				patch_info->ip.i = code - cfg->native_code;
				x86_call_code (code, 0);
				x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
				while (buf < buf2)
					x86_padding (buf, buf2 - buf);

				if (nthrows < 16) {
					exc_throw_end [nthrows] = code;
					nthrows ++;
				}
			}
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
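/*
 * Editorial sketch of one emitted throw sequence (not part of the original
 * source), for a corlib exception raised at native offset throw_ip:
 *
 *     push <native offset of the throw site>
 *     push <exc_class->type_token - MONO_TOKEN_TYPE_DEF>
 *     call <mono_arch_throw_corlib_exception>
 *
 * which matches the "two push_imm instructions and a call" size estimate
 * above. Later throws of the same class only push their own offset and jump
 * into the existing sequence.
 */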
void
mono_arch_flush_icache (guint8 *code, gint size)
{
	/* not needed */
}

void
mono_arch_flush_register_windows (void)
{
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return TRUE;
}

/*
 * Support for fast access to the thread-local lmf structure using the GS
 * segment register on NPTL + kernel 2.6.x.
 */

static gboolean tls_offset_inited = FALSE;
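/*
 * Editorial sketch (not part of the original source): on such systems a TLS
 * slot at a known offset can be read with a single instruction,
 *
 *     mov %gs:<tls_offset>, %eax
 *
 * which is what the OP_TLS_GET intrinsics and the
 * x86_prefix (code, X86_GS_PREFIX) sequences in this file rely on.
 */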
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
	if (!tls_offset_inited) {
		if (!getenv ("MONO_NO_TLS")) {
#ifdef PLATFORM_WIN32
			/*
			 * We need to init this multiple times, since when we are first called, the key might not
			 * be initialized yet.
			 */
			appdomain_tls_offset = mono_domain_get_tls_key ();
			lmf_tls_offset = mono_get_jit_tls_key ();
			thread_tls_offset = mono_thread_get_tls_key ();

			/* Only 64 tls entries can be accessed using inline code */
			if (appdomain_tls_offset >= 64)
				appdomain_tls_offset = -1;
			if (lmf_tls_offset >= 64)
				lmf_tls_offset = -1;
			if (thread_tls_offset >= 64)
				thread_tls_offset = -1;
#else
#ifdef MONO_XEN_OPT
			optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
			tls_offset_inited = TRUE;
			appdomain_tls_offset = mono_domain_get_tls_offset ();
			lmf_tls_offset = mono_get_lmf_tls_offset ();
			lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
			thread_tls_offset = mono_thread_get_tls_offset ();
#endif
		}
	}
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
#ifdef MONO_ARCH_HAVE_IMT

// Linear handler, the bsearch head compare is shorter
//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
//        x86_patch(ins,target)
//[1 + 5] x86_jump_mem(inst,mem)

#define CMP_SIZE 6
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0

static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
	int i, distance = 0;
	for (i = start; i < target; ++i)
		distance += imt_entries [i]->chunk_size;
	return distance;
}
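/*
 * Editorial sketch of a generated thunk chunk (not part of the original
 * source), for one is_equals entry followed by another chunk:
 *
 *     cmp $<imt key>, %<MONO_ARCH_IMT_REG>   ; CMP_SIZE = 6 bytes
 *     jne next_chunk                         ; BR_SMALL_SIZE = 2 bytes
 *     jmp *<vtable slot>                     ; JUMP_IMM_SIZE = 6 bytes
 *
 * The chunk_size accounting below assumes exactly these encodings when
 * reserving code memory before emission.
 */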
/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint8 *code, *start;

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				if (fail_tramp) {
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2;
				} else {
					item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
				}
			}
		} else {
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	if (fail_tramp)
		code = mono_method_alloc_generic_virtual_thunk (domain, size);
	else
		code = mono_code_manager_reserve (domain->code_mp, size);
	start = code;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = code;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
				item->jmp_code = code;
				x86_branch8 (code, X86_CC_NE, 0, FALSE);
				if (item->has_target_code)
					x86_jump_code (code, item->value.target_code);
				else
					x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
			} else {
				if (fail_tramp) {
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
					x86_jump_code (code, item->value.target_code);
					x86_patch (item->jmp_code, code);
					x86_jump_code (code, fail_tramp);
					item->jmp_code = NULL;
				} else {
					/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
#endif
					x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
#if ENABLE_WRONG_METHOD_CHECK
					x86_patch (item->jmp_code, code);
					x86_breakpoint (code);
					item->jmp_code = NULL;
#endif
				}
			}
		} else {
			x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
			item->jmp_code = code;
			if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
				x86_branch8 (code, X86_CC_GE, 0, FALSE);
			else
				x86_branch32 (code, X86_CC_GE, 0, FALSE);
		}
	}

	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	if (!fail_tramp)
		mono_stats.imt_thunks_size += code - start;
	g_assert (code - start <= size);
	return start;
}
MonoMethod*
mono_arch_find_imt_method (gpointer *regs, guint8 *code)
{
	return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}

MonoObject*
mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
	MonoMethodSignature *sig = mono_method_signature (method);
	CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
	int this_argument_offset;
	MonoObject *this_argument;

	/*
	 * this is the offset of the this arg from esp as saved at the start of
	 * mono_arch_create_trampoline_code () in tramp-x86.c.
	 */
	this_argument_offset = 5;
	if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
		this_argument_offset++;

	this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));

	g_free (cinfo);
	return this_argument;
}
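/*
 * Editorial worked example (not part of the original source): the 4 pointers
 * pushed by the trampoline plus the call's return address occupy slots
 * esp[0]..esp[4], so the first stack argument of the original call sits at
 * esp[5]. When the callee returns a struct through a hidden pointer
 * (ArgOnStack), that pointer is pushed last and 'this' moves one slot up,
 * hence the this_argument_offset++ above.
 */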
MonoVTable*
mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
#endif
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
	int opcode = 0;

	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
			opcode = OP_SIN;
		} else if (strcmp (cmethod->name, "Cos") == 0) {
			opcode = OP_COS;
		} else if (strcmp (cmethod->name, "Tan") == 0) {
			opcode = OP_TAN;
		} else if (strcmp (cmethod->name, "Atan") == 0) {
			opcode = OP_ATAN;
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
			opcode = OP_SQRT;
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ABS;
		} else if (strcmp (cmethod->name, "Round") == 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ROUND;
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		if (cfg->opt & MONO_OPT_CMOV) {
			int opcode = 0;

			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
			}

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->type = STACK_I4;
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}

#if 0
		/* OP_FREM is not IEEE compatible */
		else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
			MONO_INST_NEW (cfg, ins, OP_FREM);
			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		}
#endif
	}

	return ins;
}
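/*
 * Editorial note (not part of the original source): the JIT invokes this hook
 * when it sees a call to a recognized method, so e.g. Math.Sqrt (x) becomes a
 * single OP_SQRT instruction (x87 fsqrt) instead of a regular call, and
 * Math.Min/Max on int32 become OP_IMIN/OP_IMAX, which the backend can emit as
 * a compare plus cmov when MONO_OPT_CMOV is enabled.
 */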
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}
MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (appdomain_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = appdomain_tls_offset;
	return ins;
}

MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (thread_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = thread_tls_offset;
	return ins;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2))
		/* mov <OFFSET>(<REG>), <REG> */
		return 2;
	else if ((code [0] == 0xba))
		/* mov <IMM>, %edx */
		return 1;
	else if ((code [0] == 0x68))
		/* push <IMM> */
		return 1;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6))
		/* push <OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2))
		/* call *<OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xdd) || (code [0] == 0xd9))
		/* fldl <ADDR> */
		return 2;
	else if ((code [0] == 0x58) && (code [1] == 0x05))
		/* pop %eax; add <OFFSET>, %eax */
		return 2;
	else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81))
		/* pop <REG>; add <OFFSET>, <REG> */
		return 3;
	else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8))
		/* mov <REG>, imm */
		return 1;
	else {
		g_assert_not_reached ();
		return -1;
	}
}
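/*
 * Editorial example (not part of the original source): for the 5-byte
 * sequence "b8 44 33 22 11" (mov $0x11223344, %eax) the patchable immediate
 * starts one byte in, so the function returns 1; for "ff 90 <disp32>"
 * (call *<disp32>(%eax)) the displacement starts after the opcode and ModRM
 * bytes, hence 2.
 */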
/*
 * mono_breakpoint_clean_code:
 *
 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
 * breakpoints in the original code, they are removed in the copy.
 *
 * Returns TRUE if no sw breakpoint was present.
 */
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
	int i;
	gboolean can_write = TRUE;
	/*
	 * If method_start is non-NULL we need to perform bound checks, since we access memory
	 * at code - offset we could go before the start of the method and end up in a different
	 * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes
	 * that fall outside the method instead.
	 */
	if (!method_start || code - offset >= method_start) {
		memcpy (buf, code - offset, size);
	} else {
		int diff = code - method_start;
		memset (buf, 0, size);
		memcpy (buf + offset - diff, method_start, diff + size - offset);
	}
	code -= offset;
	for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
		int idx = mono_breakpoint_info_index [i];
		guint8 *ptr;
		if (idx < 1)
			continue;
		ptr = mono_breakpoint_info [idx].address;
		if (ptr >= code && ptr < code + size) {
			guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
			can_write = FALSE;
			/*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
			buf [ptr - code] = saved_byte;
		}
	}
	return can_write;
}
gpointer
mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
{
	guint8 buf [8];
	guint8 reg = 0;
	gint32 disp = 0;

	mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
	code = buf + 8;

	*displacement = 0;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * should be checked first.
	 */
	if ((code [-2] == 0x8b) && (x86_modrm_mod (code [-1]) == 0x2) && (code [4] == 0xff) && (x86_modrm_reg (code [5]) == 0x2) && (x86_modrm_mod (code [5]) == 0x0)) {
		/*
		 * This is an interface call
		 * 8b 80 0c e8 ff ff       mov    0xffffe80c(%eax),%eax
		 * ff 10                   call   *(%eax)
		 */
		reg = x86_modrm_rm (code [5]);
		disp = 0;
#ifdef MONO_ARCH_HAVE_IMT
	} else if ((code [-2] == 0xba) && (code [3] == 0xff) && (x86_modrm_mod (code [4]) == 1) && (x86_modrm_reg (code [4]) == 2) && ((signed char)code [5] < 0)) {
		/* IMT-based interface calls: with MONO_ARCH_IMT_REG == edx
		 * ba 14 f8 28 08          mov    $0x828f814,%edx
		 * ff 50 fc                call   *0xfffffffc(%eax)
		 */
		reg = code [4] & 0x07;
		disp = (signed char)code [5];
#endif
	} else if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
		reg = code [4] & 0x07;
		disp = (signed char)code [5];
	} else {
		if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
			reg = code [1] & 0x07;
			disp = *((gint32*)(code + 2));
		} else if ((code [1] == 0xe8)) {
			return NULL;
		} else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
			/*
			 * This is an interface call
			 * 8b 40 30                mov    0x30(%eax),%eax
			 * ff 10                   call   *(%eax)
			 */
			disp = 0;
			reg = code [5] & 0x07;
		}
		else
			return NULL;
	}

	*displacement = disp;
	return regs [reg];
}

gpointer*
mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
{
	gpointer vt;
	int displacement;

	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)((char*)vt + displacement);
}
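/*
 * Editorial example (not part of the original source): for the IMT call
 * sequence "ba .. .. .. .. / ff 50 fc" above, the ModRM byte 0x50 decodes as
 * mod=1, reg=2 (the /2 call extension) and rm=0 (%eax), with an 8-bit
 * displacement of 0xfc = -4, so the vcall slot address is *(%eax - 4).
 */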
MonoObject*
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
		gssize *regs, guint8 *code)
{
	guint32 esp = regs [X86_ESP];
	CallInfo *cinfo;
	gpointer res;

	if (!gsctx && code)
		gsctx = mono_get_generic_context_from_code (code);
	cinfo = get_call_info (gsctx, NULL, sig, FALSE);

	/*
	 * The stack looks like:
	 * <other args>
	 * <this=delegate>
	 * <possible vtype return address>
	 * <return addr>
	 * <4 pointers pushed by mono_arch_create_trampoline_code ()>
	 */
	res = (((MonoObject**)esp) [5 + (cinfo->args [0].offset / 4)]);
	g_free (cinfo);
	return res;
}
#define MAX_ARCH_DELEGATE_PARAMS 10

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;

	if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
		return NULL;

	/* FIXME: Support more cases */
	if (MONO_TYPE_ISSTRUCT (sig->ret))
		return NULL;

	/*
	 * The stack contains:
	 * <delegate>
	 * <return addr>
	 */

	if (has_target) {
		static guint8* cached = NULL;

		if (cached)
			return cached;

		start = code = mono_global_codeman_reserve (64);

		/* Replace the this argument with the target */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
		x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
		x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

		g_assert ((code - start) < 64);

		mono_debug_add_delegate_trampoline (start, code - start);

		mono_memory_barrier ();

		cached = start;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i = 0;
		/* 8 for mov_reg and jump, plus 8 for each parameter */
		int code_reserve = 8 + (sig->param_count * 8);

		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		code = cache [sig->param_count];
		if (code)
			return code;

		/*
		 * The stack contains:
		 * <args in reverse order>
		 * <delegate>
		 * <return addr>
		 *
		 * and we need:
		 * <args in reverse order>
		 * <return addr>
		 *
		 * without unbalancing the stack.
		 * So move each arg up a spot in the stack (overwriting un-needed 'this' arg)
		 * and leaving original spot of first arg as placeholder in stack so
		 * when callee pops stack everything works.
		 */
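		/*
		 * Editorial worked example (not part of the original source), for a
		 * two-argument delegate:
		 *
		 *     before              after the copy loop
		 *     esp+0   ret addr    ret addr
		 *     esp+4   delegate    arg0
		 *     esp+8   arg0        arg1
		 *     esp+12  arg1        arg1   (stale placeholder)
		 *
		 * The stale top slot keeps the frame the same size the callee
		 * expects to pop, which is why the shuffle stays balanced.
		 */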
		start = code = mono_global_codeman_reserve (code_reserve);

		/* store delegate for access to method_ptr */
		x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);

		/* move args up */
		for (i = 0; i < sig->param_count; ++i) {
			x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
			x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
		}

		x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

		g_assert ((code - start) < code_reserve);

		mono_debug_add_delegate_trampoline (start, code - start);

		mono_memory_barrier ();

		cache [sig->param_count] = start;
	}

	return start;
}
gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	switch (reg) {
	case X86_ECX: return (gpointer)ctx->ecx;
	case X86_EDX: return (gpointer)ctx->edx;
	case X86_EBP: return (gpointer)ctx->ebp;
	case X86_ESP: return (gpointer)ctx->esp;
	default: return ((gpointer)(&ctx->eax)[reg]);
	}
}
#ifdef MONO_ARCH_SIMD_INTRINSICS

static MonoInst*
get_float_to_x_spill_area (MonoCompile *cfg)
{
	if (!cfg->fconv_to_r8_x_var) {
		cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
		cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
	}
	return cfg->fconv_to_r8_x_var;
}
/*
 * Convert all fconv opts that MONO_OPT_SSE2 would get wrong.
 */
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
	MonoInst *fconv;
	int dreg, src_opcode;

	if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD))
		return;

	switch (src_opcode = ins->opcode) {
	case OP_FCONV_TO_I1:
	case OP_FCONV_TO_U1:
	case OP_FCONV_TO_I2:
	case OP_FCONV_TO_U2:
	case OP_FCONV_TO_I4:
	case OP_FCONV_TO_I:
		break;
	default:
		return;
	}

	/* dreg is the IREG and sreg1 is the FREG */
	MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
	fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
	fconv->sreg1 = ins->sreg1;
	fconv->dreg = mono_alloc_ireg (cfg);
	fconv->type = STACK_VTYPE;
	fconv->backend.spill_var = get_float_to_x_spill_area (cfg);

	mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);

	dreg = ins->dreg;
	NULLIFY_INS (ins);
	ins->opcode = OP_XCONV_R8_TO_I4;

	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = fconv->dreg;
	ins->dreg = dreg;
	ins->type = STACK_I4;
	ins->backend.source_opcode = src_opcode;
}
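/*
 * Editorial sketch (not part of the original source): a conversion such as
 * "ireg = fconv_to_i4 freg" is rewritten above into the two-instruction
 * SSE2 form
 *
 *     xreg = fconv_to_r8_x  freg     ; fp value moved into an XMM register
 *     ireg = xconv_r8_to_i4 xreg     ; truncating SSE2 conversion
 *
 * with the original opcode kept in backend.source_opcode so the emitter can
 * still apply the right size/signedness clamping for the I1/U1/I2/U2 cases.
 */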
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
	MonoInst *ins;
	int vreg;

	if (!(cfg->opt & MONO_OPT_SIMD) || long_ins->opcode != OP_EXTRACT_I8)
		return;

	/*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */
	vreg = long_ins->sreg1;

	if (long_ins->inst_c0) {
		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->klass = long_ins->klass;
		ins->sreg1 = long_ins->sreg1;
		ins->inst_c0 = 2;
		ins->type = STACK_VTYPE;
		ins->dreg = vreg = alloc_ireg (cfg);
		MONO_ADD_INS (cfg->cbb, ins);
	}

	MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = vreg;
	ins->type = STACK_I4;
	ins->dreg = long_ins->dreg + 1;
	MONO_ADD_INS (cfg->cbb, ins);

	MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
	ins->klass = long_ins->klass;
	ins->sreg1 = long_ins->sreg1;
	ins->inst_c0 = long_ins->inst_c0 ? 3 : 1;
	ins->type = STACK_VTYPE;
	ins->dreg = vreg = alloc_ireg (cfg);
	MONO_ADD_INS (cfg->cbb, ins);

	MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = vreg;
	ins->type = STACK_I4;
	ins->dreg = long_ins->dreg + 2;
	MONO_ADD_INS (cfg->cbb, ins);

	long_ins->opcode = OP_NOP;
}
#endif /* MONO_ARCH_SIMD_INTRINSICS */
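/*
 * Editorial sketch (not part of the original source): for the low half of
 * the vector (inst_c0 == 0) the decomposition above produces
 *
 *     dreg+1 = extract_i4 vreg        ; dword element 0
 *     tmp    = pshufled   vreg, 1
 *     dreg+2 = extract_i4 tmp         ; dword element 1
 *
 * while for the high half (inst_c0 != 0) the shuffle constants become 2 and
 * 3, and the original OP_EXTRACT_I8 is turned into a no-op.
 */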