/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Based on mini-x86.c.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Patrik Torstensson
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-amd64.h"
#include "inssel.h"
#include "cpu-amd64.h"

static gint lmf_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;

/* Use SSE2 instructions for fp arithmetic */
static gboolean use_sse2 = FALSE;

/* xmm15 is reserved for use by some opcodes */
#define AMD64_CALLEE_FREGS 0xef

#define FPSTACK_SIZE 6

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)

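/*
 * Worked examples for the two macros above (illustrative only):
 *
 *   ALIGN_TO (13, 8) == 16    ALIGN_TO (16, 8) == 16
 *
 * i.e. ALIGN_TO rounds up to the next multiple of a power-of-two alignment,
 * while IS_IMM32 tests whether a value fits in a zero extended 32 bit
 * immediate:
 *
 *   IS_IMM32 (0x7fffffffLL)  -> TRUE     IS_IMM32 (0x100000000LL) -> FALSE
 */
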
#ifdef PLATFORM_WIN32
/* Under Windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif

#define SIGNAL_STACK_SIZE (64 * 1024)

#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */

/*
 * FIXME:
 * - Use xmm registers instead of the x87 stack
 * - Allocate arguments to global registers
 * - implement emulated opcodes
 * - (all archs) do not store trampoline addresses in method->info since they
 *   are domain specific.
 */

#define NOT_IMPLEMENTED g_assert_not_reached ()

const char*
mono_arch_regname (int reg)
{
        switch (reg) {
        case AMD64_RAX: return "%rax";
        case AMD64_RBX: return "%rbx";
        case AMD64_RCX: return "%rcx";
        case AMD64_RDX: return "%rdx";
        case AMD64_RSP: return "%rsp";
        case AMD64_RBP: return "%rbp";
        case AMD64_RDI: return "%rdi";
        case AMD64_RSI: return "%rsi";
        case AMD64_R8: return "%r8";
        case AMD64_R9: return "%r9";
        case AMD64_R10: return "%r10";
        case AMD64_R11: return "%r11";
        case AMD64_R12: return "%r12";
        case AMD64_R13: return "%r13";
        case AMD64_R14: return "%r14";
        case AMD64_R15: return "%r15";
        }
        return "unknown";
}

static const char * xmmregs [] = {
        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
        "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};

static const char*
mono_arch_fregname (int reg)
{
        if (reg < AMD64_XMM_NREG)
                return xmmregs [reg];
        else
                return "unknown";
}

static const char*
mono_amd64_regname (int reg, gboolean fp)
{
        if (fp)
                return mono_arch_fregname (reg);
        else
                return mono_arch_regname (reg);
}

static inline void
amd64_patch (unsigned char* code, gpointer target)
{
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f))
                code += 1;

        if ((code [0] & 0xf8) == 0xb8) {
                /* amd64_set_reg_template */
                *(guint64*)(code + 1) = (guint64)target;
        }
        else if (code [0] == 0x8b) {
                /* mov 0(%rip), %dreg */
                *(guint32*)(code + 2) = (guint32)(guint64)target - 7;
        }
        else if ((code [0] == 0xff) && (code [1] == 0x15)) {
                /* call *<OFFSET>(%rip) */
                *(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
        }
        else
                x86_patch (code, (unsigned char*)target);
}

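/*
 * Byte patterns handled above (sketch; the exact encoding depends on the
 * destination register):
 *
 *   48 b8 <imm64>      movabs $target, %rax      (amd64_set_reg_template)
 *   48 8b 05 <disp32>  mov    off(%rip), %rax
 *   ff 15 <disp32>     call   *off(%rip)
 *
 * Anything else is assumed to be an x86 style rel32 branch or call and is
 * forwarded to x86_patch ().
 */
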
typedef enum {
        ArgInIReg,
        ArgInFloatSSEReg,
        ArgInDoubleSSEReg,
        ArgOnStack,
        ArgValuetypeInReg,
        ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
        gint16 offset;
        gint8  reg;
        ArgStorage storage;

        /* Only if storage == ArgValuetypeInReg */
        ArgStorage pair_storage [2];
        gint8 pair_regs [2];
} ArgInfo;

typedef struct {
        int nargs;
        guint32 stack_usage;
        guint32 reg_usage;
        guint32 freg_usage;
        gboolean need_stack_align;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define NEW_ICONST(cfg,dest,val) do {   \
                (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
                (dest)->opcode = OP_ICONST;     \
                (dest)->inst_c0 = (val);        \
                (dest)->type = STACK_I4;        \
        } while (0)

#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };

static inline void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
        ainfo->offset = *stack_size;

        if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                ainfo->storage = ArgInIReg;
                ainfo->reg = param_regs [*gr];
                (*gr) ++;
        }
}

#define FLOAT_PARAM_REGS 8

static inline void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
        ainfo->offset = *stack_size;

        if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                /* A double register */
                if (is_double)
                        ainfo->storage = ArgInDoubleSSEReg;
                else
                        ainfo->storage = ArgInFloatSSEReg;
                ainfo->reg = *gr;
                (*gr) += 1;
        }
}

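/*
 * Example (illustrative): for a signature
 *
 *   void f (gint64 a, double b, gint64 c)
 *
 * the helpers above assign a -> %rdi, b -> %xmm0 and c -> %rsi, since the
 * integer and SSE register counters advance independently, as the System V
 * AMD64 calling convention specifies.
 */
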
typedef enum ArgumentClass {
        ARG_CLASS_NO_CLASS,
        ARG_CLASS_MEMORY,
        ARG_CLASS_INTEGER,
        ARG_CLASS_SSE
} ArgumentClass;

static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
        ArgumentClass class2;
        MonoType *ptype;

        ptype = mono_type_get_underlying_type (type);
        switch (ptype->type) {
        case MONO_TYPE_BOOLEAN:
        case MONO_TYPE_CHAR:
        case MONO_TYPE_I1:
        case MONO_TYPE_U1:
        case MONO_TYPE_I2:
        case MONO_TYPE_U2:
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_STRING:
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_PTR:
        case MONO_TYPE_FNPTR:
        case MONO_TYPE_ARRAY:
        case MONO_TYPE_I8:
        case MONO_TYPE_U8:
                class2 = ARG_CLASS_INTEGER;
                break;
        case MONO_TYPE_R4:
        case MONO_TYPE_R8:
                class2 = ARG_CLASS_SSE;
                break;

        case MONO_TYPE_TYPEDBYREF:
                g_assert_not_reached ();

        case MONO_TYPE_VALUETYPE: {
                MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
                int i;

                /* Accumulate the merged class over all fields */
                class2 = class1;
                for (i = 0; i < info->num_fields; ++i)
                        class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
                break;
        }
        default:
                g_assert_not_reached ();
        }

        /* Merge */
        if (class1 == class2)
                ;
        else if (class1 == ARG_CLASS_NO_CLASS)
                class1 = class2;
        else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
                class1 = ARG_CLASS_MEMORY;
        else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
                class1 = ARG_CLASS_INTEGER;
        else
                class1 = ARG_CLASS_SSE;

        return class1;
}

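/*
 * Example of the merge rules (illustrative): struct { int a; float b; }
 * occupies a single eightbyte whose fields classify as INTEGER and SSE;
 * INTEGER wins the merge, so the struct travels in one general purpose
 * register. struct { double a; double b; } classifies as SSE+SSE and is
 * passed in two SSE registers instead.
 */
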
static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
               gboolean is_return,
               guint32 *gr, guint32 *fr, guint32 *stack_size)
{
        guint32 size, quad, nquads, i;
        ArgumentClass args [2];
        MonoMarshalType *info;
        MonoClass *klass;

        klass = mono_class_from_mono_type (type);
        if (sig->pinvoke)
                size = mono_type_native_stack_size (&klass->byval_arg, NULL);
        else
                size = mono_type_stack_size (&klass->byval_arg, NULL);

        if (!sig->pinvoke || (size == 0) || (size > 16)) {
                /* Always pass in memory */
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        /* FIXME: Handle structs smaller than 8 bytes */
        //if ((size % 8) != 0)
        //      NOT_IMPLEMENTED;

        if (size > 8)
                nquads = 2;
        else
                nquads = 1;

        /*
         * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
         * The X87 and SSEUP stuff is left out since there are no such types in
         * the CLR.
         */
        info = mono_marshal_load_type_info (klass);
        g_assert (info);
        if (info->native_size > 16) {
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (info->native_size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        /* args [1] stays NO_CLASS for single quad types */
        args [0] = args [1] = ARG_CLASS_NO_CLASS;
        for (quad = 0; quad < nquads; ++quad) {
                int size, align;
                ArgumentClass class1;

                class1 = ARG_CLASS_NO_CLASS;
                for (i = 0; i < info->num_fields; ++i) {
                        size = mono_marshal_type_size (info->fields [i].field->type,
                                                       info->fields [i].mspec,
                                                       &align, TRUE, klass->unicode);
                        if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
                                /* Unaligned field */
                                NOT_IMPLEMENTED;
                        }

                        /* Skip fields in the other quad */
                        if ((quad == 0) && (info->fields [i].offset >= 8))
                                continue;
                        if ((quad == 1) && (info->fields [i].offset < 8))
                                continue;

                        class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
                }
                g_assert (class1 != ARG_CLASS_NO_CLASS);
                args [quad] = class1;
        }

        /* Post merger cleanup */
        if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
                args [0] = args [1] = ARG_CLASS_MEMORY;

        /* Allocate registers */
        {
                int orig_gr = *gr;
                int orig_fr = *fr;

                ainfo->storage = ArgValuetypeInReg;
                ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
                for (quad = 0; quad < nquads; ++quad) {
                        switch (args [quad]) {
                        case ARG_CLASS_INTEGER:
                                if (*gr >= PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInIReg;
                                        if (is_return)
                                                ainfo->pair_regs [quad] = return_regs [*gr];
                                        else
                                                ainfo->pair_regs [quad] = param_regs [*gr];
                                        (*gr) ++;
                                }
                                break;
                        case ARG_CLASS_SSE:
                                if (*fr >= FLOAT_PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
                                        ainfo->pair_regs [quad] = *fr;
                                        (*fr) ++;
                                }
                                break;
                        case ARG_CLASS_MEMORY:
                                break;
                        default:
                                g_assert_not_reached ();
                        }
                }

                if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
                        /* Revert possible register assignments */
                        *gr = orig_gr;
                        *fr = orig_fr;

                        ainfo->offset = *stack_size;
                        *stack_size += ALIGN_TO (info->native_size, 8);
                        ainfo->storage = ArgOnStack;
                }
        }
}

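/*
 * Putting it together (illustrative): a struct { gint64 a; double b; }
 * argument has size 16, classifies as INTEGER+SSE and ends up with
 * pair_storage = { ArgInIReg, ArgInDoubleSSEReg }, consuming one general
 * purpose and one SSE parameter register. If either quad fails to get a
 * register, the whole struct is reverted to the stack, as the ABI requires.
 */
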
/*
 * get_call_info:
 *
 *  Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
        guint32 i, gr, fr;
        MonoType *ret_type;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;

        cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

        gr = 0;
        fr = 0;

        /* return value */
        {
                ret_type = mono_type_get_underlying_type (sig->ret);
                switch (ret_type->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                case MONO_TYPE_STRING:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_R4:
                        cinfo->ret.storage = ArgInFloatSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_R8:
                        cinfo->ret.storage = ArgInDoubleSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_VALUETYPE: {
                        guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

                        add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
                        if (cinfo->ret.storage == ArgOnStack)
                                /* The caller passes the address where the value is stored */
                                add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                }
                case MONO_TYPE_TYPEDBYREF:
                        /* Same as a valuetype with size 24 */
                        add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                case MONO_TYPE_VOID:
                        break;
                default:
                        g_error ("Can't handle as return value 0x%x", sig->ret->type);
                }
        }

        /* this */
        if (sig->hasthis)
                add_general (&gr, &stack_size, cinfo->args + 0);

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        for (i = 0; i < sig->param_count; ++i) {
                ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
                MonoType *ptype;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        /* We always pass the sig cookie on the stack for simplicity */
                        /*
                         * Prevent implicit arguments + the sig cookie from being passed
                         * in registers.
                         */
                        gr = PARAM_REGS;
                        fr = FLOAT_PARAM_REGS;

                        /* Emit the signature cookie just before the implicit arguments */
                        add_general (&gr, &stack_size, &cinfo->sig_cookie);
                }

                if (sig->params [i]->byref) {
                        add_general (&gr, &stack_size, ainfo);
                        continue;
                }
                ptype = mono_type_get_underlying_type (sig->params [i]);
                switch (ptype->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_STRING:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_VALUETYPE:
                        add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        stack_size += sizeof (MonoTypedRef);
                        ainfo->storage = ArgOnStack;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_R4:
                        add_float (&fr, &stack_size, ainfo, FALSE);
                        break;
                case MONO_TYPE_R8:
                        add_float (&fr, &stack_size, ainfo, TRUE);
                        break;
                default:
                        g_assert_not_reached ();
                }
        }

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        if (stack_size & 0x8) {
                /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
                cinfo->need_stack_align = TRUE;
                stack_size += 8;
        }

        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
        cinfo->freg_usage = fr;
        return cinfo;
}

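/*
 * Typical usage (sketch): a CallInfo is heap allocated per signature and
 * must be freed by the caller, e.g.
 *
 *   CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
 *   ... inspect cinfo->ret, cinfo->args [i], cinfo->stack_usage ...
 *   g_free (cinfo);
 */
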
/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
        int k;
        CallInfo *cinfo = get_call_info (csig, FALSE);
        guint32 args_size = cinfo->stack_usage;

        /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
        if (csig->hasthis) {
                arg_info [0].offset = 0;
        }

        for (k = 0; k < param_count; k++) {
                arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
                /* FIXME: */
                arg_info [k + 1].size = 0;
        }

        g_free (cinfo);

        return args_size;
}

/*
 * FIXME: Not implemented on AMD64 yet: always reports failure, so no cpu
 * specific optimizations get enabled below.
 */
static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
        return 0;
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
        guint16 fpcw;

        /* spec compliance requires running with double precision */
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
        fpcw &= ~X86_FPCW_PRECC_MASK;
        fpcw |= X86_FPCW_PREC_DOUBLE;
        __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));

        mono_amd64_exceptions_init ();
        mono_amd64_tramp_init ();
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
        int eax, ebx, ecx, edx;
        guint32 opts = 0;

        /* FIXME: AMD64 */

        *exclude_mask = 0;
        /* Feature Flags function, flags returned in EDX. */
        if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
                if (edx & (1 << 15)) {
                        opts |= MONO_OPT_CMOV;
                        if (edx & 1)
                                opts |= MONO_OPT_FCMOV;
                        else
                                *exclude_mask |= MONO_OPT_FCMOV;
                } else
                        *exclude_mask |= MONO_OPT_CMOV;
        }
        return opts;
}

static gboolean
is_regsize_var (MonoType *t)
{
        if (t->byref)
                return TRUE;
        t = mono_type_get_underlying_type (t);
        switch (t->type) {
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
                return TRUE;
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_STRING:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_ARRAY:
                return TRUE;
        case MONO_TYPE_VALUETYPE:
                return FALSE;
        }
        return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
        GList *vars = NULL;
        int i;

        for (i = 0; i < cfg->num_varinfo; i++) {
                MonoInst *ins = cfg->varinfo [i];
                MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

                /* unused vars */
                if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
                        continue;

                if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
                    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
                        continue;

                /* we don't allocate I1 to registers because there is no simple way to
                 * sign extend 8bit quantities in caller saved registers on x86 */
                if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
                    (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2) ||
                    (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
                        g_assert (MONO_VARINFO (cfg, i)->reg == -1);
                        g_assert (i == vmv->idx);
                        vars = g_list_prepend (vars, vmv);
                }
        }

        vars = mono_varlist_sort (cfg, vars, 0);

        return vars;
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
        GList *regs = NULL;

        /* We use the callee saved registers for global allocation */
        regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
        regs = g_list_prepend (regs, (gpointer)AMD64_R12);
        regs = g_list_prepend (regs, (gpointer)AMD64_R13);
        regs = g_list_prepend (regs, (gpointer)AMD64_R14);
        regs = g_list_prepend (regs, (gpointer)AMD64_R15);

        return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
        MonoInst *ins = cfg->varinfo [vmv->idx];

        if (cfg->method->save_lmf)
                /* The register is already saved */
                /* subtract 1 for the invisible store in the prolog */
                return (ins->opcode == OP_ARG) ? 0 : 1;
        else
                /* push+pop */
                return (ins->opcode == OP_ARG) ? 1 : 2;
}

void
mono_arch_allocate_vars (MonoCompile *m)
{
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        MonoInst *inst;
        int i, offset;
        guint32 locals_stack_size, locals_stack_align;
        gint32 *offsets;
        CallInfo *cinfo;

        header = mono_method_get_header (m->method);

        sig = mono_method_signature (m->method);

        cinfo = get_call_info (sig, FALSE);

        /*
         * We use the ABI calling conventions for managed code as well.
         * Exception: valuetypes are never passed or returned in registers.
         */

        /* Locals are allocated backwards from %fp */
        m->frame_reg = AMD64_RBP;
        offset = 0;

        /* Reserve space for callee saved registers */
        for (i = 0; i < AMD64_NREG; ++i)
                if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
                        offset += sizeof (gpointer);
                }

        if (m->method->save_lmf) {
                /* Reserve stack space for saving LMF + argument regs */
                offset += sizeof (MonoLMF);
                if (lmf_tls_offset == -1)
                        /* Need to save argument regs too */
                        offset += (AMD64_NREG * 8) + (8 * 8);
                m->arch.lmf_offset = offset;
        }

        if (sig->ret->type != MONO_TYPE_VOID) {
                switch (cinfo->ret.storage) {
                case ArgInIReg:
                case ArgInFloatSSEReg:
                case ArgInDoubleSSEReg:
                        if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
                                /* The register is volatile */
                                m->ret->opcode = OP_REGOFFSET;
                                m->ret->inst_basereg = AMD64_RBP;
                                offset += 8;
                                m->ret->inst_offset = - offset;
                        }
                        else {
                                m->ret->opcode = OP_REGVAR;
                                m->ret->inst_c0 = cinfo->ret.reg;
                        }
                        break;
                case ArgValuetypeInReg:
                        /* Allocate a local to hold the result, the epilog will copy it to the correct place */
                        offset += 16;
                        m->ret->opcode = OP_REGOFFSET;
                        m->ret->inst_basereg = AMD64_RBP;
                        m->ret->inst_offset = - offset;
                        break;
                default:
                        g_assert_not_reached ();
                }
                m->ret->dreg = m->ret->inst_c0;
        }

        /* Allocate locals */
        offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
        if (locals_stack_align) {
                offset += (locals_stack_align - 1);
                offset &= ~(locals_stack_align - 1);
        }
        for (i = m->locals_start; i < m->num_varinfo; i++) {
                if (offsets [i] != -1) {
                        MonoInst *inst = m->varinfo [i];
                        inst->opcode = OP_REGOFFSET;
                        inst->inst_basereg = AMD64_RBP;
                        inst->inst_offset = - (offset + offsets [i]);
                        //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
                }
        }
        g_free (offsets);
        offset += locals_stack_size;

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
                g_assert (cinfo->sig_cookie.storage == ArgOnStack);
                m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
        }

        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
                inst = m->varinfo [i];
                if (inst->opcode != OP_REGVAR) {
                        ArgInfo *ainfo = &cinfo->args [i];
                        gboolean inreg = TRUE;
                        MonoType *arg_type;

                        if (sig->hasthis && (i == 0))
                                arg_type = &mono_defaults.object_class->byval_arg;
                        else
                                arg_type = sig->params [i - sig->hasthis];

                        /* FIXME: Allocate volatile arguments to registers */
                        if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
                                inreg = FALSE;

                        /*
                         * Under AMD64, all registers used to pass arguments to functions
                         * are volatile across calls.
                         * FIXME: Optimize this.
                         */
                        if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
                                inreg = FALSE;

                        inst->opcode = OP_REGOFFSET;

                        switch (ainfo->storage) {
                        case ArgInIReg:
                        case ArgInFloatSSEReg:
                        case ArgInDoubleSSEReg:
                                inst->opcode = OP_REGVAR;
                                inst->dreg = ainfo->reg;
                                break;
                        case ArgOnStack:
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                inst->inst_offset = ainfo->offset + ARGS_OFFSET;
                                break;
                        case ArgValuetypeInReg:
                                break;
                        default:
                                NOT_IMPLEMENTED;
                        }

                        if (!inreg && (ainfo->storage != ArgOnStack)) {
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                /* These arguments are saved to the stack in the prolog */
                                if (ainfo->storage == ArgValuetypeInReg)
                                        offset += 2 * sizeof (gpointer);
                                else
                                        offset += sizeof (gpointer);
                                inst->inst_offset = - offset;
                        }
                }
        }

        m->stack_offset = offset;

        g_free (cinfo);
}

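/*
 * Resulting frame layout (sketch, not authoritative; negative offsets grow
 * downwards from %rbp in the order allocated above):
 *
 *   %rbp + 16 ...  incoming stack arguments (ARGS_OFFSET)
 *   %rbp +  8      return address
 *   %rbp +  0      saved %rbp
 *   %rbp -  ...    callee saved registers
 *   %rbp -  ...    MonoLMF (+ saved argument regs if no LMF TLS slot exists)
 *   %rbp -  ...    vtype return buffer, locals, spilled argument registers
 */
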
void
mono_arch_create_vars (MonoCompile *cfg)
{
        MonoMethodSignature *sig;
        CallInfo *cinfo;

        sig = mono_method_signature (cfg->method);

        cinfo = get_call_info (sig, FALSE);

        if (cinfo->ret.storage == ArgValuetypeInReg)
                cfg->ret_var_is_local = TRUE;

        g_free (cinfo);
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
{
        switch (storage) {
        case ArgInIReg:
                arg->opcode = OP_OUTARG_REG;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_iregs |= 1 << reg;
                break;
        case ArgInFloatSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        case ArgInDoubleSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        default:
                g_assert_not_reached ();
        }
}

/* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode;
 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
 */

static int
arg_storage_to_ldind (ArgStorage storage)
{
        switch (storage) {
        case ArgInIReg:
                return CEE_LDIND_I;
        case ArgInDoubleSSEReg:
                return CEE_LDIND_R8;
        case ArgInFloatSSEReg:
                return CEE_LDIND_R4;
        default:
                g_assert_not_reached ();
        }

        return -1;
}

/*
 * Take the arguments and generate the arch-specific
 * instructions to properly call the function in call.
 * This includes pushing, moving arguments to the right register
 * etc.
 * Issue: who does the spilling if needed, and when?
 */
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
{
        MonoInst *arg, *in;
        MonoMethodSignature *sig;
        int i, n, stack_size;
        CallInfo *cinfo;
        ArgInfo *ainfo;

        stack_size = 0;

        sig = call->signature;
        n = sig->param_count + sig->hasthis;

        cinfo = get_call_info (sig, sig->pinvoke);

        for (i = 0; i < n; ++i) {
                ainfo = cinfo->args + i;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        MonoMethodSignature *tmp_sig;

                        /* Emit the signature cookie just before the implicit arguments */
                        MonoInst *sig_arg;
                        /* FIXME: Add support for signature tokens to AOT */
                        cfg->disable_aot = TRUE;

                        g_assert (cinfo->sig_cookie.storage == ArgOnStack);

                        /*
                         * mono_ArgIterator_Setup assumes the signature cookie is
                         * passed first and all the arguments which were before it are
                         * passed on the stack after the signature. So compensate by
                         * passing a different signature.
                         */
                        tmp_sig = mono_metadata_signature_dup (call->signature);
                        tmp_sig->param_count -= call->signature->sentinelpos;
                        tmp_sig->sentinelpos = 0;
                        memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

                        MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
                        sig_arg->inst_p0 = tmp_sig;

                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        arg->inst_left = sig_arg;
                        arg->type = STACK_PTR;

                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;
                }

                if (is_virtual && i == 0) {
                        /* the argument will be attached to the call instruction */
                        in = call->args [i];
                } else {
                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        in = call->args [i];
                        arg->cil_code = in->cil_code;
                        arg->inst_left = in;
                        arg->type = in->type;
                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;

                        if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT (sig->params [i - sig->hasthis]))) {
                                gint align;
                                guint32 size;

                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
                                        size = sizeof (MonoTypedRef);
                                        align = sizeof (gpointer);
                                }
                                else if (sig->pinvoke)
                                        size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
                                else
                                        size = mono_type_stack_size (&in->klass->byval_arg, &align);
                                if (ainfo->storage == ArgValuetypeInReg) {
                                        if (ainfo->pair_storage [1] == ArgNone) {
                                                MonoInst *load;

                                                /* Simpler case */

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = in;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
                                        }
                                        else {
                                                /* Trees can't be shared so make a copy */
                                                MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
                                                MonoInst *load, *load2, *offset_ins;

                                                /* Reg1 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 0);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = load2;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);

                                                /* Reg2 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 8);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
                                                load->inst_left = load2;

                                                MONO_INST_NEW (cfg, arg, OP_OUTARG);
                                                arg->cil_code = in->cil_code;
                                                arg->type = in->type;
                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);

                                                /* Prepend a copy inst */
                                                MONO_INST_NEW (cfg, arg, CEE_STIND_I);
                                                arg->cil_code = in->cil_code;
                                                arg->ssa_op = MONO_SSA_STORE;
                                                arg->inst_left = vtaddr;
                                                arg->inst_right = in;
                                                arg->type = in->type;

                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;
                                        }
                                }
                                else {
                                        arg->opcode = OP_OUTARG_VT;
                                        arg->klass = in->klass;
                                        arg->unused = sig->pinvoke;
                                        arg->inst_imm = size;
                                }
                        }
                        else {
                                switch (ainfo->storage) {
                                case ArgInIReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgInFloatSSEReg:
                                case ArgInDoubleSSEReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgOnStack:
                                        arg->opcode = OP_OUTARG;
                                        if (!sig->params [i - sig->hasthis]->byref) {
                                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
                                                        arg->opcode = OP_OUTARG_R4;
                                                else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
                                                        arg->opcode = OP_OUTARG_R8;
                                        }
                                        break;
                                default:
                                        g_assert_not_reached ();
                                }
                        }
                }
        }

        if (cinfo->need_stack_align) {
                MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
                /* prepend, so they get reversed */
                arg->next = call->out_args;
                call->out_args = arg;
        }

        call->stack_usage = cinfo->stack_usage;
        cfg->param_area = MAX (cfg->param_area, call->stack_usage);
        cfg->flags |= MONO_CFG_HAS_CALLS;

        g_free (cinfo);

        return call;
}

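/*
 * Note on ordering (illustrative): out_args is built by prepending, so for
 * f (a, b, c) the list ends up as c -> b -> a and the arguments are emitted
 * in reverse order, which is what the stack based OP_OUTARG opcodes expect.
 */
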
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
        if (ins->inst_i0->inst_c0) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
} else { \
        if (ins->inst_true_bb->native_offset) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
}

/* emit an exception if the condition fails */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
        do {                                                        \
                mono_add_patch_info (cfg, code - cfg->native_code,   \
                                    MONO_PATCH_INFO_EXC, exc_name);  \
                x86_branch32 (code, cond, 0, signed);               \
        } while (0)

#define EMIT_FPCOMPARE(code) do { \
        amd64_fcompp (code); \
        amd64_fnstsw (code); \
} while (0)

static guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
        mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);

        if (mono_compile_aot) {
                amd64_call_membase (code, AMD64_RIP, 0);
        }
        else {
                gboolean near_call = FALSE;

                /*
                 * Indirect calls are expensive so try to make a near call if possible.
                 * The caller memory is allocated by the code manager so it is
                 * guaranteed to be at a 32 bit offset.
                 */

                if (patch_type != MONO_PATCH_INFO_ABS)
                        /* The target is in memory allocated using the code manager */
                        near_call = TRUE;
                else {
                        if (mono_find_class_init_trampoline_by_addr (data))
                                near_call = TRUE;
                        else {
                                MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
                                if (info) {
                                        if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
                                                strstr (cfg->method->name, info->name)) {
                                                /* A call to the wrapped function */
                                                if ((((guint64)data) >> 32) == 0)
                                                        near_call = TRUE;
                                        }
                                        else
                                                near_call = TRUE;
                                }
                                else if ((((guint64)data) >> 32) == 0)
                                        near_call = TRUE;
                        }
                }

                if (near_call) {
                        amd64_call_code (code, 0);
                }
                else {
                        amd64_set_reg_template (code, GP_SCRATCH_REG);
                        amd64_call_reg (code, GP_SCRATCH_REG);
                }
        }

        return code;
}

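/*
 * Reachability sketch: a near call encodes a signed 32 bit displacement, so
 * the target must lie within +/-2GB of the call site. The checks above
 * approximate this with IS_IMM32 style tests on the absolute address, which
 * works because code manager memory is guaranteed to be reachable with a
 * 32 bit offset; when in doubt, the target is loaded into GP_SCRATCH_REG
 * and called indirectly.
 */
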
/* FIXME: Add more instructions */
#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_SETREG) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))

1300 static void
1301 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1302 {
1303         MonoInst *ins, *last_ins = NULL;
1304         ins = bb->code;
1305
1306         while (ins) {
1307
1308                 switch (ins->opcode) {
1309                 case OP_ICONST:
1310                 case OP_I8CONST:
1311                         /* reg = 0 -> XOR (reg, reg) */
1312                         /* XOR sets cflags on x86, so we cant do it always */
1313                         if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
1314                                 ins->opcode = CEE_XOR;
1315                                 ins->sreg1 = ins->dreg;
1316                                 ins->sreg2 = ins->dreg;
1317                         }
1318                         break;
1319                 case OP_MUL_IMM: 
1320                         /* remove unnecessary multiplication with 1 */
1321                         if (ins->inst_imm == 1) {
1322                                 if (ins->dreg != ins->sreg1) {
1323                                         ins->opcode = OP_MOVE;
1324                                 } else {
1325                                         last_ins->next = ins->next;
1326                                         ins = ins->next;
1327                                         continue;
1328                                 }
1329                         }
1330                         break;
1331                 case OP_COMPARE_IMM:
1332                         /* OP_COMPARE_IMM (reg, 0) 
1333                          * --> 
1334                          * OP_AMD64_TEST_NULL (reg) 
1335                          */
1336                         if (!ins->inst_imm)
1337                                 ins->opcode = OP_AMD64_TEST_NULL;
1338                         break;
1339                 case OP_ICOMPARE_IMM:
1340                         if (!ins->inst_imm)
1341                                 ins->opcode = OP_X86_TEST_NULL;
1342                         break;
1343                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
1344                         /* 
1345                          * OP_STOREI4_MEMBASE_REG reg, offset(basereg)
1346                          * OP_AMD64_ICOMPARE_MEMBASE_IMM offset(basereg), imm
1347                          * -->
1348                          * OP_STOREI4_MEMBASE_REG reg, offset(basereg)
1349                          * OP_ICOMPARE_IMM reg, imm
1350                          *
1351                          * Note: if imm = 0, OP_ICOMPARE_IMM is further replaced with OP_X86_TEST_NULL
1352                          */
1353                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1354                             ins->inst_basereg == last_ins->inst_destbasereg &&
1355                             ins->inst_offset == last_ins->inst_offset) {
1356                                         ins->opcode = OP_ICOMPARE_IMM;
1357                                         ins->sreg1 = last_ins->sreg1;
1358
1359                                         /* check if we can remove cmp reg,0 with test null */
1360                                         if (!ins->inst_imm)
1361                                                 ins->opcode = OP_X86_TEST_NULL;
1362                                 }
1363
1364                         break;
1365                 case OP_LOAD_MEMBASE:
1366                 case OP_LOADI4_MEMBASE:
1367                         /* 
1368                          * Note: if reg1 = reg2 the load op is removed
1369                          *
1370                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1371                          * OP_LOAD_MEMBASE offset(basereg), reg2
1372                          * -->
1373                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1374                          * OP_MOVE reg1, reg2
1375                          */
1376                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1377                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1378                             ins->inst_basereg == last_ins->inst_destbasereg &&
1379                             ins->inst_offset == last_ins->inst_offset) {
1380                                 if (ins->dreg == last_ins->sreg1) {
1381                                         last_ins->next = ins->next;                             
1382                                         ins = ins->next;                                
1383                                         continue;
1384                                 } else {
1385                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1386                                         ins->opcode = OP_MOVE;
1387                                         ins->sreg1 = last_ins->sreg1;
1388                                 }
1389
1390                         /* 
1391                          * Note: reg1 must be different from the basereg in the second load
1392                          * Note: if reg1 and reg2 are equal, the second load is removed
1393                          *
1394                          * OP_LOAD_MEMBASE offset(basereg), reg1
1395                          * OP_LOAD_MEMBASE offset(basereg), reg2
1396                          * -->
1397                          * OP_LOAD_MEMBASE offset(basereg), reg1
1398                          * OP_MOVE reg1, reg2
1399                          */
1400                         } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1401                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1402                               ins->inst_basereg != last_ins->dreg &&
1403                               ins->inst_basereg == last_ins->inst_basereg &&
1404                               ins->inst_offset == last_ins->inst_offset) {
1405
1406                                 if (ins->dreg == last_ins->dreg) {
1407                                         last_ins->next = ins->next;                             
1408                                         ins = ins->next;                                
1409                                         continue;
1410                                 } else {
1411                                         ins->opcode = OP_MOVE;
1412                                         ins->sreg1 = last_ins->dreg;
1413                                 }
1414
1415                                 //g_assert_not_reached ();
1416
1417 #if 0
1418                         /* 
1419                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1420                          * OP_LOAD_MEMBASE offset(basereg), reg
1421                          * -->
1422                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1423                          * OP_ICONST reg, imm
1424                          */
1425                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1426                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1427                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1428                                    ins->inst_offset == last_ins->inst_offset) {
1429                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1430                                 ins->opcode = OP_ICONST;
1431                                 ins->inst_c0 = last_ins->inst_imm;
1432                                 g_assert_not_reached (); // check this rule
1433 #endif
1434                         }
1435                         break;
1436                 case OP_LOADU1_MEMBASE:
1437                 case OP_LOADI1_MEMBASE:
1438                         /* 
1439                          * Note: if reg1 = reg2 the load op is removed
1440                          *
1441                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1442                          * OP_LOAD_MEMBASE offset(basereg), reg2
1443                          * -->
1444                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1445                          * OP_MOVE reg1, reg2
1446                          */
1447                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1448                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1449                                         ins->inst_offset == last_ins->inst_offset) {
1450                                 if (ins->dreg == last_ins->sreg1) {
1451                                         last_ins->next = ins->next;                             
1452                                         ins = ins->next;                                
1453                                         continue;
1454                                 } else {
1455                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1456                                         ins->opcode = OP_MOVE;
1457                                         ins->sreg1 = last_ins->sreg1;
1458                                 }
1459                         }
1460                         break;
1461                 case OP_LOADU2_MEMBASE:
1462                 case OP_LOADI2_MEMBASE:
1463                         /* 
1464                          * Note: if reg1 = reg2 the load op is removed
1465                          *
1466                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1467                          * OP_LOAD_MEMBASE offset(basereg), reg2
1468                          * -->
1469                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1470                          * OP_MOVE reg1, reg2
1471                          */
1472                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1473                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1474                                         ins->inst_offset == last_ins->inst_offset) {
1475                                 if (ins->dreg == last_ins->sreg1) {
1476                                         last_ins->next = ins->next;                             
1477                                         ins = ins->next;                                
1478                                         continue;
1479                                 } else {
1480                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1481                                         ins->opcode = OP_MOVE;
1482                                         ins->sreg1 = last_ins->sreg1;
1483                                 }
1484                         }
1485                         break;
1486                 case CEE_CONV_I4:
1487                 case CEE_CONV_U4:
1488                 case OP_MOVE:
1489                         /*
1490                          * Removes:
1491                          *
1492                          * OP_MOVE reg, reg 
1493                          */
1494                         if (ins->dreg == ins->sreg1) {
1495                         if (last_ins) last_ins->next = ins->next;
1496                         else bb->code = ins->next;
1497                                 ins = ins->next;
1498                                 continue;
1499                         }
1500                         /* 
1501                          * Removes:
1502                          *
1503                          * OP_MOVE sreg, dreg 
1504                          * OP_MOVE dreg, sreg
1505                          */
1506                         if (last_ins && last_ins->opcode == OP_MOVE &&
1507                             ins->sreg1 == last_ins->dreg &&
1508                             ins->dreg == last_ins->sreg1) {
1509                                 last_ins->next = ins->next;                             
1510                                 ins = ins->next;                                
1511                                 continue;
1512                         }
1513                         break;
1514                 }
1515                 last_ins = ins;
1516                 ins = ins->next;
1517         }
1518         bb->last_ins = last_ins;
1519 }
1520
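/*
 * A sketch of peephole_pass () end to end (register numbers are made up):
 * the store/load forwarding above turns a reload into a move, and the
 * OP_MOVE case then drops it entirely when both sides coincide:
 *
 *   OP_STORE_MEMBASE_REG R11, 8(R12)       OP_STORE_MEMBASE_REG R11, 8(R12)
 *   OP_LOAD_MEMBASE 8(R12), R11      -->   (reload removed)
 */
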
1521 static const int 
1522 branch_cc_table [] = {
1523         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1524         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1525         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1526 };
1527
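/*
 * The table above is presumably indexed by (opcode - CEE_BEQ): the first
 * row covers the signed compare-and-branch opcodes, the second their _UN
 * variants (note they share the same X86_CC_* values here; signedness is
 * applied when the jump itself is emitted), and the last four the
 * overflow/carry checks.
 */
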
1528 static int
1529 opcode_to_x86_cond (int opcode)
1530 {
1531         switch (opcode) {
1532         case OP_IBEQ:
1533                 return X86_CC_EQ;
1534         case OP_IBNE_UN:
1535                 return X86_CC_NE;
1536         case OP_IBLT:
1537                 return X86_CC_LT;
1538         case OP_IBLT_UN:
1539                 return X86_CC_LT;
1540         case OP_IBGT:
1541                 return X86_CC_GT;
1542         case OP_IBGT_UN:
1543                 return X86_CC_GT;
1544         case OP_IBGE:
1545                 return X86_CC_GE;
1546         case OP_IBGE_UN:
1547                 return X86_CC_GE;
1548         case OP_IBLE:
1549                 return X86_CC_LE;
1550         case OP_IBLE_UN:
1551                 return X86_CC_LE;
1552         case OP_COND_EXC_IOV:
1553                 return X86_CC_O;
1554         case OP_COND_EXC_IC:
1555                 return X86_CC_C;
1556         default:
1557                 g_assert_not_reached ();
1558         }
1559
1560         return -1;
1561 }
1562
1563 /*
1564  * returns the offset used by spillvar. It allocates a new
1565  * spill variable if necessary. 
1566  */
1567 static int
1568 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1569 {
1570         MonoSpillInfo **si, *info;
1571         int i = 0;
1572
1573         si = &cfg->spill_info; 
1574         
1575         while (i <= spillvar) {
1576
1577                 if (!*si) {
1578                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1579                         info->next = NULL;
1580                         cfg->stack_offset += sizeof (gpointer);
1581                         info->offset = - cfg->stack_offset;
1582                 }
1583
1584                 if (i == spillvar)
1585                         return (*si)->offset;
1586
1587                 i++;
1588                 si = &(*si)->next;
1589         }
1590
1591         g_assert_not_reached ();
1592         return 0;
1593 }
1594
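/*
 * Layout sketch (assumed, for illustration): spill slots grow downwards
 * from the frame pointer.  If cfg->stack_offset is 32 when spillvar 0 is
 * first requested, it gets offset -40, spillvar 1 gets -48, and repeated
 * requests for the same spillvar return the cached offset.
 */
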
1595 /*
1596  * returns the offset used by spillvar. It allocates a new
1597  * spill float variable if necessary. 
1598  * (same as mono_spillvar_offset but for float)
1599  */
1600 static int
1601 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1602 {
1603         MonoSpillInfo **si, *info;
1604         int i = 0;
1605
1606         si = &cfg->spill_info_float; 
1607         
1608         while (i <= spillvar) {
1609
1610                 if (!*si) {
1611                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1612                         info->next = NULL;
1613                         cfg->stack_offset += sizeof (double);
1614                         info->offset = - cfg->stack_offset;
1615                 }
1616
1617                 if (i == spillvar)
1618                         return (*si)->offset;
1619
1620                 i++;
1621                 si = &(*si)->next;
1622         }
1623
1624         g_assert_not_reached ();
1625         return 0;
1626 }
1627
1628 /*
1629  * Creates a store for spilled floating point items
1630  */
1631 static MonoInst*
1632 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1633 {
1634         MonoInst *store;
1635         MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1636         store->sreg1 = reg;
1637         store->inst_destbasereg = AMD64_RBP;
1638         store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1639
1640         DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)store->inst_offset, reg));
1641         return store;
1642 }
1643
1644 /*
1645  * Creates a load for spilled floating point items 
1646  */
1647 static MonoInst*
1648 create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1649 {
1650         MonoInst *load;
1651         MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
1652         load->dreg = reg;
1653         load->inst_basereg = AMD64_RBP;
1654         load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1655
1656         DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)load->inst_offset, reg));
1657         return load;
1658 }
1659
1660 #define is_global_ireg(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_SAVED_REG ((r)))
1661 #define ireg_is_freeable(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_REG ((r)))
1662 #define freg_is_freeable(r) ((r) >= 0 && (r) < AMD64_XMM_NREG)
1663
1664 #define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
1665 #define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
1666 #define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
1667 #define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
1668 #define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
1669 #define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
1670 #define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
1671
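/*
 * Convention assumed by the macros above: hard (physical) registers are
 * 0 .. MONO_MAX_[IF]REGS - 1 and anything higher is a symbolic register,
 * so e.g. reg_is_soft (AMD64_RAX, FALSE) is FALSE, while a virtual R40
 * is soft until the allocator assigns it a hard register.
 */
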
1672 typedef struct {
1673         int born_in;
1674         int killed_in;
1675         int last_use;
1676         int prev_use;
1677         int flags;              /* used to track fp spill/load */
1678 } RegTrack;
1679
1680 static const char*const * ins_spec = amd64_desc;
1681
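/*
 * A hedged reading of the spec strings used below: each ins_spec entry is
 * indexed with MONO_INST_DEST/SRC1/SRC2/CLOB, where 'f' marks a float
 * register, 'b' a base register, 'l'/'L' a sequentially-allocated long
 * register pair, and the clobber field uses 'c' for calls, 's' for shifts
 * (sreg2 must end up in RCX) and 'd' for division (RAX/RDX clobbered).
 */
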
1682 static void
1683 print_ins (int i, MonoInst *ins)
1684 {
1685         const char *spec = ins_spec [ins->opcode];
1686         g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1687         if (!spec)
1688                 g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
1689         if (spec [MONO_INST_DEST]) {
1690                 gboolean fp = (spec [MONO_INST_DEST] == 'f');
1691                 if (reg_is_soft (ins->dreg, fp))
1692                         g_print (" R%d <-", ins->dreg);
1693                 else
1694                         g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
1695         }
1696         if (spec [MONO_INST_SRC1]) {
1697                 gboolean fp = (spec [MONO_INST_SRC1] == 'f');
1698                 if (reg_is_soft (ins->sreg1, fp))
1699                         g_print (" R%d", ins->sreg1);
1700                 else
1701                         g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
1702         }
1703         if (spec [MONO_INST_SRC2]) {
1704                 gboolean fp = (spec [MONO_INST_SRC2] == 'f');
1705                 if (reg_is_soft (ins->sreg2, fp))
1706                         g_print (" R%d", ins->sreg2);
1707                 else
1708                         g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
1709         }
1710         if (spec [MONO_INST_CLOB])
1711                 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1712         g_print ("\n");
1713 }
1714
1715 static void
1716 print_regtrack (RegTrack *t, int num)
1717 {
1718         int i;
1719         char buf [32];
1720         const char *r;
1721         
1722         for (i = 0; i < num; ++i) {
1723                 if (!t [i].born_in)
1724                         continue;
1725                 if (i >= MONO_MAX_IREGS) {
1726                         g_snprintf (buf, sizeof(buf), "R%d", i);
1727                         r = buf;
1728                 } else
1729                         r = mono_arch_regname (i);
1730                 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1731         }
1732 }
1733
1734 typedef struct InstList InstList;
1735
1736 struct InstList {
1737         InstList *prev;
1738         InstList *next;
1739         MonoInst *data;
1740 };
1741
1742 static inline InstList*
1743 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1744 {
1745         InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1746         item->data = data;
1747         item->prev = NULL;
1748         item->next = list;
1749         if (list)
1750                 list->prev = item;
1751         return item;
1752 }
1753
1754 /*
1755  * Force the spilling of the variable in the symbolic register 'reg'.
1756  */
1757 static int
1758 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
1759 {
1760         MonoInst *load;
1761         int i, sel, spill;
1762         int *assign, *symbolic;
1763
1764         if (fp) {
1765                 assign = cfg->rs->fassign;
1766                 symbolic = cfg->rs->fsymbolic;
1767         }
1768         else {
1769                 assign = cfg->rs->iassign;
1770                 symbolic = cfg->rs->isymbolic;
1771         }       
1772         
1773         sel = assign [reg];
1774         /*i = cfg->rs->isymbolic [sel];
1775         g_assert (i == reg);*/
1776         i = reg;
1777         spill = ++cfg->spill_count;
1778         assign [i] = -spill - 1;
1779         if (fp)
1780                 mono_regstate_free_float (cfg->rs, sel);
1781         else
1782                 mono_regstate_free_int (cfg->rs, sel);
1783         /* we need to create a spill var and insert a load to sel after the current instruction */
1784         if (fp)
1785                 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1786         else
1787                 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1788         load->dreg = sel;
1789         load->inst_basereg = AMD64_RBP;
1790         load->inst_offset = mono_spillvar_offset (cfg, spill);
1791         if (item->prev) {
1792                 while (ins->next != item->prev->data)
1793                         ins = ins->next;
1794         }
1795         load->next = ins->next;
1796         ins->next = load;
1797         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1798         if (fp)
1799                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1800         else
1801                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1802         g_assert (i == sel);
1803
1804         return sel;
1805 }
1806
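/*
 * Spill protocol sketch (inferred from the code): since the allocator
 * walks the block backwards, only the reload after the current
 * instruction is inserted here.  Marking assign [reg] = -spill - 1 lets
 * the later val < -1 checks recover the spill index when the defining
 * instruction is reached, at which point create_spilled_store () emits
 * the matching store.
 */
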
1807 static int
1808 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
1809 {
1810         MonoInst *load;
1811         int i, sel, spill;
1812         int *assign, *symbolic;
1813
1814         if (fp) {
1815                 assign = cfg->rs->fassign;
1816                 symbolic = cfg->rs->fsymbolic;
1817         }
1818         else {
1819                 assign = cfg->rs->iassign;
1820                 symbolic = cfg->rs->isymbolic;
1821         }
1822
1823         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1824         /* exclude the registers in the current instruction */
1825         if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
1826                 if (reg_is_soft (ins->sreg1, fp))
1827                         regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
1828                 else
1829                         regmask &= ~ (1 << ins->sreg1);
1830                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
1831         }
1832         if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
1833                 if (reg_is_soft (ins->sreg2, fp))
1834                         regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
1835                 else
1836                         regmask &= ~ (1 << ins->sreg2);
1837                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
1838         }
1839         if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
1840                 regmask &= ~ (1 << ins->dreg);
1841                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
1842         }
1843
1844         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1845         g_assert (regmask); /* need at least a register we can free */
1846         sel = -1;
1847         /* we should track prev_use and spill the register whose next use is farthest away */
1848         if (fp) {
1849                 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1850                         if (regmask & (1 << i)) {
1851                                 sel = i;
1852                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
1853                                 break;
1854                         }
1855                 }
1856
1857                 i = cfg->rs->fsymbolic [sel];
1858                 spill = ++cfg->spill_count;
1859                 cfg->rs->fassign [i] = -spill - 1;
1860                 mono_regstate_free_float (cfg->rs, sel);
1861         }
1862         else {
1863                 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1864                         if (regmask & (1 << i)) {
1865                                 sel = i;
1866                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1867                                 break;
1868                         }
1869                 }
1870
1871                 i = cfg->rs->isymbolic [sel];
1872                 spill = ++cfg->spill_count;
1873                 cfg->rs->iassign [i] = -spill - 1;
1874                 mono_regstate_free_int (cfg->rs, sel);
1875         }
1876
1877         /* we need to create a spill var and insert a load to sel after the current instruction */
1878         MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
1879         load->dreg = sel;
1880         load->inst_basereg = AMD64_RBP;
1881         load->inst_offset = mono_spillvar_offset (cfg, spill);
1882         if (item->prev) {
1883                 while (ins->next != item->prev->data)
1884                         ins = ins->next;
1885         }
1886         load->next = ins->next;
1887         ins->next = load;
1888         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1889         if (fp)
1890                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1891         else
1892                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1893         g_assert (i == sel);
1894         
1895         return sel;
1896 }
1897
1898 static MonoInst*
1899 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
1900 {
1901         MonoInst *copy;
1902
1903         if (fp)
1904                 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1905         else
1906                 MONO_INST_NEW (cfg, copy, OP_MOVE);
1907
1908         copy->dreg = dest;
1909         copy->sreg1 = src;
1910         if (ins) {
1911                 copy->next = ins->next;
1912                 ins->next = copy;
1913         }
1914         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_amd64_regname (src, fp), mono_amd64_regname (dest, fp)));
1915         return copy;
1916 }
1917
1918 static MonoInst*
1919 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
1920 {
1921         MonoInst *store;
1922         MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
1923         store->sreg1 = reg;
1924         store->inst_destbasereg = AMD64_RBP;
1925         store->inst_offset = mono_spillvar_offset (cfg, spill);
1926         if (ins) {
1927                 store->next = ins->next;
1928                 ins->next = store;
1929         }
1930         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
1931         return store;
1932 }
1933
1934 static void
1935 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1936 {
1937         MonoInst *prev;
1938         if (item->next) {
1939                 prev = item->next->data;
1940
1941                 while (prev->next != ins)
1942                         prev = prev->next;
1943                 to_insert->next = ins;
1944                 prev->next = to_insert;
1945         } else {
1946                 to_insert->next = ins;
1947         }
1948         /* 
1949          * needed otherwise in the next instruction we can add an ins to the 
1950          * end and that would get past this instruction.
1951          */
1952         item->data = to_insert; 
1953 }
1954
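/*
 * Illustration of the item->data update above (a sketch): once a copy C
 * has been inserted before instruction I, the list node that used to
 * refer to I refers to C, so a later insertion at this position chains
 * in front of C rather than slipping between C and I.
 */
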
1955 /* flags used in reginfo->flags */
1956 enum {
1957         MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
1958         MONO_X86_FP_NEEDS_SPILL         = 1 << 1,
1959         MONO_X86_FP_NEEDS_LOAD          = 1 << 2,
1960         MONO_X86_REG_NOT_ECX            = 1 << 3,
1961         MONO_X86_REG_EAX                = 1 << 4,
1962         MONO_X86_REG_EDX                = 1 << 5,
1963         MONO_X86_REG_ECX                = 1 << 6
1964 };
1965
1966 static int
1967 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
1968 {
1969         int val;
1970         int test_mask = dest_mask;
1971
1972         if (flags & MONO_X86_REG_EAX)
1973                 test_mask &= (1 << AMD64_RAX);
1974         else if (flags & MONO_X86_REG_EDX)
1975                 test_mask &= (1 << AMD64_RDX);
1976         else if (flags & MONO_X86_REG_ECX)
1977                 test_mask &= (1 << AMD64_RCX);
1978         else if (flags & MONO_X86_REG_NOT_ECX)
1979                 test_mask &= ~ (1 << AMD64_RCX);
1980
1981         val = mono_regstate_alloc_int (cfg->rs, test_mask);
1982         if (val >= 0 && test_mask != dest_mask)
1983                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
1984
1985         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
1986                 DEBUG(g_print ("\tFailed to allocate from the flag-suggested mask (%u), retrying excluding ECX\n", test_mask));
1987                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & ~(1 << AMD64_RCX)));
1988         }
1989
1990         if (val < 0) {
1991                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
1992                 if (val < 0)
1993                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
1994         }
1995
1996         return val;
1997 }
1998
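/*
 * For example (illustrative): a symbolic register flagged with
 * MONO_X86_REG_ECX (set for shift counts in the liveness pass below)
 * first tries to grab RCX alone; only if that fails does the allocator
 * retry with the unrestricted dest_mask, spilling as a last resort.
 */
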
1999 static int
2000 mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
2001 {
2002         int val;
2003
2004         val = mono_regstate_alloc_float (cfg->rs, dest_mask);
2005
2006         if (val < 0) {
2007                 val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
2008         }
2009
2010         return val;
2011 }
2012
2013 static inline void
2014 assign_ireg (MonoRegState *rs, int reg, int hreg)
2015 {
2016         g_assert (reg >= MONO_MAX_IREGS);
2017         g_assert (hreg < MONO_MAX_IREGS);
2018         g_assert (! is_global_ireg (hreg));
2019
2020         rs->iassign [reg] = hreg;
2021         rs->isymbolic [hreg] = reg;
2022         rs->ifree_mask &= ~ (1 << hreg);
2023 }
2024
2025 /*#include "cprop.c"*/
2026
2027 /*
2028  * Local register allocation.
2029  * We first scan the list of instructions and save the liveness info of
2030  * each register (when the register is first used, when its value is set, etc.).
2031  * We also reverse the list of instructions (in the InstList list) because
2032  * assigning registers backwards allows for more tricks to be used.
2033  */
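/*
 * A small worked example (a sketch, not from the original comments): for
 *
 *   1  OP_ICONST R10 <- 42
 *   2  OP_MOVE   R11 <- R10
 *
 * the forward pass records born_in = 1, last_use = 2 for R10 and
 * born_in = last_use = 2 for R11; the backward pass then starts at the
 * OP_MOVE, so R10's remaining lifetime is already known by the time its
 * defining OP_ICONST is processed.
 */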
2034 void
2035 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2036 {
2037         MonoInst *ins;
2038         MonoRegState *rs = cfg->rs;
2039         int i, val, fpcount;
2040         RegTrack *reginfo, *reginfof;
2041         RegTrack *reginfo1, *reginfo2, *reginfod;
2042         InstList *tmp, *reversed = NULL;
2043         const char *spec;
2044         guint32 src1_mask, src2_mask, dest_mask;
2045         GList *fspill_list = NULL;
2046         int fspill = 0;
2047
2048         if (!bb->code)
2049                 return;
2050         rs->next_vireg = bb->max_ireg;
2051         rs->next_vfreg = bb->max_freg;
2052         mono_regstate_assign (rs);
2053         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
2054         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
2055         rs->ifree_mask = AMD64_CALLEE_REGS;
2056         rs->ffree_mask = AMD64_CALLEE_FREGS;
2057
2058         if (!use_sse2)
2059                 /* The fp stack is 6 entries deep */
2060                 rs->ffree_mask = 0x3f;
2061
2062         ins = bb->code;
2063
2064         /*if (cfg->opt & MONO_OPT_COPYPROP)
2065                 local_copy_prop (cfg, ins);*/
2066
2067         i = 1;
2068         fpcount = 0;
2069         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2070         /* forward pass on the instructions to collect register liveness info */
2071         while (ins) {
2072                 spec = ins_spec [ins->opcode];
2073                 
2074                 DEBUG (print_ins (i, ins));
2075
2076                 if (spec [MONO_INST_SRC1]) {
2077                         if (spec [MONO_INST_SRC1] == 'f') {
2078                                 reginfo1 = reginfof;
2079
2080                                 if (!use_sse2) {
2081                                         GList *spill;
2082
2083                                         spill = g_list_first (fspill_list);
2084                                         if (spill && fpcount < FPSTACK_SIZE) {
2085                                                 reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
2086                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2087                                         } else
2088                                                 fpcount--;
2089                                 }
2090                         }
2091                         else
2092                                 reginfo1 = reginfo;
2093                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2094                         reginfo1 [ins->sreg1].last_use = i;
2095                         if (spec [MONO_INST_SRC1] == 'L') {
2096                                 /* The virtual register is allocated sequentially */
2097                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
2098                                 reginfo1 [ins->sreg1 + 1].last_use = i;
2099                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
2100                                         reginfo1 [ins->sreg1 + 1].born_in = i;
2101
2102                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
2103                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
2104                         }
2105                 } else {
2106                         ins->sreg1 = -1;
2107                 }
2108                 if (spec [MONO_INST_SRC2]) {
2109                         if (spec [MONO_INST_SRC2] == 'f') {
2110                                 reginfo2 = reginfof;
2111
2112                                 if (!use_sse2) {
2113                                         GList *spill;
2114
2115                                         spill = g_list_first (fspill_list);
2116                                         if (spill) {
2117                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
2118                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2119                                                 if (fpcount >= FPSTACK_SIZE) {
2120                                                         fspill++;
2121                                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2122                                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
2123                                                 }
2124                                         } else
2125                                                 fpcount--;
2126                                 }
2127                         }
2128                         else
2129                                 reginfo2 = reginfo;
2130                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2131                         reginfo2 [ins->sreg2].last_use = i;
2132                         if (spec [MONO_INST_SRC2] == 'L') {
2133                                 /* The virtual register is allocated sequentially */
2134                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
2135                                 reginfo2 [ins->sreg2 + 1].last_use = i;
2136                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
2137                                         reginfo2 [ins->sreg2 + 1].born_in = i;
2138                         }
2139                         if (spec [MONO_INST_CLOB] == 's') {
2140                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
2141                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
2142                         }
2143                 } else {
2144                         ins->sreg2 = -1;
2145                 }
2146                 if (spec [MONO_INST_DEST]) {
2147                         if (spec [MONO_INST_DEST] == 'f') {
2148                                 reginfod = reginfof;
2149                                 if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
2150                                         if (fpcount >= FPSTACK_SIZE) {
2151                                                 reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
2152                                                 fspill++;
2153                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2154                                                 fpcount--;
2155                                         }
2156                                         fpcount++;
2157                                 }
2158                         }
2159                         else
2160                                 reginfod = reginfo;
2161                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2162                                 reginfod [ins->dreg].killed_in = i;
2163                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2164                         reginfod [ins->dreg].last_use = i;
2165                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2166                                 reginfod [ins->dreg].born_in = i;
2167                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
2168                                 /* The virtual register is allocated sequentially */
2169                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2170                                 reginfod [ins->dreg + 1].last_use = i;
2171                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2172                                         reginfod [ins->dreg + 1].born_in = i;
2173
2174                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
2175                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
2176                         }
2177                 } else {
2178                         ins->dreg = -1;
2179                 }
2180
2181                 if (spec [MONO_INST_CLOB] == 'c') {
2182                         /* A call instruction implicitly uses all registers in call->out_ireg_args */
2183
2184                         MonoCallInst *call = (MonoCallInst*)ins;
2185                         GSList *list;
2186
2187                         list = call->out_ireg_args;
2188                         if (list) {
2189                                 while (list) {
2190                                         guint64 regpair;
2191                                         int reg, hreg;
2192
2193                                         regpair = (guint64) (list->data);
2194                                         hreg = regpair >> 32;
2195                                         reg = regpair & 0xffffffff;
2196
2197                                         reginfo [reg].prev_use = reginfo [reg].last_use;
2198                                         reginfo [reg].last_use = i;
2199
2200                                         list = g_slist_next (list);
2201                                 }
2202                         }
2203
2204                         list = call->out_freg_args;
2205                         if (use_sse2 && list) {
2206                                 while (list) {
2207                                         guint64 regpair;
2208                                         int reg, hreg;
2209
2210                                         regpair = (guint64) (list->data);
2211                                         hreg = regpair >> 32;
2212                                         reg = regpair & 0xffffffff;
2213
2214                                         reginfof [reg].prev_use = reginfof [reg].last_use;
2215                                         reginfof [reg].last_use = i;
2216
2217                                         list = g_slist_next (list);
2218                                 }
2219                         }
2220                 }
2221
2222                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2223                 ++i;
2224                 ins = ins->next;
2225         }
2226
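/*
 * At this point 'reversed' holds the block's instructions in reverse
 * order and reginfo/reginfof hold the collected liveness info; the loop
 * below performs the actual backwards assignment.
 */
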
2227         // todo: check if we have anything left on fp stack, in verify mode?
2228         fspill = 0;
2229
2230         DEBUG (print_regtrack (reginfo, rs->next_vireg));
2231         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2232         tmp = reversed;
2233         while (tmp) {
2234                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
2235                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
2236                 --i;
2237                 ins = tmp->data;
2238                 spec = ins_spec [ins->opcode];
2239                 prev_dreg = -1;
2240                 clob_dreg = -1;
2241                 DEBUG (g_print ("processing:"));
2242                 DEBUG (print_ins (i, ins));
2243                 if (spec [MONO_INST_CLOB] == 's') {
2244                         /*
2245                          * Shift opcodes, SREG2 must be RCX
2246                          */
2247                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
2248                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2249                                         /* Argument already in hard reg, need to copy */
2250                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2251                                         insert_before_ins (ins, tmp, copy);
2252                                 }
2253                                 else {
2254                                         DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
2255                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2256                                 }
2257                         } else {
2258                                 int need_ecx_spill = TRUE;
2259                                 /* 
2260                                  * we first check if src1/dreg is already assigned a register
2261                                  * and then we force a spill of the var assigned to ECX.
2262                                  */
2263                                 /* the destination register can't be ECX */
2264                                 dest_mask &= ~ (1 << AMD64_RCX);
2265                                 src1_mask &= ~ (1 << AMD64_RCX);
2266                                 val = rs->iassign [ins->dreg];
2267                                 /* 
2268                                  * the destination register is already assigned to ECX:
2269                                  * we need to allocate another register for it and then
2270                                  * copy from this to ECX.
2271                                  */
2272                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
2273                                         int new_dest;
2274                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2275                                         g_assert (new_dest >= 0);
2276                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
2277
2278                                         rs->isymbolic [new_dest] = ins->dreg;
2279                                         rs->iassign [ins->dreg] = new_dest;
2280                                         clob_dreg = ins->dreg;
2281                                         ins->dreg = new_dest;
2282                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
2283                                         need_ecx_spill = FALSE;
2284                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
2285                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
2286                                         rs->iassign [ins->dreg] = val;
2287                                         rs->isymbolic [val] = prev_dreg;
2288                                         ins->dreg = val;*/
2289                                 }
2290                                 if (is_global_ireg (ins->sreg2)) {
2291                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2292                                         insert_before_ins (ins, tmp, copy);
2293                                 }
2294                                 else {
2295                                         val = rs->iassign [ins->sreg2];
2296                                         if (val >= 0 && val != AMD64_RCX) {
2297                                                 MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
2298                                                 DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
2299                                                 move->next = ins;
2300                                                 g_assert_not_reached ();
2301                                                 /* FIXME: where is move connected to the instruction list? */
2302                                                 //tmp->prev->data->next = move;
2303                                         }
2304                                         else {
2305                                                 if (val == AMD64_RCX)
2306                                                         need_ecx_spill = FALSE;
2307                                         }
2308                                 }
2309                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
2310                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
2311                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
2312                                         mono_regstate_free_int (rs, AMD64_RCX);
2313                                 }
2314                                 if (!is_global_ireg (ins->sreg2))
2315                                         /* force-set sreg2 */
2316                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2317                         }
2318                         ins->sreg2 = AMD64_RCX;
2319                 } else if (spec [MONO_INST_CLOB] == 'd') { 
2320                         /*
2321                          * DIVISION/REMAINDER
2322                          */
2323                         int dest_reg = AMD64_RAX;
2324                         int clob_reg = AMD64_RDX;
2325                         if (spec [MONO_INST_DEST] == 'd') {
2326                                 dest_reg = AMD64_RDX; /* remainder */
2327                                 clob_reg = AMD64_RAX;
2328                         }
2329                         if (is_global_ireg (ins->dreg))
2330                                 val = ins->dreg;
2331                         else
2332                                 val = rs->iassign [ins->dreg];
2333                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
2334                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2335                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2336                                 mono_regstate_free_int (rs, dest_reg);
2337                         }
2338                         if (val < 0) {
2339                                 if (val < -1) {
2340                                         /* the register gets spilled after this inst */
2341                                         int spill = -val -1;
2342                                         dest_mask = 1 << clob_reg;
2343                                         prev_dreg = ins->dreg;
2344                                         val = mono_regstate_alloc_int (rs, dest_mask);
2345                                         if (val < 0)
2346                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
2347                                         rs->iassign [ins->dreg] = val;
2348                                         if (spill)
2349                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2350                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2351                                         rs->isymbolic [val] = prev_dreg;
2352                                         ins->dreg = val;
2353                                 } else {
2354                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
2355                                         prev_dreg = ins->dreg;
2356                                         assign_ireg (rs, ins->dreg, dest_reg);
2357                                         ins->dreg = dest_reg;
2358                                         val = dest_reg;
2359                                 }
2360                         }
2361
2362                         //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
2363                         if (val != dest_reg) { /* force a copy */
2364                                 create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2365                                 if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
2366                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2367                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2368                                         mono_regstate_free_int (rs, dest_reg);
2369                                 }
2370                         }
2371                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= MONO_MAX_IREGS)) {
2372                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
2373                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
2374                                 mono_regstate_free_int (rs, clob_reg);
2375                         }
2376                         src1_mask = 1 << AMD64_RAX;
2377                         src2_mask = 1 << AMD64_RCX;
2378                 }
2379                 if (spec [MONO_INST_DEST] == 'l') {
2380                         int hreg;
2381                         val = rs->iassign [ins->dreg];
2382                         /* check special case when dreg has been moved from ecx (clob shift) */
2383                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2384                                 hreg = clob_dreg + 1;
2385                         else
2386                                 hreg = ins->dreg + 1;
2387
2388                         /* base prev_dreg on fixed hreg, handle clob case */
2389                         val = hreg - 1;
2390
2391                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
2392                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2393                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2394                                 mono_regstate_free_int (rs, AMD64_RAX);
2395                         }
2396                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
2397                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
2398                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
2399                                 mono_regstate_free_int (rs, AMD64_RDX);
2400                         }
2401                 }
2402
2403                 /*
2404                  * TRACK DREG
2405                  */
2406                 if (spec [MONO_INST_DEST] == 'f') {
2407                         if (use_sse2) {
2408                                 /* Allocate an XMM reg the same way as an int reg */
2409                                 if (reg_is_soft (ins->dreg, TRUE)) {
2410                                         val = rs->fassign [ins->dreg];
2411                                         prev_dreg = ins->dreg;
2412                                         
2413                                         if (val < 0) {
2414                                                 int spill = 0;
2415                                                 if (val < -1) {
2416                                                         /* the register gets spilled after this inst */
2417                                                         spill = -val -1;
2418                                                 }
2419                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
2420                                                 rs->fassign [ins->dreg] = val;
2421                                                 if (spill)
2422                                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
2423                                         }
2424                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
2425                                         rs->fsymbolic [val] = prev_dreg;
2426                                         ins->dreg = val;
2427                                 }
2428                         }
2429                         else if (spec [MONO_INST_CLOB] != 'm') {
2430                                 if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
2431                                         GList *spill_node;
2432                                         MonoInst *store;
2433                                         spill_node = g_list_first (fspill_list);
2434                                         g_assert (spill_node);
2435
2436                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
2437                                         insert_before_ins (ins, tmp, store);
2438                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2439                                         fspill--;
2440                                 }
2441                         }
2442                 } else if (spec [MONO_INST_DEST] == 'L') {
2443                         int hreg;
2444                         val = rs->iassign [ins->dreg];
2445                         /* check special case when dreg has been moved from ecx (clob shift) */
2446                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2447                                 hreg = clob_dreg + 1;
2448                         else
2449                                 hreg = ins->dreg + 1;
2450
2451                         /* base prev_dreg on fixed hreg, handle clob case */
2452                         prev_dreg = hreg - 1;
2453
2454                         if (val < 0) {
2455                                 int spill = 0;
2456                                 if (val < -1) {
2457                                         /* the register gets spilled after this inst */
2458                                         spill = -val -1;
2459                                 }
2460                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2461                                 rs->iassign [ins->dreg] = val;
2462                                 if (spill)
2463                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2464                         }
2465
2466                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
2467  
2468                         rs->isymbolic [val] = hreg - 1;
2469                         ins->dreg = val;
2470                         
2471                         val = rs->iassign [hreg];
2472                         if (val < 0) {
2473                                 int spill = 0;
2474                                 if (val < -1) {
2475                                         /* the register gets spilled after this inst */
2476                                         spill = -val -1;
2477                                 }
2478                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2479                                 rs->iassign [hreg] = val;
2480                                 if (spill)
2481                                         create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2482                         }
2483
2484                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
2485                         rs->isymbolic [val] = hreg;
2486                         /* save reg allocating into unused */
2487                         ins->unused = val;
2488
2489                         /* check if we can free our long reg */
2490                         if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2491                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
2492                                 mono_regstate_free_int (rs, val);
2493                         }
2494                 }
2495                 else if (ins->dreg >= MONO_MAX_IREGS) {
2496                         int hreg;
2497                         val = rs->iassign [ins->dreg];
2498                         if (spec [MONO_INST_DEST] == 'l') {
2499                                 /* check the special case where dreg has been moved from rcx (clob shift) */
2500                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2501                                         hreg = clob_dreg + 1;
2502                                 else
2503                                         hreg = ins->dreg + 1;
2504
2505                                 /* base prev_dreg on fixed hreg, handle clob case */
2506                                 prev_dreg = hreg - 1;
2507                         } else
2508                                 prev_dreg = ins->dreg;
2509
2510                         if (val < 0) {
2511                                 int spill = 0;
2512                                 if (val < -1) {
2513                                         /* the register gets spilled after this inst */
2514                                         spill = -val -1;
2515                                 }
2516                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2517                                 rs->iassign [ins->dreg] = val;
2518                                 if (spill)
2519                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2520                         }
2521                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2522                         rs->isymbolic [val] = prev_dreg;
2523                         ins->dreg = val;
2524                         /* handle cases where lreg needs to be eax:edx */
2525                         if (spec [MONO_INST_DEST] == 'l') {
2526                                 /* check the special case where dreg has been moved from rcx (clob shift) */
2527                                 int hreg = prev_dreg + 1;
2528                                 val = rs->iassign [hreg];
2529                                 if (val < 0) {
2530                                         int spill = 0;
2531                                         if (val < -1) {
2532                                                 /* the register gets spilled after this inst */
2533                                                 spill = -val -1;
2534                                         }
2535                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2536                                         rs->iassign [hreg] = val;
2537                                         if (spill)
2538                                                 create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2539                                 }
2540                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2541                                 rs->isymbolic [val] = hreg;
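                                /*
                                 * The instruction produces its result in the fixed
                                 * pair RAX:RDX; when the allocated regs differ, copy
                                 * the halves out of RAX/RDX right after it (the case
                                 * needing a swap is asserted unreachable).
                                 */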
2542                                 if (ins->dreg == AMD64_RAX) {
2543                                         if (val != AMD64_RDX)
2544                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2545                                 } else if (ins->dreg == AMD64_RDX) {
2546                                         if (val == AMD64_RAX) {
2547                                                 /* swap */
2548                                                 g_assert_not_reached ();
2549                                         } else {
2550                                                 /* two forced copies */
2551                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2552                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2553                                         }
2554                                 } else {
2555                                         if (val == AMD64_RDX) {
2556                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2557                                         } else {
2558                                                 /* two forced copies */
2559                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2560                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2561                                         }
2562                                 }
2563                                 if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2564                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2565                                         mono_regstate_free_int (rs, val);
2566                                 }
2567                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2568                                 /* this instruction only outputs to RAX, need to copy */
2569                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2570                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2571                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
2572                         }
2573                 }
2574
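                /*
                 * Note: the pass walks the instruction list in reverse (see the
                 * tmp list handling at the end of the loop), so a hard reg whose
                 * symbolic reg was born at this instruction is already dead
                 * before it and can go back to the free mask.
                 */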
2575                 if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
2576                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
2577                         mono_regstate_free_float (rs, ins->dreg);
2578                 }
2579                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2580                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2581                         mono_regstate_free_int (rs, ins->dreg);
2582                 }
2583
2584                 /* put src1 in RAX if it needs to be */
2585                 if (spec [MONO_INST_SRC1] == 'a') {
2586                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2587                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2588                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2589                                 mono_regstate_free_int (rs, AMD64_RAX);
2590                         }
2591                         if (ins->sreg1 < MONO_MAX_IREGS) {
2592                                 /* The argument is already in a hard reg, need to copy */
2593                                 MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
2594                                 insert_before_ins (ins, tmp, copy);
2595                         }
2596                         else
2597                                 /* force-set sreg1 */
2598                                 assign_ireg (rs, ins->sreg1, AMD64_RAX);
2599                         ins->sreg1 = AMD64_RAX;
2600                 }
2601
2602                 /*
2603                  * TRACK SREG1
2604                  */
2605                 if (spec [MONO_INST_SRC1] == 'f') {
2606                         if (use_sse2) {
2607                                 if (reg_is_soft (ins->sreg1, TRUE)) {
2608                                         val = rs->fassign [ins->sreg1];
2609                                         prev_sreg1 = ins->sreg1;
2610                                         if (val < 0) {
2611                                                 int spill = 0;
2612                                                 if (val < -1) {
2613                                                         /* the register gets spilled after this inst */
2614                                                         spill = -val -1;
2615                                                 }
2616                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
2617                                                 rs->fassign [ins->sreg1] = val;
2618                                                 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
2619                                                 if (spill) {
2620                                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
2621                                                         insert_before_ins (ins, tmp, store);
2622                                                 }
2623                                         }
2624                                         rs->fsymbolic [val] = prev_sreg1;
2625                                         ins->sreg1 = val;
2626                                 } else {
2627                                         prev_sreg1 = -1;
2628                                 }
2629                         }
2630                         else if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2632                                 MonoInst *load;
2633                                 MonoInst *store = NULL;
2634
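                                /*
                                 * Non-SSE fp values live on the x87 stack: reload the
                                 * operand from its fp spill slot here, after flushing
                                 * the current top of stack if the spill list says it
                                 * still has to be stored.
                                 */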
2635                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2636                                         GList *spill_node;
2637                                         spill_node = g_list_first (fspill_list);
2638                                         g_assert (spill_node);
2639
2640                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2641                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2642                                 }
2643
2644                                 fspill++;
2645                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2646                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2647                                 insert_before_ins (ins, tmp, load);
2648                                 if (store) 
2649                                         insert_before_ins (load, tmp, store);
2650                         }
2651                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2652                         /* force source to be same as dest */
2653                         rs->iassign [ins->sreg1] = ins->dreg;
2654                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2655
2656                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2657                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2658
2659                         ins->sreg1 = ins->dreg;
2660                         /* 
2661                          * No need for saving the reg, we know that src1=dest in this case
2662                          * ins->inst_c0 = ins->unused;
2663                          */
2664
2665                         /* make sure that we remove them from the free mask */
2666                         rs->ifree_mask &= ~ (1 << ins->dreg);
2667                         rs->ifree_mask &= ~ (1 << ins->unused);
2668                 }
2669                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2670                         val = rs->iassign [ins->sreg1];
2671                         prev_sreg1 = ins->sreg1;
2672                         if (val < 0) {
2673                                 int spill = 0;
2674                                 if (val < -1) {
2675                                         /* the register gets spilled after this inst */
2676                                         spill = -val -1;
2677                                 }
2678                                 if (0 && (ins->opcode == OP_MOVE)) {
2679                                         /* 
2680                                          * small optimization: the dest register is already allocated
2681                                          * but the src one is not: we can simply assign the same register
2682                                          * here and peephole will get rid of the instruction later.
2683                                          * This optimization may interfere with the clobbering handling:
2684                                          * it removes a mov operation that will be added again to handle clobbering.
2685                                          * There are also some other issues that show up with make testjit.
2686                                          */
2687                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2688                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2689                                         //g_assert (val >= 0);
2690                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2691                                 } else {
2692                                         //g_assert (val == -1); /* source cannot be spilled */
2693                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2694                                         rs->iassign [ins->sreg1] = val;
2695                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2696                                 }
2697                                 if (spill) {
2698                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
2699                                         insert_before_ins (ins, tmp, store);
2700                                 }
2701                         }
2702                         rs->isymbolic [val] = prev_sreg1;
2703                         ins->sreg1 = val;
2704                 } else {
2705                         prev_sreg1 = -1;
2706                 }
2707
2708                 /* handle clobbering of sreg1 */
2709                 if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
2710                         MonoInst *sreg2_copy = NULL;
2711                         MonoInst *copy;
2712                         gboolean fp = (spec [MONO_INST_SRC1] == 'f');
2713
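                        /*
                         * Two address instructions overwrite their first operand,
                         * so copy sreg1 into dreg before the instruction and let
                         * dreg act as sreg1 from then on.
                         */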
2714                         if (ins->dreg == ins->sreg2) {
2715                                 /* 
2716                                  * copying sreg1 to dreg could clobber sreg2, so allocate a new
2717                                  * register for it.
2718                                  */
2719                                 int reg2 = 0;
2720
2721                                 if (fp)
2722                                         reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2723                                 else
2724                                         reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
2725
2726                                 DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
2727                                 sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
2728                                 prev_sreg2 = ins->sreg2 = reg2;
2729
2730                                 if (fp)
2731                                         mono_regstate_free_float (rs, reg2);
2732                                 else
2733                                         mono_regstate_free_int (rs, reg2);
2734                         }
2735
2736                         copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
2737                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
2738                         insert_before_ins (ins, tmp, copy);
2739
2740                         if (sreg2_copy)
2741                                 insert_before_ins (copy, tmp, sreg2_copy);
2742
2743                         /*
2744                          * Need to prevent sreg2 from being allocated to sreg1, since that
2745                          * would screw up the previous copy.
2746                          */
2747                         src2_mask &= ~ (1 << ins->sreg1);
2748                         /* we set sreg1 to dest as well */
2749                         prev_sreg1 = ins->sreg1 = ins->dreg;
2750                         src2_mask &= ~ (1 << ins->dreg);
2751                 }
2752
2753                 /*
2754                  * TRACK SREG2
2755                  */
2756                 if (spec [MONO_INST_SRC2] == 'f') {
2757                         if (use_sse2) {
2758                                 if (reg_is_soft (ins->sreg2, TRUE)) {
2759                                         val = rs->fassign [ins->sreg2];
2760                                         prev_sreg2 = ins->sreg2;
2761                                         if (val < 0) {
2762                                                 int spill = 0;
2763                                                 if (val < -1) {
2764                                                         /* the register gets spilled after this inst */
2765                                                         spill = -val -1;
2766                                                 }
2767                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2768                                                 rs->fassign [ins->sreg2] = val;
2769                                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
2770                                                 if (spill)
2771                                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
2772                                         }
2773                                         rs->fsymbolic [val] = prev_sreg2;
2774                                         ins->sreg2 = val;
2775                                 } else {
2776                                         prev_sreg2 = -1;
2777                                 }
2778                         }
2779                         else if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2781                                 MonoInst *load;
2782                                 MonoInst *store = NULL;
2783
2784                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2785                                         GList *spill_node;
2786
2787                                         spill_node = g_list_first (fspill_list);
2788                                         g_assert (spill_node);
2789                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2790                                                 spill_node = g_list_next (spill_node);
2791         
2792                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2793                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2794                                 } 
2795                                 
2796                                 fspill++;
2797                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2798                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2799                                 insert_before_ins (ins, tmp, load);
2800                                 if (store) 
2801                                         insert_before_ins (load, tmp, store);
2802                         }
2803                 } 
2804                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2805                         val = rs->iassign [ins->sreg2];
2806                         prev_sreg2 = ins->sreg2;
2807                         if (val < 0) {
2808                                 int spill = 0;
2809                                 if (val < -1) {
2810                                         /* the register gets spilled after this inst */
2811                                         spill = -val -1;
2812                                 }
2813                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2814                                 rs->iassign [ins->sreg2] = val;
2815                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2816                                 if (spill)
2817                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
2818                         }
2819                         rs->isymbolic [val] = prev_sreg2;
2820                         ins->sreg2 = val;
2821                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2822                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but RCX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2823                         }
2824                 } else {
2825                         prev_sreg2 = -1;
2826                 }
2827
2828                 if (spec [MONO_INST_CLOB] == 'c') {
2829                         int j, s;
2830                         MonoCallInst *call = (MonoCallInst*)ins;
2831                         GSList *list;
2832                         guint32 clob_mask = AMD64_CALLEE_REGS;
2833
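                        /*
                         * A call clobbers every register in AMD64_CALLEE_REGS (and,
                         * with SSE2, AMD64_CALLEE_FREGS): force spill any that are
                         * still live, except the one holding the call address in
                         * sreg1.
                         */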
2834                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2835                                 s = 1 << j;
2836                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2837                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
2838                                         mono_regstate_free_int (rs, j);
2839                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2840                                 }
2841                         }
2842
2843                         if (use_sse2) {
2844                                 clob_mask = AMD64_CALLEE_FREGS;
2845
2846                                 for (j = 0; j < MONO_MAX_FREGS; ++j) {
2847                                         s = 1 << j;
2848                                         if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
2849                                                 get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
2850                                                 mono_regstate_free_float (rs, j);
2851                                                 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2852                                         }
2853                                 }
2854                         }
2855
2856                         /* 
2857                          * Assign all registers in call->out_ireg_args and
2858                          * call->out_freg_args to the proper argument registers.
2859                          */
2860
2861                         list = call->out_ireg_args;
2862                         if (list) {
2863                                 while (list) {
2864                                         guint64 regpair;
2865                                         int reg, hreg;
2866
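                                        /* each entry packs (hard arg reg << 32) | soft reg */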
2867                                         regpair = (guint64) (list->data);
2868                                         hreg = regpair >> 32;
2869                                         reg = regpair & 0xffffffff;
2870
2871                                         assign_ireg (rs, reg, hreg);
2872
2873                                         DEBUG (g_print ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg));
2874
2875                                         list = g_slist_next (list);
2876                                 }
2877                                 g_slist_free (call->out_ireg_args);
2878                         }
2879
2880                         list = call->out_freg_args;
2881                         if (list && use_sse2) {
2882                                 while (list) {
2883                                         guint64 regpair;
2884                                         int reg, hreg;
2885
2886                                         regpair = (guint64) (list->data);
2887                                         hreg = regpair >> 32;
2888                                         reg = regpair & 0xffffffff;
2889
2890                                         rs->fassign [reg] = hreg;
2891                                         rs->fsymbolic [hreg] = reg;
2892                                         rs->ffree_mask &= ~ (1 << hreg);
2893
2894                                         list = g_slist_next (list);
2895                                 }
2896                         }
2897                         if (call->out_freg_args)
2898                                 g_slist_free (call->out_freg_args);
2899                 }
2900
2901                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2902                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2903                         mono_regstate_free_int (rs, ins->sreg1);
2904                 }
2905                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2906                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2907                         mono_regstate_free_int (rs, ins->sreg2);
2908                 }*/
2909         
2910                 DEBUG (print_ins (i, ins));
2911                 /* this may result from an insert_before call */
2912                 if (!tmp->next)
2913                         bb->code = tmp->data;
2914                 tmp = tmp->next;
2915         }
2916
2917         g_free (reginfo);
2918         g_free (reginfof);
2919         g_list_free (fspill_list);
2920 }
2921
2922 static unsigned char*
2923 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2924 {
2925         if (use_sse2) {
2926                 amd64_sse_cvtsd2si_reg_reg (code, dreg, sreg);
2927         }
2928         else {
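                /*
                 * x87 fallback: fistp honours the rounding control in the fpu
                 * control word, so save it, set RC to 11b (truncate toward zero)
                 * as CIL conversion semantics require, convert, then restore the
                 * original control word.
                 */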
2929                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2930                 x86_fnstcw_membase(code, AMD64_RSP, 0);
2931                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2932                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2933                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2934                 amd64_fldcw_membase (code, AMD64_RSP, 2);
2935                 amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
2936                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2937                 amd64_pop_reg (code, dreg);
2938                 amd64_fldcw_membase (code, AMD64_RSP, 0);
2939                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2940         }
2941
2942         if (size == 1)
2943                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2944         else if (size == 2)
2945                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2946         return code;
2947 }
2948
2949 static unsigned char*
2950 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2951 {
2952         int sreg = tree->sreg1;
2953 #ifdef PLATFORM_WIN32
2954         guint8* br[5];
2955
2956         NOT_IMPLEMENTED;
2957
2958         /*
2959          * Under Windows:
2960          * If the requested stack size is larger than one page,
2961          * perform a stack-touch operation.
2962          */
2963         /*
2964          * Generate stack probe code.
2965          * Under Windows, it is necessary to allocate one page at a time,
2966          * "touching" stack after each successful sub-allocation. This is
2967          * because of the way stack growth is implemented - there is a
2968          * guard page before the lowest stack page that is currently committed.
2969          * Stack normally grows sequentially, so the OS traps access to the
2970          * guard page and commits more pages when needed.
2971          */
2972         amd64_test_reg_imm (code, sreg, ~0xFFF);
2973         br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2974
2975         br[2] = code; /* loop */
2976         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
2977         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
2978         amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
2979         amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
2980         br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
2981         amd64_patch (br[3], br[2]);
2982         amd64_test_reg_reg (code, sreg, sreg);
2983         br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2984         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2985
2986         br[1] = code; x86_jump8 (code, 0);
2987
2988         amd64_patch (br[0], code);
2989         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2990         amd64_patch (br[1], code);
2991         amd64_patch (br[4], code);
2992 #else /* PLATFORM_WIN32 */
2993         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
2994 #endif
2995         if (tree->flags & MONO_INST_INIT) {
2996                 int offset = 0;
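                /*
                 * Zero the freshly allocated area with rep stos, preserving
                 * RAX, RCX and RDI around it unless they hold the result or
                 * the size.
                 */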
2997                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
2998                         amd64_push_reg (code, AMD64_RAX);
2999                         offset += 8;
3000                 }
3001                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
3002                         amd64_push_reg (code, AMD64_RCX);
3003                         offset += 8;
3004                 }
3005                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
3006                         amd64_push_reg (code, AMD64_RDI);
3007                         offset += 8;
3008                 }
3009                 
3010                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
3011                 if (sreg != AMD64_RCX)
3012                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
3013                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3014                                 
3015                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
3016                 amd64_cld (code);
3017                 amd64_prefix (code, X86_REP_PREFIX);
3018                 amd64_stosl (code);
3019                 
3020                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
3021                         amd64_pop_reg (code, AMD64_RDI);
3022                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
3023                         amd64_pop_reg (code, AMD64_RCX);
3024                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
3025                         amd64_pop_reg (code, AMD64_RAX);
3026         }
3027         return code;
3028 }
3029
3030 static guint8*
3031 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3032 {
3033         CallInfo *cinfo;
3034         guint32 offset, quad;
3035
3036         /* Move return value to the target register */
3037         /* FIXME: do this in the local reg allocator */
3038         switch (ins->opcode) {
3039         case CEE_CALL:
3040         case OP_CALL_REG:
3041         case OP_CALL_MEMBASE:
3042         case OP_LCALL:
3043         case OP_LCALL_REG:
3044         case OP_LCALL_MEMBASE:
3045                 if (ins->dreg != AMD64_RAX)
3046                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
3047                 break;
3048         case OP_FCALL:
3049         case OP_FCALL_REG:
3050         case OP_FCALL_MEMBASE:
3051                 /* FIXME: optimize this */
3052                 offset = mono_spillvar_offset_float (cfg, 0);
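                /*
                 * The ABI returns fp values in XMM0. When SSE2 is not used for
                 * fp arithmetic the value has to reach the x87 stack, so it
                 * goes through a spill slot; R4 results are widened to R8
                 * either way.
                 */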
3053                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
3054                         if (use_sse2)
3055                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
3056                         else {
3057                                 amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3058                                 amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
3059                         }
3060                 }
3061                 else {
3062                         if (use_sse2) {
3063                                 if (ins->dreg != AMD64_XMM0)
3064                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
3065                         }
3066                         else {
3067                                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3068                                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
3069                         }
3070                 }
3071                 break;
3072         case OP_VCALL:
3073         case OP_VCALL_REG:
3074         case OP_VCALL_MEMBASE:
3075                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
3076                 if (cinfo->ret.storage == ArgValuetypeInReg) {
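                        /*
                         * Small value types come back in up to two 8 byte quads,
                         * each in a gp or SSE register as recorded in
                         * cinfo->ret.pair_storage.
                         */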
3077                         /* Pop the destination address from the stack */
3078                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3079                         amd64_pop_reg (code, AMD64_RCX);
3080                         
3081                         for (quad = 0; quad < 2; quad ++) {
3082                                 switch (cinfo->ret.pair_storage [quad]) {
3083                                 case ArgInIReg:
3084                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
3085                                         break;
3086                                 case ArgInFloatSSEReg:
3087                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3088                                         break;
3089                                 case ArgInDoubleSSEReg:
3090                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3091                                         break;
3092                                 case ArgNone:
3093                                         break;
3094                                 default:
3095                                         NOT_IMPLEMENTED;
3096                                 }
3097                         }
3098                 }
3099                 g_free (cinfo);
3100                 break;
3101         }
3102
3103         return code;
3104 }
3105
3106 /*
3107  * emit_load_volatile_arguments:
3108  *
3109  *  Load volatile arguments from the stack to the original input registers.
3110  * Required before a tail call.
3111  */
3112 static guint8*
3113 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3114 {
3115         MonoMethod *method = cfg->method;
3116         MonoMethodSignature *sig;
3117         MonoInst *inst;
3118         CallInfo *cinfo;
3119         guint32 i;
3120
3121         /* FIXME: Generate intermediate code instead */
3122
3123         sig = mono_method_signature (method);
3124
3125         cinfo = get_call_info (sig, FALSE);
3126         
3127         /* This is the opposite of the code in emit_prolog */
3128
3129         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3130                 ArgInfo *ainfo = cinfo->args + i;
3131                 MonoType *arg_type;
3132                 inst = cfg->varinfo [i];
3133
3134                 if (sig->hasthis && (i == 0))
3135                         arg_type = &mono_defaults.object_class->byval_arg;
3136                 else
3137                         arg_type = sig->params [i - sig->hasthis];
3138
3139                 if (inst->opcode != OP_REGVAR) {
3140                         switch (ainfo->storage) {
3141                         case ArgInIReg: {
3142                                 guint32 size = 8;
3143
3144                                 /* FIXME: I1 etc */
3145                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
3146                                 break;
3147                         }
3148                         case ArgInFloatSSEReg:
3149                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3150                                 break;
3151                         case ArgInDoubleSSEReg:
3152                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3153                                 break;
3154                         default:
3155                                 break;
3156                         }
3157                 }
3158         }
3159
3160         g_free (cinfo);
3161
3162         return code;
3163 }
3164
3165 #define REAL_PRINT_REG(text,reg) \
3166 mono_assert (reg >= 0); \
3167 amd64_push_reg (code, AMD64_RAX); \
3168 amd64_push_reg (code, AMD64_RDX); \
3169 amd64_push_reg (code, AMD64_RCX); \
3170 amd64_push_reg (code, reg); \
3171 amd64_push_imm (code, reg); \
3172 amd64_push_imm (code, text " %d %p\n"); \
3173 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
3174 amd64_call_reg (code, AMD64_RAX); \
3175 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*8); /* pop the three printf args (8 bytes each) */ \
3176 amd64_pop_reg (code, AMD64_RCX); \
3177 amd64_pop_reg (code, AMD64_RDX); \
3178 amd64_pop_reg (code, AMD64_RAX);
3179
3180 /* benchmark and set based on cpu */
3181 #define LOOP_ALIGNMENT 8
3182 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
3183
3184 void
3185 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3186 {
3187         MonoInst *ins;
3188         MonoCallInst *call;
3189         guint offset;
3190         guint8 *code = cfg->native_code + cfg->code_len;
3191         MonoInst *last_ins = NULL;
3192         guint last_offset = 0;
3193         int max_len, cpos;
3194
3195         if (cfg->opt & MONO_OPT_PEEPHOLE)
3196                 peephole_pass (cfg, bb);
3197
3198         if (cfg->opt & MONO_OPT_LOOP) {
3199                 int pad, align = LOOP_ALIGNMENT;
3200                 /* set alignment depending on cpu */
3201                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
3202                         pad = align - pad;
3203                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3204                         amd64_padding (code, pad);
3205                         cfg->code_len += pad;
3206                         bb->native_offset = cfg->code_len;
3207                 }
3208         }
3209
3210         if (cfg->verbose_level > 2)
3211                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3212
3213         cpos = bb->max_offset;
3214
3215         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3216                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
3217                 g_assert (!mono_compile_aot);
3218                 cpos += 6;
3219
3220                 cov->data [bb->dfn].cil_code = bb->cil_code;
3221                 /* this is not thread safe, but good enough */
3222                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
3223         }
3224
3225         offset = code - cfg->native_code;
3226
3227         ins = bb->code;
3228         while (ins) {
3229                 offset = code - cfg->native_code;
3230
3231                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3232
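                /* make sure the worst case encoding of this opcode still fits,
                   growing the code buffer if necessary */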
3233                 if (offset > (cfg->code_size - max_len - 16)) {
3234                         cfg->code_size *= 2;
3235                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3236                         code = cfg->native_code + offset;
3237                         mono_jit_stats.code_reallocs++;
3238                 }
3239
3240                 mono_debug_record_line_number (cfg, ins, offset);
3241
3242                 switch (ins->opcode) {
3243                 case OP_BIGMUL:
3244                         amd64_mul_reg (code, ins->sreg2, TRUE);
3245                         break;
3246                 case OP_BIGMUL_UN:
3247                         amd64_mul_reg (code, ins->sreg2, FALSE);
3248                         break;
3249                 case OP_X86_SETEQ_MEMBASE:
3250                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
3251                         break;
3252                 case OP_STOREI1_MEMBASE_IMM:
3253                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
3254                         break;
3255                 case OP_STOREI2_MEMBASE_IMM:
3256                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
3257                         break;
3258                 case OP_STOREI4_MEMBASE_IMM:
3259                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
3260                         break;
3261                 case OP_STOREI1_MEMBASE_REG:
3262                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
3263                         break;
3264                 case OP_STOREI2_MEMBASE_REG:
3265                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
3266                         break;
3267                 case OP_STORE_MEMBASE_REG:
3268                 case OP_STOREI8_MEMBASE_REG:
3269                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
3270                         break;
3271                 case OP_STOREI4_MEMBASE_REG:
3272                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
3273                         break;
3274                 case OP_STORE_MEMBASE_IMM:
3275                 case OP_STOREI8_MEMBASE_IMM:
3276                         if (amd64_is_imm32 (ins->inst_imm))
3277                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
3278                         else {
3279                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
3280                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
3281                         }
3282                         break;
3283                 case CEE_LDIND_I:
3284                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
3285                         break;
3286                 case CEE_LDIND_I4:
3287                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3288                         break;
3289                 case CEE_LDIND_U4:
3290                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3291                         break;
3292                 case OP_LOADU4_MEM:
3293                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
3294                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
3295                         break;
3296                 case OP_LOAD_MEMBASE:
3297                 case OP_LOADI8_MEMBASE:
3298                         if (amd64_is_imm32 (ins->inst_offset)) {
3299                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
3300                         }
3301                         else {
3302                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
3303                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
3304                         }
3305                         break;
3306                 case OP_LOADI4_MEMBASE:
3307                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3308                         break;
3309                 case OP_LOADU4_MEMBASE:
3310                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
3311                         break;
3312                 case OP_LOADU1_MEMBASE:
3313                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
3314                         break;
3315                 case OP_LOADI1_MEMBASE:
3316                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
3317                         break;
3318                 case OP_LOADU2_MEMBASE:
3319                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
3320                         break;
3321                 case OP_LOADI2_MEMBASE:
3322                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
3323                         break;
3324                 case CEE_CONV_I1:
3325                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3326                         break;
3327                 case CEE_CONV_I2:
3328                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3329                         break;
3330                 case CEE_CONV_U1:
3331                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
3332                         break;
3333                 case CEE_CONV_U2:
3334                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
3335                         break;
3336                 case CEE_CONV_U8:
3337                 case CEE_CONV_U:
3338                         /* Clean out the upper 32 bits */
3339                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3340                         break;
3341                 case CEE_CONV_I8:
3342                 case CEE_CONV_I:
3343                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
3344                         break;                  
3345                 case OP_COMPARE:
3346                 case OP_LCOMPARE:
3347                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3348                         break;
3349                 case OP_COMPARE_IMM:
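                        /* immediates wider than 32 bits cannot be encoded in cmp,
                           so materialize them in the scratch reg %r11 first */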
3350                         if (!amd64_is_imm32 (ins->inst_imm)) {
3351                                 amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
3352                                 amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
3353                         } else {
3354                                 amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
3355                         }
3356                         break;
3357                 case OP_X86_COMPARE_REG_MEMBASE:
3358                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
3359                         break;
3360                 case OP_X86_TEST_NULL:
3361                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
3362                         break;
3363                 case OP_AMD64_TEST_NULL:
3364                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3365                         break;
3366                 case OP_X86_ADD_MEMBASE_IMM:
3367                         /* FIXME: Make a 64 bit version too */
3368                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3369                         break;
3370                 case OP_X86_ADD_MEMBASE:
3371                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3372                         break;
3373                 case OP_X86_SUB_MEMBASE_IMM:
3374                         g_assert (amd64_is_imm32 (ins->inst_imm));
3375                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3376                         break;
3377                 case OP_X86_SUB_MEMBASE:
3378                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3379                         break;
3380                 case OP_X86_INC_MEMBASE:
3381                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3382                         break;
3383                 case OP_X86_INC_REG:
3384                         amd64_inc_reg_size (code, ins->dreg, 4);
3385                         break;
3386                 case OP_X86_DEC_MEMBASE:
3387                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3388                         break;
3389                 case OP_X86_DEC_REG:
3390                         amd64_dec_reg_size (code, ins->dreg, 4);
3391                         break;
3392                 case OP_X86_MUL_MEMBASE:
3393                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3394                         break;
3395                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
3396                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
3397                         break;
3398                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
3399                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3400                         break;
3401                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
3402                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3403                         break;
3404                 case CEE_BREAK:
3405                         amd64_breakpoint (code);
3406                         break;
3407
3408                 case OP_ADDCC:
3409                 case CEE_ADD:
3410                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
3411                         break;
3412                 case OP_ADC:
3413                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
3414                         break;
3415                 case OP_ADD_IMM:
3416                         g_assert (amd64_is_imm32 (ins->inst_imm));
3417                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
3418                         break;
3419                 case OP_ADC_IMM:
3420                         g_assert (amd64_is_imm32 (ins->inst_imm));
3421                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
3422                         break;
3423                 case OP_SUBCC:
3424                 case CEE_SUB:
3425                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
3426                         break;
3427                 case OP_SBB:
3428                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
3429                         break;
3430                 case OP_SUB_IMM:
3431                         g_assert (amd64_is_imm32 (ins->inst_imm));
3432                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
3433                         break;
3434                 case OP_SBB_IMM:
3435                         g_assert (amd64_is_imm32 (ins->inst_imm));
3436                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
3437                         break;
3438                 case CEE_AND:
3439                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
3440                         break;
3441                 case OP_AND_IMM:
3442                         g_assert (amd64_is_imm32 (ins->inst_imm));
3443                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
3444                         break;
3445                 case CEE_MUL:
3446                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3447                         break;
3448                 case OP_MUL_IMM:
3449                         amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
3450                         break;
3451                 case CEE_DIV:
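                        /* [i]div takes the dividend in rdx:rax; sign extend rax
                           into rdx first (the unsigned cases zero rdx instead) */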
3452                         amd64_cdq (code);
3453                         amd64_div_reg (code, ins->sreg2, TRUE);
3454                         break;
3455                 case CEE_DIV_UN:
3456                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3457                         amd64_div_reg (code, ins->sreg2, FALSE);
3458                         break;
3459                 case OP_DIV_IMM:
3460                         g_assert (amd64_is_imm32 (ins->inst_imm));
3461                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3462                         amd64_cdq (code);
3463                         amd64_div_reg (code, ins->sreg2, TRUE);
3464                         break;
3465                 case CEE_REM:
3466                         amd64_cdq (code);
3467                         amd64_div_reg (code, ins->sreg2, TRUE);
3468                         break;
3469                 case CEE_REM_UN:
3470                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3471                         amd64_div_reg (code, ins->sreg2, FALSE);
3472                         break;
3473                 case OP_REM_IMM:
3474                         g_assert (amd64_is_imm32 (ins->inst_imm));
3475                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3476                         amd64_cdq (code);
3477                         amd64_div_reg (code, ins->sreg2, TRUE);
3478                         break;
3479                 case CEE_OR:
3480                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
3481                         break;
3482                 case OP_OR_IMM:
3483                         g_assert (amd64_is_imm32 (ins->inst_imm));
3484                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
3485                         break;
3486                 case CEE_XOR:
3487                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
3488                         break;
3489                 case OP_XOR_IMM:
3490                         g_assert (amd64_is_imm32 (ins->inst_imm));
3491                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
3492                         break;
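                /*
                 * Variable shift counts must be in cl on x86/amd64, hence
                 * the asserts below that sreg2 was allocated to rcx.
                 */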
3493                 case CEE_SHL:
3494                 case OP_LSHL:
3495                         g_assert (ins->sreg2 == AMD64_RCX);
3496                         amd64_shift_reg (code, X86_SHL, ins->dreg);
3497                         break;
3498                 case CEE_SHR:
3499                 case OP_LSHR:
3500                         g_assert (ins->sreg2 == AMD64_RCX);
3501                         amd64_shift_reg (code, X86_SAR, ins->dreg);
3502                         break;
3503                 case OP_SHR_IMM:
3504                         g_assert (amd64_is_imm32 (ins->inst_imm));
3505                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3506                         break;
3507                 case OP_LSHR_IMM:
3508                         g_assert (amd64_is_imm32 (ins->inst_imm));
3509                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
3510                         break;
3511                 case OP_SHR_UN_IMM:
3512                         g_assert (amd64_is_imm32 (ins->inst_imm));
3513                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3514                         break;
3515                 case OP_LSHR_UN_IMM:
3516                         g_assert (amd64_is_imm32 (ins->inst_imm));
3517                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
3518                         break;
3519                 case CEE_SHR_UN:
3520                         g_assert (ins->sreg2 == AMD64_RCX);
3521                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3522                         break;
3523                 case OP_LSHR_UN:
3524                         g_assert (ins->sreg2 == AMD64_RCX);
3525                         amd64_shift_reg (code, X86_SHR, ins->dreg);
3526                         break;
3527                 case OP_SHL_IMM:
3528                         g_assert (amd64_is_imm32 (ins->inst_imm));
3529                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3530                         break;
3531                 case OP_LSHL_IMM:
3532                         g_assert (amd64_is_imm32 (ins->inst_imm));
3533                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
3534                         break;
3535
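                /*
                 * The OP_I* opcodes below are the 32 bit variants, emitted
                 * with the _size (..., 4) macros so only the low dword
                 * participates; on amd64, writing a 32 bit subregister
                 * implicitly zero extends the full 64 bit register.
                 */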
3536                 case OP_IADDCC:
3537                 case OP_IADD:
3538                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
3539                         break;
3540                 case OP_IADC:
3541                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
3542                         break;
3543                 case OP_IADD_IMM:
3544                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
3545                         break;
3546                 case OP_IADC_IMM:
3547                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
3548                         break;
3549                 case OP_ISUBCC:
3550                 case OP_ISUB:
3551                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
3552                         break;
3553                 case OP_ISBB:
3554                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
3555                         break;
3556                 case OP_ISUB_IMM:
3557                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
3558                         break;
3559                 case OP_ISBB_IMM:
3560                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
3561                         break;
3562                 case OP_IAND:
3563                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
3564                         break;
3565                 case OP_IAND_IMM:
3566                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
3567                         break;
3568                 case OP_IOR:
3569                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
3570                         break;
3571                 case OP_IOR_IMM:
3572                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
3573                         break;
3574                 case OP_IXOR:
3575                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
3576                         break;
3577                 case OP_IXOR_IMM:
3578                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
3579                         break;
3580                 case OP_INEG:
3581                         amd64_neg_reg_size (code, ins->sreg1, 4);
3582                         break;
3583                 case OP_INOT:
3584                         amd64_not_reg_size (code, ins->sreg1, 4);
3585                         break;
3586                 case OP_ISHL:
3587                         g_assert (ins->sreg2 == AMD64_RCX);
3588                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
3589                         break;
3590                 case OP_ISHR:
3591                         g_assert (ins->sreg2 == AMD64_RCX);
3592                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
3593                         break;
3594                 case OP_ISHR_IMM:
3595                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3596                         break;
3597                 case OP_ISHR_UN_IMM:
3598                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3599                         break;
3600                 case OP_ISHR_UN:
3601                         g_assert (ins->sreg2 == AMD64_RCX);
3602                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3603                         break;
3604                 case OP_ISHL_IMM:
3605                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3606                         break;
3607                 case OP_IMUL:
3608                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3609                         break;
3610                 case OP_IMUL_IMM:
3611                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
3612                         break;
3613                 case OP_IMUL_OVF:
3614                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3615                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3616                         break;
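                /*
                 * Unsigned mul has a fixed operand: it multiplies eax by the
                 * given register, writes the double width product to edx:eax
                 * and sets OF/CF when the high half is non zero. The operand
                 * shuffling below frees up eax/edx around the mul so the
                 * overflow flag can be checked afterwards.
                 */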
3617                 case OP_IMUL_OVF_UN: {
3618                         /* the mul operation and the exception check should most likely be split */
3619                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
3620                         /*g_assert (ins->sreg2 == X86_EAX);
3621                         g_assert (ins->dreg == X86_EAX);*/
3622                         if (ins->sreg2 == X86_EAX) {
3623                                 non_eax_reg = ins->sreg1;
3624                         } else if (ins->sreg1 == X86_EAX) {
3625                                 non_eax_reg = ins->sreg2;
3626                         } else {
3627                                 /* no need to save since we're going to store to it anyway */
3628                                 if (ins->dreg != X86_EAX) {
3629                                         saved_eax = TRUE;
3630                                         amd64_push_reg (code, X86_EAX);
3631                                 }
3632                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
3633                                 non_eax_reg = ins->sreg2;
3634                         }
3635                         if (ins->dreg == X86_EDX) {
3636                                 if (!saved_eax) {
3637                                         saved_eax = TRUE;
3638                                         amd64_push_reg (code, X86_EAX);
3639                                 }
3640                         } else if (ins->dreg != X86_EAX) {
3641                                 saved_edx = TRUE;
3642                                 amd64_push_reg (code, X86_EDX);
3643                         }
3644                         amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
3645                         /* save before the check since pop and mov don't change the flags */
3646                         if (ins->dreg != X86_EAX)
3647                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
3648                         if (saved_edx)
3649                                 amd64_pop_reg (code, X86_EDX);
3650                         if (saved_eax)
3651                                 amd64_pop_reg (code, X86_EAX);
3652                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3653                         break;
3654                 }
3655                 case OP_IDIV:
3656                         amd64_cdq_size (code, 4);
3657                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3658                         break;
3659                 case OP_IDIV_UN:
3660                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3661                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3662                         break;
3663                 case OP_IDIV_IMM:
3664                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3665                         amd64_cdq_size (code, 4);
3666                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3667                         break;
3668                 case OP_IREM:
3669                         amd64_cdq_size (code, 4);
3670                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3671                         break;
3672                 case OP_IREM_UN:
3673                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3674                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3675                         break;
3676                 case OP_IREM_IMM:
3677                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3678                         amd64_cdq_size (code, 4);
3679                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3680                         break;
3681
3682                 case OP_ICOMPARE:
3683                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
3684                         break;
3685                 case OP_ICOMPARE_IMM:
3686                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
3687                         break;
3688
3689                 case OP_IBEQ:
3690                 case OP_IBLT:
3691                 case OP_IBGT:
3692                 case OP_IBGE:
3693                 case OP_IBLE:
3694                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
3695                         break;
3696                 case OP_IBNE_UN:
3697                 case OP_IBLT_UN:
3698                 case OP_IBGT_UN:
3699                 case OP_IBGE_UN:
3700                 case OP_IBLE_UN:
3701                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
3702                         break;
3703                 case OP_COND_EXC_IOV:
3704                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3705                                                                                 TRUE, ins->inst_p1);
3706                         break;
3707                 case OP_COND_EXC_IC:
3708                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3709                                                                                 FALSE, ins->inst_p1);
3710                         break;
3711                 case CEE_NOT:
3712                         amd64_not_reg (code, ins->sreg1);
3713                         break;
3714                 case CEE_NEG:
3715                         amd64_neg_reg (code, ins->sreg1);
3716                         break;
3717                 case OP_SEXT_I1:
3718                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3719                         break;
3720                 case OP_SEXT_I2:
3721                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3722                         break;
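                /*
                 * Constants whose top 32 bits are zero can use the shorter
                 * mov r32, imm32 encoding: the implicit zero extension of
                 * 32 bit writes reproduces the full 64 bit value.
                 */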
3723                 case OP_ICONST:
3724                 case OP_I8CONST:
3725                         if ((((guint64)ins->inst_c0) >> 32) == 0)
3726                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
3727                         else
3728                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
3729                         break;
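                /*
                 * AOT constants are loaded rip relative; the zero
                 * displacement below is fixed up later through the patch
                 * info recorded here.
                 */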
3730                 case OP_AOTCONST:
3731                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3732                         amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
3733                         break;
3734                 case CEE_CONV_I4:
3735                 case CEE_CONV_U4:
3736                 case OP_MOVE:
3737                 case OP_SETREG:
3738                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
3739                         break;
3740                 case OP_AMD64_SET_XMMREG_R4: {
3741                         if (use_sse2) {
3742                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
3743                         }
3744                         else {
3745                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
3746                                 /* ins->dreg is set to -1 by the reg allocator */
3747                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
3748                         }
3749                         break;
3750                 }
3751                 case OP_AMD64_SET_XMMREG_R8: {
3752                         if (use_sse2) {
3753                                 if (ins->dreg != ins->sreg1)
3754                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3755                         }
3756                         else {
3757                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
3758                                 /* ins->dreg is set to -1 by the reg allocator */
3759                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
3760                         }
3761                         break;
3762                 }
3763                 case CEE_JMP: {
3764                         /*
3765                          * Note: this 'frame destruction' logic is useful for tail calls, too.
3766                          * Keep in sync with the code in emit_epilog.
3767                          */
3768                         int pos = 0, i;
3769
3770                         /* FIXME: no tracing support... */
3771                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3772                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
3773
3774                         g_assert (!cfg->method->save_lmf);
3775
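                        /*
                         * Arguments that were spilled to the frame are
                         * presumably reloaded into their registers here,
                         * since the frame is about to be destroyed and the
                         * jump target expects them in the usual argument
                         * registers.
                         */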
3776                         code = emit_load_volatile_arguments (cfg, code);
3777
3778                         for (i = 0; i < AMD64_NREG; ++i)
3779                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
3780                                         pos -= sizeof (gpointer);
3781                         
3782                         if (pos)
3783                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
3784
3785                         /* Pop registers in reverse order */
3786                         for (i = AMD64_NREG - 1; i > 0; --i)
3787                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3788                                         amd64_pop_reg (code, i);
3789                                 }
3790
3791                         amd64_leave (code);
3792                         offset = code - cfg->native_code;
3793                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3794                         if (mono_compile_aot)
3795                                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
3796                         else
3797                                 amd64_set_reg_template (code, AMD64_R11);
3798                         amd64_jump_reg (code, AMD64_R11);
3799                         break;
3800                 }
3801                 case OP_CHECK_THIS:
3802                         /* ensure ins->sreg1 is not NULL: the cmp faults on a null 'this' and the fault is mapped to a NullReferenceException */
3803                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
3804                         break;
3805                 case OP_ARGLIST: {
3806                         amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
3807                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
3808                         break;
3809                 }
3810                 case OP_FCALL:
3811                 case OP_LCALL:
3812                 case OP_VCALL:
3813                 case OP_VOIDCALL:
3814                 case CEE_CALL:
3815                         call = (MonoCallInst*)ins;
3816                         /*
3817                          * The AMD64 ABI forces callers to know about varargs:
3818                          * %al must hold an upper bound on the number of SSE
                         * registers used for the call, which is what the %rax
                         * setup below establishes.
                         */
3819                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
3820                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3821                         else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
3822                                 /* 
3823                                  * Since the unmanaged calling convention doesn't contain a 
3824                                  * 'vararg' entry, we have to treat every pinvoke call as a
3825                                  * potential vararg call.
3826                                  */
3827                                 guint32 nregs, i;
3828                                 nregs = 0;
3829                                 for (i = 0; i < AMD64_XMM_NREG; ++i)
3830                                         if (call->used_fregs & (1 << i))
3831                                                 nregs ++;
3832                                 if (!nregs)
3833                                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3834                                 else
3835                                         amd64_mov_reg_imm (code, AMD64_RAX, nregs);
3836                         }
3837
3838                         if (ins->flags & MONO_INST_HAS_METHOD)
3839                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3840                         else
3841                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3842                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3843                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3844                         code = emit_move_return_value (cfg, ins, code);
3845                         break;
3846                 case OP_FCALL_REG:
3847                 case OP_LCALL_REG:
3848                 case OP_VCALL_REG:
3849                 case OP_VOIDCALL_REG:
3850                 case OP_CALL_REG:
3851                         call = (MonoCallInst*)ins;
3852
3853                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3854                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3855                                 ins->sreg1 = AMD64_R11;
3856                         }
3857
3858                         /*
3859                          * The AMD64 ABI forces callers to know about varargs.
3860                          */
3861                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
3862                                 if (ins->sreg1 == AMD64_RAX) {
3863                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
3864                                         ins->sreg1 = AMD64_R11;
3865                                 }
3866                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3867                         }
3868                         amd64_call_reg (code, ins->sreg1);
3869                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3870                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3871                         code = emit_move_return_value (cfg, ins, code);
3872                         break;
3873                 case OP_FCALL_MEMBASE:
3874                 case OP_LCALL_MEMBASE:
3875                 case OP_VCALL_MEMBASE:
3876                 case OP_VOIDCALL_MEMBASE:
3877                 case OP_CALL_MEMBASE:
3878                         call = (MonoCallInst*)ins;
3879
3880                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3881                                 /* 
3882                                  * Can't use R11 because it is clobbered by the trampoline 
3883                                  * code, and the reg value is needed by get_vcall_slot_addr.
3884                                  */
3885                                 amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
3886                                 ins->sreg1 = AMD64_RAX;
3887                         }
3888
3889                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3890                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3891                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3892                         code = emit_move_return_value (cfg, ins, code);
3893                         break;
3894                 case OP_OUTARG:
3895                 case OP_X86_PUSH:
3896                         amd64_push_reg (code, ins->sreg1);
3897                         break;
3898                 case OP_X86_PUSH_IMM:
3899                         g_assert (amd64_is_imm32 (ins->inst_imm));
3900                         amd64_push_imm (code, ins->inst_imm);
3901                         break;
3902                 case OP_X86_PUSH_MEMBASE:
3903                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3904                         break;
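                /*
                 * OP_X86_PUSH_OBJ pushes a value type: it reserves inst_imm
                 * bytes on the stack, then copies the object a qword at a
                 * time with rep movs, saving rdi/rsi/rcx around the string
                 * op (the '3 * 8' lea skips those three saved registers).
                 */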
3905                 case OP_X86_PUSH_OBJ: 
3906                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3907                         amd64_push_reg (code, AMD64_RDI);
3908                         amd64_push_reg (code, AMD64_RSI);
3909                         amd64_push_reg (code, AMD64_RCX);
3910                         if (ins->inst_offset)
3911                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3912                         else
3913                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3914                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
3915                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
3916                         amd64_cld (code);
3917                         amd64_prefix (code, X86_REP_PREFIX);
3918                         amd64_movsd (code);
3919                         amd64_pop_reg (code, AMD64_RCX);
3920                         amd64_pop_reg (code, AMD64_RSI);
3921                         amd64_pop_reg (code, AMD64_RDI);
3922                         break;
3923                 case OP_X86_LEA:
3924                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3925                         break;
3926                 case OP_X86_LEA_MEMBASE:
3927                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3928                         break;
3929                 case OP_X86_XCHG:
3930                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3931                         break;
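                /*
                 * The requested size is first rounded up to the frame
                 * alignment with the usual (size + align - 1) & ~(align - 1)
                 * trick, e.g. 24 bytes with 16 byte alignment becomes 32.
                 */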
3932                 case OP_LOCALLOC:
3933                         /* keep alignment */
3934                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3935                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3936                         code = mono_emit_stack_alloc (code, ins);
3937                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3938                         break;
3939                 case CEE_RET:
3940                         amd64_ret (code);
3941                         break;
3942                 case CEE_THROW: {
3943                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3944                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3945                                              (gpointer)"mono_arch_throw_exception");
3946                         break;
3947                 }
3948                 case OP_RETHROW: {
3949                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3950                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3951                                              (gpointer)"mono_arch_rethrow_exception");
3952                         break;
3953                 }
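                /*
                 * The call below pushes an 8 byte return address, so rsp is
                 * adjusted by 8 on either side of it to keep the 16 byte
                 * stack alignment inside the handler.
                 */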
3954                 case OP_CALL_HANDLER: 
3955                         /* Align stack */
3956                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
3957                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3958                         amd64_call_imm (code, 0);
3959                         /* Restore stack alignment */
3960                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3961                         break;
3962                 case OP_LABEL:
3963                         ins->inst_c0 = code - cfg->native_code;
3964                         break;
3965                 case CEE_BR:
3966                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3967                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3968                         //break;
3969                         if (ins->flags & MONO_INST_BRLABEL) {
3970                                 if (ins->inst_i0->inst_c0) {
3971                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3972                                 } else {
3973                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3974                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3975                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3976                                                 x86_jump8 (code, 0);
3977                                         else 
3978                                                 x86_jump32 (code, 0);
3979                                 }
3980                         } else {
3981                                 if (ins->inst_target_bb->native_offset) {
3982                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
3983                                 } else {
3984                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3985                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3986                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3987                                                 x86_jump8 (code, 0);
3988                                         else 
3989                                                 x86_jump32 (code, 0);
3990                                 } 
3991                         }
3992                         break;
3993                 case OP_BR_REG:
3994                         amd64_jump_reg (code, ins->sreg1);
3995                         break;
3996                 case OP_CEQ:
3997                 case OP_ICEQ:
3998                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3999                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4000                         break;
4001                 case OP_CLT:
4002                 case OP_ICLT:
4003                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
4004                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4005                         break;
4006                 case OP_CLT_UN:
4007                 case OP_ICLT_UN:
4008                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4009                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4010                         break;
4011                 case OP_CGT:
4012                 case OP_ICGT:
4013                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
4014                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4015                         break;
4016                 case OP_CGT_UN:
4017                 case OP_ICGT_UN:
4018                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4019                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4020                         break;
4021                 case OP_COND_EXC_EQ:
4022                 case OP_COND_EXC_NE_UN:
4023                 case OP_COND_EXC_LT:
4024                 case OP_COND_EXC_LT_UN:
4025                 case OP_COND_EXC_GT:
4026                 case OP_COND_EXC_GT_UN:
4027                 case OP_COND_EXC_GE:
4028                 case OP_COND_EXC_GE_UN:
4029                 case OP_COND_EXC_LE:
4030                 case OP_COND_EXC_LE_UN:
4031                 case OP_COND_EXC_OV:
4032                 case OP_COND_EXC_NO:
4033                 case OP_COND_EXC_C:
4034                 case OP_COND_EXC_NC:
4035                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
4036                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
4037                         break;
4038                 case CEE_BEQ:
4039                 case CEE_BNE_UN:
4040                 case CEE_BLT:
4041                 case CEE_BLT_UN:
4042                 case CEE_BGT:
4043                 case CEE_BGT_UN:
4044                 case CEE_BGE:
4045                 case CEE_BGE_UN:
4046                 case CEE_BLE:
4047                 case CEE_BLE_UN:
4048                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
4049                         break;
4050
4051                 /* floating point opcodes */
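                /*
                 * Two code paths exist for the fp opcodes below: the x87
                 * stack by default, or scalar SSE2 on xmm registers when
                 * use_sse2 is set.
                 */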
4052                 case OP_R8CONST: {
4053                         double d = *(double *)ins->inst_p0;
4054
4055                         if (use_sse2) {
4056                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
4057                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4058                                 }
4059                                 else {
4060                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4061                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4062                                 }
4063                         }
4064                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
4065                                 amd64_fldz (code);
4066                         } else if (d == 1.0) {
4067                                 x86_fld1 (code);
4068                         } else {
4069                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4070                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
4071                         }
4072                         break;
4073                 }
4074                 case OP_R4CONST: {
4075                         float f = *(float *)ins->inst_p0;
4076
4077                         if (use_sse2) {
4078                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
4079                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4080                                 }
4081                                 else {
4082                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4083                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4084                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4085                                 }
4086                         }
4087                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
4088                                 amd64_fldz (code);
4089                         } else if (f == 1.0) {
4090                                 x86_fld1 (code);
4091                         } else {
4092                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4093                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
4094                         }
4095                         break;
4096                 }
4097                 case OP_STORER8_MEMBASE_REG:
4098                         if (use_sse2)
4099                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
4100                         else
4101                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
4102                         break;
4103                 case OP_LOADR8_SPILL_MEMBASE:
4104                         if (use_sse2)
4105                                 g_assert_not_reached ();
4106                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4107                         amd64_fxch (code, 1);
4108                         break;
4109                 case OP_LOADR8_MEMBASE:
4110                         if (use_sse2)
4111                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4112                         else
4113                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4114                         break;
4115                 case OP_STORER4_MEMBASE_REG:
4116                         if (use_sse2) {
4117                                 /* This requires a double->single conversion */
4118                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
4119                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
4120                         }
4121                         else
4122                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
4123                         break;
4124                 case OP_LOADR4_MEMBASE:
4125                         if (use_sse2) {
4126                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4127                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4128                         }
4129                         else
4130                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4131                         break;
4132                 case CEE_CONV_R4: /* FIXME: change precision */
4133                 case CEE_CONV_R8:
4134                         if (use_sse2)
4135                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4136                         else {
4137                                 amd64_push_reg (code, ins->sreg1);
4138                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
4139                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4140                         }
4141                         break;
4142                 case CEE_CONV_R_UN:
4143                         /* Emulated */
4144                         g_assert_not_reached ();
4145                         break;
4146                 case OP_LCONV_TO_R4: /* FIXME: change precision */
4147                 case OP_LCONV_TO_R8:
4148                         if (use_sse2)
4149                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4150                         else {
4151                                 amd64_push_reg (code, ins->sreg1);
4152                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4153                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4154                         }
4155                         break;
4156                 case OP_X86_FP_LOAD_I8:
4157                         if (use_sse2)
4158                                 g_assert_not_reached ();
4159                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4160                         break;
4161                 case OP_X86_FP_LOAD_I4:
4162                         if (use_sse2)
4163                                 g_assert_not_reached ();
4164                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4165                         break;
4166                 case OP_FCONV_TO_I1:
4167                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4168                         break;
4169                 case OP_FCONV_TO_U1:
4170                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4171                         break;
4172                 case OP_FCONV_TO_I2:
4173                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4174                         break;
4175                 case OP_FCONV_TO_U2:
4176                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4177                         break;
4178                 case OP_FCONV_TO_I4:
4179                 case OP_FCONV_TO_I:
4180                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4181                         break;
4182                 case OP_FCONV_TO_I8:
4183                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4184                         break;
4185                 case OP_LCONV_TO_R_UN: { 
4186                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
4187                         guint8 *br;
4188
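                        /*
                         * mn is 2^64 encoded as an 80 bit extended double
                         * (significand 0x8000000000000000, biased exponent
                         * 0x403f). fild reads the 64 bit pattern as signed,
                         * so negative values are corrected by adding 2^64 to
                         * obtain the unsigned interpretation.
                         */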
4189                         if (use_sse2)
4190                                 g_assert_not_reached ();
4191
4192                         /* load 64bit integer to FP stack */
4193                         amd64_push_imm (code, 0);
4194                         amd64_push_reg (code, ins->sreg2);
4195                         amd64_push_reg (code, ins->sreg1);
4196                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4197                         /* store as 80bit FP value */
4198                         x86_fst80_membase (code, AMD64_RSP, 0);
4199                         
4200                         /* test if lreg is negative */
4201                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4202                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
4203         
4204                         /* add correction constant mn */
4205                         x86_fld80_mem (code, mn);
4206                         x86_fld80_membase (code, AMD64_RSP, 0);
4207                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4208                         x86_fst80_membase (code, AMD64_RSP, 0);
4209
4210                         amd64_patch (br, code);
4211
4212                         x86_fld80_membase (code, AMD64_RSP, 0);
4213                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 24); /* pop the three 8 byte pushes */
4214
4215                         break;
4216                 }
4217                 case OP_LCONV_TO_OVF_I: {
4218                         guint8 *br [3], *label [1];
4219
4220                         if (use_sse2)
4221                                 g_assert_not_reached ();
4222
4223                         /* 
4224                          * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
4225                          */
4226                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
4227
4228                         /* If the low word top bit is set, see if we are negative */
4229                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
4230                         /* We are not negative (no top bit set), check that our top word is zero */
4231                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4232                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
4233                         label [0] = code;
4234
4235                         /* throw exception */
4236                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
4237                         x86_jump32 (code, 0);
4238         
4239                         amd64_patch (br [0], code);
4240                         /* our top bit is set, check that top word is 0xffffffff */
4241                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
4242                 
4243                         amd64_patch (br [1], code);
4244                         /* nope, emit exception */
4245                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
4246                         amd64_patch (br [2], label [0]);
4247
4248                         if (ins->dreg != ins->sreg1)
4249                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
4250                         break;
4251                 }
4252                 case CEE_CONV_OVF_U4:
4253                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
4254                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
4255                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4256                         break;
4257                 case CEE_CONV_OVF_I4_UN:
4258                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
4259                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
4260                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4261                         break;
4262                 case OP_FMOVE:
4263                         if (use_sse2 && (ins->dreg != ins->sreg1))
4264                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
4265                         break;
4266                 case OP_FADD:
4267                         if (use_sse2)
4268                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
4269                         else
4270                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4271                         break;
4272                 case OP_FSUB:
4273                         if (use_sse2)
4274                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
4275                         else
4276                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
4277                         break;          
4278                 case OP_FMUL:
4279                         if (use_sse2)
4280                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
4281                         else
4282                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
4283                         break;          
4284                 case OP_FDIV:
4285                         if (use_sse2)
4286                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
4287                         else
4288                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
4289                         break;          
4290                 case OP_FNEG:
4291                         if (use_sse2) {
4292                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
4293                                 amd64_push_reg (code, AMD64_R11);
4294                                 amd64_push_reg (code, AMD64_R11);
4295                                 amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
4296                         }
4297                         else
4298                                 amd64_fchs (code);
4299                         break;          
4300                 case OP_SIN:
4301                         if (use_sse2)
4302                                 g_assert_not_reached ();
4303                         amd64_fsin (code);
4304                         amd64_fldz (code);
4305                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4306                         break;          
4307                 case OP_COS:
4308                         if (use_sse2)
4309                                 g_assert_not_reached ();
4310                         amd64_fcos (code);
4311                         amd64_fldz (code);
4312                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4313                         break;          
4314                 case OP_ABS:
4315                         if (use_sse2)
4316                                 g_assert_not_reached ();
4317                         amd64_fabs (code);
4318                         break;          
4319                 case OP_TAN: {
4320                         /* 
4321                          * it really doesn't make sense to inline all this code,
4322                          * it's here just to show that things may not be as simple 
4323                          * as they appear.
4324                          */
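                        /*
                         * fptan only handles |x| < 2^63 and sets C2 when the
                         * operand is out of range; in that case the argument
                         * is reduced modulo 2*pi (fldpi; fadd st,st doubles
                         * pi) with fprem1 before retrying.
                         */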
4325                         guchar *check_pos, *end_tan, *pop_jump;
4326                         if (use_sse2)
4327                                 g_assert_not_reached ();
4328                         amd64_push_reg (code, AMD64_RAX);
4329                         amd64_fptan (code);
4330                         amd64_fnstsw (code);
4331                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4332                         check_pos = code;
4333                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4334                         amd64_fstp (code, 0); /* pop the 1.0 */
4335                         end_tan = code;
4336                         x86_jump8 (code, 0);
4337                         amd64_fldpi (code);
4338                         amd64_fp_op (code, X86_FADD, 0);
4339                         amd64_fxch (code, 1);
4340                         x86_fprem1 (code);
4341                         amd64_fstsw (code);
4342                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4343                         pop_jump = code;
4344                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4345                         amd64_fstp (code, 1);
4346                         amd64_fptan (code);
4347                         amd64_patch (pop_jump, code);
4348                         amd64_fstp (code, 0); /* pop the 1.0 */
4349                         amd64_patch (check_pos, code);
4350                         amd64_patch (end_tan, code);
4351                         amd64_fldz (code);
4352                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4353                         amd64_pop_reg (code, AMD64_RAX);
4354                         break;
4355                 }
4356                 case OP_ATAN:
4357                         if (use_sse2)
4358                                 g_assert_not_reached ();
4359                         x86_fld1 (code);
4360                         amd64_fpatan (code);
4361                         amd64_fldz (code);
4362                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4363                         break;          
4364                 case OP_SQRT:
4365                         if (use_sse2)
4366                                 g_assert_not_reached ();
4367                         amd64_fsqrt (code);
4368                         break;          
4369                 case OP_X86_FPOP:
4370                         if (!use_sse2)
4371                                 amd64_fstp (code, 0);
4372                         break;          
4373                 case OP_FREM: {
4374                         guint8 *l1, *l2;
4375
4376                         if (use_sse2)
4377                                 g_assert_not_reached ();
4378                         amd64_push_reg (code, AMD64_RAX);
4379                         /* we need to exchange ST(0) with ST(1) */
4380                         amd64_fxch (code, 1);
4381
4382                         /* this requires a loop, because fprem sometimes 
4383                          * returns a partial remainder; it sets C2 in the
                         * fp status word until reduction is complete, which
                         * the fnstsw/and/branch below test for */
4384                         l1 = code;
4385                         /* looks like MS is using fprem instead of the IEEE-compatible fprem1 */
4386                         /* x86_fprem1 (code); */
4387                         amd64_fprem (code);
4388                         amd64_fnstsw (code);
4389                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
4390                         l2 = code + 2;
4391                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
4392
4393                         /* pop result */
4394                         amd64_fstp (code, 1);
4395
4396                         amd64_pop_reg (code, AMD64_RAX);
4397                         break;
4398                 }
4399                 case OP_FCOMPARE:
4400                         if (use_sse2) {
4401                                 amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4402                                 break;
4403                         }
4404                         if (cfg->opt & MONO_OPT_FCMOV) {
4405                                 amd64_fcomip (code, 1);
4406                                 amd64_fstp (code, 0);
4407                                 break;
4408                         }
4409                         /* this overwrites EAX */
4410                         EMIT_FPCOMPARE(code);
4411                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4412                         break;
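                /*
                 * comisd/fcomip raise the parity flag for unordered (NaN)
                 * operands; the X86_CC_P branches in the compare opcodes
                 * below use it to produce the correct result for the _UN
                 * variants.
                 */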
4413                 case OP_FCEQ:
4414                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4415                                 /* zeroing the register at the start results in 
4416                                  * shorter and faster code (we can also remove the widening op)
4417                                  */
4418                                 guchar *unordered_check;
4419                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4420                                 
4421                                 if (use_sse2)
4422                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4423                                 else {
4424                                         amd64_fcomip (code, 1);
4425                                         amd64_fstp (code, 0);
4426                                 }
4427                                 unordered_check = code;
4428                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4429                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
4430                                 amd64_patch (unordered_check, code);
4431                                 break;
4432                         }
4433                         if (ins->dreg != AMD64_RAX) 
4434                                 amd64_push_reg (code, AMD64_RAX);
4435
4436                         EMIT_FPCOMPARE(code);
4437                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4438                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4439                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4440                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4441
4442                         if (ins->dreg != AMD64_RAX) 
4443                                 amd64_pop_reg (code, AMD64_RAX);
4444                         break;
4445                 case OP_FCLT:
4446                 case OP_FCLT_UN:
4447                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4448                                 /* zeroing the register at the start results in 
4449                                  * shorter and faster code (we can also remove the widening op)
4450                                  */
4451                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4452                                 if (use_sse2)
4453                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4454                                 else {
4455                                         amd64_fcomip (code, 1);
4456                                         amd64_fstp (code, 0);
4457                                 }
4458                                 if (ins->opcode == OP_FCLT_UN) {
4459                                         guchar *unordered_check = code;
4460                                         guchar *jump_to_end;
4461                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4462                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4463                                         jump_to_end = code;
4464                                         x86_jump8 (code, 0);
4465                                         amd64_patch (unordered_check, code);
4466                                         amd64_inc_reg (code, ins->dreg);
4467                                         amd64_patch (jump_to_end, code);
4468                                 } else {
4469                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4470                                 }
4471                                 break;
4472                         }
4473                         if (ins->dreg != AMD64_RAX) 
4474                                 amd64_push_reg (code, AMD64_RAX);
4475
4476                         EMIT_FPCOMPARE(code);
4477                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4478                         if (ins->opcode == OP_FCLT_UN) {
4479                                 guchar *is_not_zero_check, *end_jump;
4480                                 is_not_zero_check = code;
4481                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4482                                 end_jump = code;
4483                                 x86_jump8 (code, 0);
4484                                 amd64_patch (is_not_zero_check, code);
4485                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4486
4487                                 amd64_patch (end_jump, code);
4488                         }
4489                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4490                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4491
4492                         if (ins->dreg != AMD64_RAX) 
4493                                 amd64_pop_reg (code, AMD64_RAX);
4494                         break;
4495                 case OP_FCGT:
4496                 case OP_FCGT_UN:
4497                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4498                                 /* zeroing the register at the start results in 
4499                                  * shorter and faster code (we can also remove the widening op)
4500                                  */
4501                                 guchar *unordered_check;
4502                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4503                                 if (use_sse2)
4504                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4505                                 else {
4506                                         amd64_fcomip (code, 1);
4507                                         amd64_fstp (code, 0);
4508                                 }
4509                                 if (ins->opcode == OP_FCGT) {
4510                                         unordered_check = code;
4511                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4512                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4513                                         amd64_patch (unordered_check, code);
4514                                 } else {
4515                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4516                                 }
4517                                 break;
4518                         }
4519                         if (ins->dreg != AMD64_RAX) 
4520                                 amd64_push_reg (code, AMD64_RAX);
4521
4522                         EMIT_FPCOMPARE(code);
4523                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4524                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4525                         if (ins->opcode == OP_FCGT_UN) {
4526                                 guchar *is_not_zero_check, *end_jump;
4527                                 is_not_zero_check = code;
4528                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4529                                 end_jump = code;
4530                                 x86_jump8 (code, 0);
4531                                 amd64_patch (is_not_zero_check, code);
4532                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4533
4534                                 amd64_patch (end_jump, code);
4535                         }
4536                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4537                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4538
4539                         if (ins->dreg != AMD64_RAX) 
4540                                 amd64_pop_reg (code, AMD64_RAX);
4541                         break;
4542                 case OP_FBEQ:
4543                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
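				/* comisd/fcomip also set ZF on an unordered result
				 * (NaN), so the PF check below keeps NaNs from taking
				 * the EQ branch */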
4544                                 guchar *jump = code;
4545                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
4546                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4547                                 amd64_patch (jump, code);
4548                                 break;
4549                         }
4550                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4551                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
4552                         break;
4553                 case OP_FBNE_UN:
4554                         /* Branch if C013 != 100 */
4555                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4556                                 /* branch if !ZF or (PF|CF) */
4557                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4558                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4559                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
4560                                 break;
4561                         }
4562                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4563                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4564                         break;
4565                 case OP_FBLT:
4566                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4567                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4568                                 break;
4569                         }
4570                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4571                         break;
4572                 case OP_FBLT_UN:
4573                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4574                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4575                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4576                                 break;
4577                         }
4578                         if (ins->opcode == OP_FBLT_UN) {
4579                                 guchar *is_not_zero_check, *end_jump;
4580                                 is_not_zero_check = code;
4581                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4582                                 end_jump = code;
4583                                 x86_jump8 (code, 0);
4584                                 amd64_patch (is_not_zero_check, code);
4585                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4586
4587                                 amd64_patch (end_jump, code);
4588                         }
4589                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4590                         break;
4591                 case OP_FBGT:
4592                 case OP_FBGT_UN:
4593                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4594                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
4595                                 break;
4596                         }
4597                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4598                         if (ins->opcode == OP_FBGT_UN) {
4599                                 guchar *is_not_zero_check, *end_jump;
4600                                 is_not_zero_check = code;
4601                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4602                                 end_jump = code;
4603                                 x86_jump8 (code, 0);
4604                                 amd64_patch (is_not_zero_check, code);
4605                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4606
4607                                 amd64_patch (end_jump, code);
4608                         }
4609                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4610                         break;
4611                 case OP_FBGE:
4612                         /* Branch if C013 == 100 or 001 */
4613                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4614                                 guchar *br1;
4615
4616                         /* skip branch if the result is unordered (PF set, i.e. C2=1) */
4617                                 br1 = code;
4618                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4619                                 /* branch if (C0 | C3) = 1 */
4620                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
4621                                 amd64_patch (br1, code);
4622                                 break;
4623                         }
4624                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4625                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4626                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4627                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4628                         break;
4629                 case OP_FBGE_UN:
4630                         /* Branch if C013 == 000 */
4631                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4632                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
4633                                 break;
4634                         }
4635                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4636                         break;
4637                 case OP_FBLE:
4638                         /* Branch if C013=000 or 100 */
4639                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4640                                 guchar *br1;
4641
4642                         /* skip branch if the result is unordered (PF set, i.e. C2=1) */
4643                                 br1 = code;
4644                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4645                                 /* branch if C0=0 */
4646                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
4647                                 amd64_patch (br1, code);
4648                                 break;
4649                         }
4650                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
4651                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4652                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4653                         break;
4654                 case OP_FBLE_UN:
4655                         /* Branch if C013 != 001 */
4656                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4657                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4658                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
4659                                 break;
4660                         }
4661                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4662                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4663                         break;
4664                 case CEE_CKFINITE: {
4665                         if (use_sse2) {
4666                                 /* Transfer value to the fp stack */
4667                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4668                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
4669                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
4670                         }
4671                         amd64_push_reg (code, AMD64_RAX);
4672                         amd64_fxam (code);
4673                         amd64_fnstsw (code);
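			/*
			 * fxam sets C0 for NaN/infinity and C3 for zero/empty, so
			 * after masking with C3|C0 (0x4100) a value equal to C0
			 * alone means NaN or +/-infinity, which is exactly what
			 * ckfinite must reject.
			 */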
4674                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
4675                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4676                         amd64_pop_reg (code, AMD64_RAX);
4677                         if (use_sse2) {
4678                                 amd64_fstp (code, 0);
4679                         }                               
4680                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
4681                         break;
4682                 }
4683                 case OP_TLS_GET: {
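			/* Load a TLS slot through the %fs segment; the emitted
			 * instruction is, in effect:
			 *     mov %fs:<inst_offset>, <dreg>
			 */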
4684                         x86_prefix (code, X86_FS_PREFIX);
4685                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
4686                         break;
4687                 }
4688                 case OP_ATOMIC_ADD_I4:
4689                 case OP_ATOMIC_ADD_I8: {
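			/*
			 * lock xadd stores the old memory value into dreg and
			 * writes old+sreg2 back to memory, so dreg ends up holding
			 * the value from *before* the addition; the _NEW variant
			 * below adds sreg2 once more to return the updated value.
			 */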
4690                         int dreg = ins->dreg;
4691                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
4692
4693                         if (dreg == ins->inst_basereg)
4694                                 dreg = AMD64_R11;
4695                         
4696                         if (dreg != ins->sreg2)
4697                                 amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4698
4699                         x86_prefix (code, X86_LOCK_PREFIX);
4700                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4701
4702                         if (dreg != ins->dreg)
4703                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4704
4705                         break;
4706                 }
4707                 case OP_ATOMIC_ADD_NEW_I4:
4708                 case OP_ATOMIC_ADD_NEW_I8: {
4709                         int dreg = ins->dreg;
4710                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
4711
4712                         if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
4713                                 dreg = AMD64_R11;
4714
4715                         amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4716                         amd64_prefix (code, X86_LOCK_PREFIX);
4717                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4718                         /* dreg contains the old value, add with sreg2 value */
4719                         amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
4720                         
4721                         if (ins->dreg != dreg)
4722                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4723
4724                         break;
4725                 }
4726                 case OP_ATOMIC_EXCHANGE_I4:
4727                 case OP_ATOMIC_EXCHANGE_I8: {
4728                         guchar *br[2];
4729                         int sreg2 = ins->sreg2;
4730                         int breg = ins->inst_basereg;
4731                         guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
4732
4733                         /* 
4734                          * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
4735                          * an explanation of how this works.
4736                          */
4737
4738                         /* cmpxchg uses eax as its comparand, so we need to make sure we can use it;
4739                          * this is a hack to overcome limits in the x86 reg allocator
4740                          * (req: dreg == eax and sreg2 != eax and breg != eax)
4741                          */
4742                         if (ins->dreg != AMD64_RAX)
4743                                 amd64_push_reg (code, AMD64_RAX);
4744                         
4745                         /* We need the EAX reg for the cmpxchg */
4746                         if (ins->sreg2 == AMD64_RAX) {
4747                                 amd64_push_reg (code, AMD64_RDX);
4748                                 amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
4749                                 sreg2 = AMD64_RDX;
4750                         }
4751
4752                         if (breg == AMD64_RAX) {
4753                                 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, size);
4754                                 breg = AMD64_R11;
4755                         }
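			/*
			 * The emitted retry loop is, in effect:
			 *
			 *     mov  <offset>(breg), %rax
			 * 1:  lock cmpxchg sreg2, <offset>(breg)
			 *     jne  1b
			 *
			 * On failure cmpxchg reloads %rax with the current memory
			 * value, so the branch only needs to re-run the cmpxchg.
			 */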
4756
4757                         amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
4758
4759                         br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
4760                         amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
4761                         br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
4762                         amd64_patch (br [1], br [0]);
4763
4764                         if (ins->dreg != AMD64_RAX) {
4765                                 amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
4766                                 amd64_pop_reg (code, AMD64_RAX);
4767                         }
4768
4769                         if (ins->sreg2 != sreg2)
4770                                 amd64_pop_reg (code, AMD64_RDX);
4771
4772                         break;
4773                 }
4774                 default:
4775                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4776                         g_assert_not_reached ();
4777                 }
4778
4779                 if ((code - cfg->native_code - offset) > max_len) {
4780                         g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4781                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4782                         g_assert_not_reached ();
4783                 }
4784                
4785                 cpos += max_len;
4786
4787                 last_ins = ins;
4788                 last_offset = offset;
4789                 
4790                 ins = ins->next;
4791         }
4792
4793         cfg->code_len = code - cfg->native_code;
4794 }
4795
4796 void
4797 mono_arch_register_lowlevel_calls (void)
4798 {
4799 }
4800
4801 void
4802 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4803 {
4804         MonoJumpInfo *patch_info;
4805
4806         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4807                 unsigned char *ip = patch_info->ip.i + code;
4808                 const unsigned char *target;
4809
4810                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4811
4812                 if (mono_compile_aot) {
4813                         switch (patch_info->type) {
4814                         case MONO_PATCH_INFO_BB:
4815                         case MONO_PATCH_INFO_LABEL:
4816                                 break;
4817                         default: {
4818                                 /* Just to make code running at AOT time work: route the patch through a reserved data slot */
4819                                 const unsigned char **tmp;
4820
4821                                 mono_domain_lock (domain);
4822                                 tmp = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer));
4823                                 mono_domain_unlock (domain);
4824
4825                                 *tmp = target;
4826                                 target = (const unsigned char*)(guint64)((guint8*)tmp - (guint8*)ip);
4827                                 break;
4828                         }
4829                         }
4830                 }
4831
4832                 switch (patch_info->type) {
4833                 case MONO_PATCH_INFO_NONE:
4834                         continue;
4835                 case MONO_PATCH_INFO_CLASS_INIT: {
4836                         /* Might already have been changed to a nop */
4837                         guint8* ip2 = ip;
4838                         if (mono_compile_aot)
4839                                 amd64_call_membase (ip2, AMD64_RIP, 0);
4840                         else {
4841                                 amd64_call_code (ip2, 0);
4842                         }
4843                         break;
4844                 }
4845                 case MONO_PATCH_INFO_METHOD_REL:
4846                 case MONO_PATCH_INFO_R8:
4847                 case MONO_PATCH_INFO_R4:
4848                         g_assert_not_reached ();
4849                         continue;
4850                 case MONO_PATCH_INFO_BB:
4851                         break;
4852                 default:
4853                         break;
4854                 }
4855                 amd64_patch (ip, (gpointer)target);
4856         }
4857 }
4858
4859 guint8 *
4860 mono_arch_emit_prolog (MonoCompile *cfg)
4861 {
4862         MonoMethod *method = cfg->method;
4863         MonoBasicBlock *bb;
4864         MonoMethodSignature *sig;
4865         MonoInst *inst;
4866         int alloc_size, pos, max_offset, i, quad;
4867         guint8 *code;
4868         CallInfo *cinfo;
4869
4870         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
4871         code = cfg->native_code = g_malloc (cfg->code_size);
4872
4873         amd64_push_reg (code, AMD64_RBP);
4874         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
4875
4876         /* Stack alignment check */
4877 #if 0
4878         {
4879                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4880                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4881                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4882                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4883                 amd64_breakpoint (code);
4884         }
4885 #endif
4886
4887         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
4888         pos = 0;
4889
4890         if (method->save_lmf) {
4891                 gint32 lmf_offset;
4892
4893                 pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
4894
4895                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
4896
4897                 lmf_offset = - cfg->arch.lmf_offset;
4898
4899                 /* Save ip */
4900                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
4901                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
4902                 /* Save fp */
4903                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
4904                 /* Save method */
4905                 /* FIXME: add a relocation for this */
4906                 if (IS_IMM32 (cfg->method))
4907                         amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
4908                 else {
4909                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
4910                         amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
4911                 }
4912                 /* Save callee saved regs */
4913                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
4914                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
4915                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
4916                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
4917                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
4918         } else {
4919
4920                 for (i = 0; i < AMD64_NREG; ++i)
4921                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4922                                 amd64_push_reg (code, i);
4923                                 pos += sizeof (gpointer);
4924                         }
4925         }
4926
4927         alloc_size -= pos;
4928
4929         if (alloc_size) {
4930                 /* See mono_emit_stack_alloc */
4931 #ifdef PLATFORM_WIN32
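		/*
		 * Windows commits stack one guard page at a time, so a frame
		 * larger than a page must touch every 4K page it allocates:
		 */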
4932                 guint32 remaining_size = alloc_size;
4933                 while (remaining_size >= 0x1000) {
4934                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
4935                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
4936                         remaining_size -= 0x1000;
4937                 }
4938                 if (remaining_size)
4939                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
4940 #else
4941                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
4942 #endif
4943         }
4944
4945         /* compute max_offset in order to use short forward jumps */
4946         max_offset = 0;
4947         if (cfg->opt & MONO_OPT_BRANCH) {
4948                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4949                         MonoInst *ins = bb->code;
4950                         bb->max_offset = max_offset;
4951
4952                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4953                                 max_offset += 6;
4954                         /* max alignment for loops */
4955                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4956                                 max_offset += LOOP_ALIGNMENT;
4957
4958                         while (ins) {
4959                                 if (ins->opcode == OP_LABEL)
4960                                         ins->inst_c1 = max_offset;
4961                                 
4962                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
4963                                 ins = ins->next;
4964                         }
4965                 }
4966         }
4967
4968         sig = mono_method_signature (method);
4969         pos = 0;
4970
4971         cinfo = get_call_info (sig, FALSE);
4972
4973         if (sig->ret->type != MONO_TYPE_VOID) {
4974                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
4975                         /* Save volatile arguments to the stack */
4976                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
4977                 }
4978         }
4979
4980         /* Keep this in sync with emit_load_volatile_arguments */
4981         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4982                 ArgInfo *ainfo = cinfo->args + i;
4983                 gint32 stack_offset;
4984                 MonoType *arg_type;
4985                 inst = cfg->varinfo [i];
4986
4987                 if (sig->hasthis && (i == 0))
4988                         arg_type = &mono_defaults.object_class->byval_arg;
4989                 else
4990                         arg_type = sig->params [i - sig->hasthis];
4991
4992                 stack_offset = ainfo->offset + ARGS_OFFSET;
4993
4994                 /* Save volatile arguments to the stack */
4995                 if (inst->opcode != OP_REGVAR) {
4996                         switch (ainfo->storage) {
4997                         case ArgInIReg: {
4998                                 guint32 size = 8;
4999
5000                                 /* FIXME: I1 etc */
5001                                 /*
5002                                 if (stack_offset & 0x1)
5003                                         size = 1;
5004                                 else if (stack_offset & 0x2)
5005                                         size = 2;
5006                                 else if (stack_offset & 0x4)
5007                                         size = 4;
5008                                 else
5009                                         size = 8;
5010                                 */
5011                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
5012                                 break;
5013                         }
5014                         case ArgInFloatSSEReg:
5015                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5016                                 break;
5017                         case ArgInDoubleSSEReg:
5018                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5019                                 break;
5020                         case ArgValuetypeInReg:
5021                                 for (quad = 0; quad < 2; quad ++) {
5022                                         switch (ainfo->pair_storage [quad]) {
5023                                         case ArgInIReg:
5024                                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
5025                                                 break;
5026                                         case ArgInFloatSSEReg:
5027                                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5028                                                 break;
5029                                         case ArgInDoubleSSEReg:
5030                                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5031                                                 break;
5032                                         case ArgNone:
5033                                                 break;
5034                                         default:
5035                                                 g_assert_not_reached ();
5036                                         }
5037                                 }
5038                                 break;
5039                         default:
5040                                 break;
5041                         }
5042                 }
5043
5044                 if (inst->opcode == OP_REGVAR) {
5045                         /* Argument allocated to (non-volatile) register */
5046                         switch (ainfo->storage) {
5047                         case ArgInIReg:
5048                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
5049                                 break;
5050                         case ArgOnStack:
5051                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
5052                                 break;
5053                         default:
5054                                 g_assert_not_reached ();
5055                         }
5056                 }
5057         }
5058
5059         if (method->save_lmf) {
5060                 gint32 lmf_offset;
5061
5062                 if (lmf_tls_offset != -1) {
5063                         /* Load the lmf address quickly using the FS register */
5064                         x86_prefix (code, X86_FS_PREFIX);
5065                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
5066                 }
5067                 else {
5068                         /* 
5069                          * The call might clobber argument registers, but they are already
5070                          * saved to the stack/global regs.
5071                          */
5072
5073                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
5074                                                                  (gpointer)"mono_get_lmf_addr");                
5075                 }
5076
5077                 lmf_offset = - cfg->arch.lmf_offset;
5078
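		/* In effect this pushes the frame onto the per-thread LMF list:
		 *     lmf->lmf_addr = lmf_addr;
		 *     lmf->previous_lmf = *lmf_addr;
		 *     *lmf_addr = lmf;
		 */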
5079                 /* Save lmf_addr */
5080                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
5081                 /* Save previous_lmf */
5082                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
5083                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
5084                 /* Set new lmf */
5085                 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
5086                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
5087         }
5088
5089
5090         g_free (cinfo);
5091
5092         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5093                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5094
5095         cfg->code_len = code - cfg->native_code;
5096
5097         g_assert (cfg->code_len < cfg->code_size);
5098
5099         return code;
5100 }
5101
5102 void
5103 mono_arch_emit_epilog (MonoCompile *cfg)
5104 {
5105         MonoMethod *method = cfg->method;
5106         int quad, pos, i;
5107         guint8 *code;
5108         int max_epilog_size = 16;
5109         CallInfo *cinfo;
5110         
5111         if (cfg->method->save_lmf)
5112                 max_epilog_size += 256;
5113         
5114         if (mono_jit_trace_calls != NULL)
5115                 max_epilog_size += 50;
5116
5117         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5118                 max_epilog_size += 50;
5119
5120         max_epilog_size += (AMD64_NREG * 2);
5121
5122         while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5123                 cfg->code_size *= 2;
5124                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5125                 mono_jit_stats.code_reallocs++;
5126         }
5127
5128         code = cfg->native_code + cfg->code_len;
5129
5130         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5131                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5132
5133         /* the code restoring the registers must be kept in sync with CEE_JMP */
5134         pos = 0;
5135         
5136         if (method->save_lmf) {
5137                 gint32 lmf_offset = - cfg->arch.lmf_offset;
5138
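		/* In effect: *lmf->lmf_addr = lmf->previous_lmf, popping this
		 * frame off the per-thread LMF list */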
5139                 /* Restore previous lmf */
5140                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
5141                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
5142                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
5143
5144                 /* Restore callee saved regs */
5145                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
5146                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
5147                 }
5148                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
5149                         amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
5150                 }
5151                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
5152                         amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
5153                 }
5154                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
5155                         amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
5156                 }
5157                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
5158                         amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
5159                 }
5160         } else {
5161
5162                 for (i = 0; i < AMD64_NREG; ++i)
5163                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
5164                                 pos -= sizeof (gpointer);
5165
5166                 if (pos) {
5167                         if (pos == - sizeof (gpointer)) {
5168                                 /* Only one register, so avoid lea */
5169                                 for (i = AMD64_NREG - 1; i > 0; --i)
5170                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5171                                                 amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
5172                                         }
5173                         }
5174                         else {
5175                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
5176
5177                                 /* Pop registers in reverse order */
5178                                 for (i = AMD64_NREG - 1; i > 0; --i)
5179                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5180                                                 amd64_pop_reg (code, i);
5181                                         }
5182                         }
5183                 }
5184         }
5185
5186         /* Load returned vtypes into registers if needed */
5187         cinfo = get_call_info (mono_method_signature (method), FALSE);
5188         if (cinfo->ret.storage == ArgValuetypeInReg) {
5189                 ArgInfo *ainfo = &cinfo->ret;
5190                 MonoInst *inst = cfg->ret;
5191
5192                 for (quad = 0; quad < 2; quad ++) {
5193                         switch (ainfo->pair_storage [quad]) {
5194                         case ArgInIReg:
5195                                 amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
5196                                 break;
5197                         case ArgInFloatSSEReg:
5198                                 amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5199                                 break;
5200                         case ArgInDoubleSSEReg:
5201                                 amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5202                                 break;
5203                         case ArgNone:
5204                                 break;
5205                         default:
5206                                 g_assert_not_reached ();
5207                         }
5208                 }
5209         }
5210         g_free (cinfo);
5211
5212         amd64_leave (code);
5213         amd64_ret (code);
5214
5215         cfg->code_len = code - cfg->native_code;
5216
5217         g_assert (cfg->code_len < cfg->code_size);
5218
5219 }
5220
5221 void
5222 mono_arch_emit_exceptions (MonoCompile *cfg)
5223 {
5224         MonoJumpInfo *patch_info;
5225         int nthrows, i;
5226         guint8 *code;
5227         MonoClass *exc_classes [16];
5228         guint8 *exc_throw_start [16], *exc_throw_end [16];
5229         guint32 code_size = 0;
5230
5231         /* Compute needed space */
5232         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5233                 if (patch_info->type == MONO_PATCH_INFO_EXC)
5234                         code_size += 40;
5235                 if (patch_info->type == MONO_PATCH_INFO_R8)
5236                         code_size += 8 + 7; /* sizeof (double) + alignment */
5237                 if (patch_info->type == MONO_PATCH_INFO_R4)
5238                         code_size += 4 + 7; /* sizeof (float) + alignment */
5239         }
5240
5241         while (cfg->code_len + code_size > (cfg->code_size - 16)) {
5242                 cfg->code_size *= 2;
5243                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5244                 mono_jit_stats.code_reallocs++;
5245         }
5246
5247         code = cfg->native_code + cfg->code_len;
5248
5249         /* add code to raise exceptions */
5250         nthrows = 0;
5251         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5252                 switch (patch_info->type) {
5253                 case MONO_PATCH_INFO_EXC: {
5254                         MonoClass *exc_class;
5255                         guint8 *buf, *buf2;
5256                         guint32 throw_ip;
5257
5258                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
5259
5260                         exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5261                         g_assert (exc_class);
5262                         throw_ip = patch_info->ip.i;
5263
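			/*
			 * Throw sequences are shared per exception class: the first
			 * site emits the actual call to the throw helper, and later
			 * sites just load their IP delta into %rsi and jump to it,
			 * so each class pays for the full sequence only once.
			 */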
5264                         //x86_breakpoint (code);
5265                         /* Find a throw sequence for the same exception class */
5266                         for (i = 0; i < nthrows; ++i)
5267                                 if (exc_classes [i] == exc_class)
5268                                         break;
5269                         if (i < nthrows) {
5270                                 amd64_mov_reg_imm (code, AMD64_RSI, (exc_throw_end [i] - cfg->native_code) - throw_ip);
5271                                 x86_jump_code (code, exc_throw_start [i]);
5272                                 patch_info->type = MONO_PATCH_INFO_NONE;
5273                         }
5274                         else {
5275                                 buf = code;
5276                                 amd64_mov_reg_imm_size (code, AMD64_RSI, 0xf0f0f0f0, 4);
5277                                 buf2 = code;
5278
5279                                 if (nthrows < 16) {
5280                                         exc_classes [nthrows] = exc_class;
5281                                         exc_throw_start [nthrows] = code;
5282                                 }
5283
5284                                 amd64_mov_reg_imm (code, AMD64_RDI, exc_class->type_token);
5285                                 patch_info->data.name = "mono_arch_throw_corlib_exception";
5286                                 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5287                                 patch_info->ip.i = code - cfg->native_code;
5288
5289                                 if (mono_compile_aot)
5290                                         amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
5291                                 else
5292                                         amd64_set_reg_template (code, GP_SCRATCH_REG);
5293                                 amd64_call_reg (code, GP_SCRATCH_REG);
5294
5295                                 amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
5296                                 while (buf < buf2)
5297                                         x86_nop (buf);
5298
5299                                 if (nthrows < 16) {
5300                                         exc_throw_end [nthrows] = code;
5301                                         nthrows ++;
5302                                 }
5303                         }
5304                         break;
5305                 }
5306                 default:
5307                         /* do nothing */
5308                         break;
5309                 }
5310         }
5311
5312         /* Handle relocations with RIP relative addressing */
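	/*
	 * R4/R8 constants are emitted here, after the method body, and the
	 * disp32 of the instruction that references them is back-patched to
	 * reach them RIP-relatively; the patch offsets differ because the
	 * SSE load encoding is one byte longer than the x87 one.
	 */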
5313         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5314                 gboolean remove = FALSE;
5315
5316                 switch (patch_info->type) {
5317                 case MONO_PATCH_INFO_R8: {
5318                         guint8 *pos;
5319
5320                         code = (guint8*)ALIGN_TO (code, 8);
5321
5322                         pos = cfg->native_code + patch_info->ip.i;
5323
5324                         *(double*)code = *(double*)patch_info->data.target;
5325
5326                         if (use_sse2)
5327                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5328                         else
5329                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5330                         code += 8;
5331
5332                         remove = TRUE;
5333                         break;
5334                 }
5335                 case MONO_PATCH_INFO_R4: {
5336                         guint8 *pos;
5337
5338                         code = (guint8*)ALIGN_TO (code, 8);
5339
5340                         pos = cfg->native_code + patch_info->ip.i;
5341
5342                         *(float*)code = *(float*)patch_info->data.target;
5343
5344                         if (use_sse2)
5345                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5346                         else
5347                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5348                         code += 4;
5349
5350                         remove = TRUE;
5351                         break;
5352                 }
5353                 default:
5354                         break;
5355                 }
5356
5357                 if (remove) {
5358                         if (patch_info == cfg->patch_info)
5359                                 cfg->patch_info = patch_info->next;
5360                         else {
5361                                 MonoJumpInfo *tmp;
5362
5363                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
5364                                         ;
5365                                 tmp->next = patch_info->next;
5366                         }
5367                 }
5368         }
5369
5370         cfg->code_len = code - cfg->native_code;
5371
5372         g_assert (cfg->code_len < cfg->code_size);
5373
5374 }
5375
5376 void*
5377 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5378 {
5379         guchar *code = p;
5380         CallInfo *cinfo;
5381         MonoMethodSignature *sig;
5382         MonoInst *inst;
5383         int i, n, stack_area = 0;
5384
5385         /* Keep this in sync with mono_arch_get_argument_info */
5386
5387         if (enable_arguments) {
5388                 /* Allocate a new area on the stack and save arguments there */
5389                 sig = mono_method_signature (cfg->method);
5390
5391                 cinfo = get_call_info (sig, FALSE);
5392
5393                 n = sig->param_count + sig->hasthis;
5394
5395                 stack_area = ALIGN_TO (n * 8, 16);
5396
5397                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
5398
5399                 for (i = 0; i < n; ++i) {
5400                         inst = cfg->varinfo [i];
5401
5402                         if (inst->opcode == OP_REGVAR)
5403                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
5404                         else {
5405                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
5406                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
5407                         }
5408                 }
5409         }
5410
5411         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
5412         amd64_set_reg_template (code, AMD64_RDI);
5413         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
5414         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5415
5416         if (enable_arguments) {
5417                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
5418
5419                 g_free (cinfo);
5420         }
5421
5422         return code;
5423 }
5424
5425 enum {
5426         SAVE_NONE,
5427         SAVE_STRUCT,
5428         SAVE_EAX,
5429         SAVE_EAX_EDX,
5430         SAVE_XMM
5431 };
5432
5433 void*
5434 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5435 {
5436         guchar *code = p;
5437         int save_mode = SAVE_NONE;
5438         MonoMethod *method = cfg->method;
5439         int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
5440         
5441         switch (rtype) {
5442         case MONO_TYPE_VOID:
5443                 /* special case string .ctor icall */
5444                 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
5445                         save_mode = SAVE_EAX;
5446                 else
5447                         save_mode = SAVE_NONE;
5448                 break;
5449         case MONO_TYPE_I8:
5450         case MONO_TYPE_U8:
5451                 save_mode = SAVE_EAX;
5452                 break;
5453         case MONO_TYPE_R4:
5454         case MONO_TYPE_R8:
5455                 save_mode = SAVE_XMM;
5456                 break;
5457         case MONO_TYPE_VALUETYPE:
5458                 save_mode = SAVE_STRUCT;
5459                 break;
5460         default:
5461                 save_mode = SAVE_EAX;
5462                 break;
5463         }
5464
5465         /* Save the result and copy it into the proper argument register */
5466         switch (save_mode) {
5467         case SAVE_EAX:
5468                 amd64_push_reg (code, AMD64_RAX);
5469                 /* Align stack */
5470                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5471                 if (enable_arguments)
5472                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
5473                 break;
5474         case SAVE_STRUCT:
5475                 /* FIXME: */
5476                 if (enable_arguments)
5477                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
5478                 break;
5479         case SAVE_XMM:
5480                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5481                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
5482                 /* Align stack */
5483                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5484                 /* 
5485                  * The result is already in the proper argument register so no copying
5486                  * needed.
5487                  */
5488                 break;
5489         case SAVE_NONE:
5490                 break;
5491         default:
5492                 g_assert_not_reached ();
5493         }
5494
5495         /* Set %al (the number of vector registers used) since this is a varargs call */
5496         if (save_mode == SAVE_XMM)
5497                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
5498         else
5499                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
5500
5501         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
5502         amd64_set_reg_template (code, AMD64_RDI);
5503         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5504
5505         /* Restore result */
5506         switch (save_mode) {
5507         case SAVE_EAX:
5508                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5509                 amd64_pop_reg (code, AMD64_RAX);
5510                 break;
5511         case SAVE_STRUCT:
5512                 /* FIXME: */
5513                 break;
5514         case SAVE_XMM:
5515                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5516                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
5517                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5518                 break;
5519         case SAVE_NONE:
5520                 break;
5521         default:
5522                 g_assert_not_reached ();
5523         }
5524
5525         return code;
5526 }
5527
5528 void
5529 mono_arch_flush_icache (guint8 *code, gint size)
5530 {
5531         /* Not needed */
5532 }
5533
5534 void
5535 mono_arch_flush_register_windows (void)
5536 {
5537 }
5538
5539 gboolean 
5540 mono_arch_is_inst_imm (gint64 imm)
5541 {
5542         return amd64_is_imm32 (imm);
5543 }
5544
5545 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
5546
5547 static int reg_to_ucontext_reg [] = {
5548         REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
5549         REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
5550         REG_RIP
5551 };
5552
5553 /*
5554  * Determine whether the trap whose info is in SIGINFO is caused by
5555  * integer overflow.
5556  */
5557 gboolean
5558 mono_arch_is_int_overflow (void *sigctx, void *info)
5559 {
5560         ucontext_t *ctx = (ucontext_t*)sigctx;
5561         guint8* rip;
5562         int reg;
5563
5564         rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
5565
5566         if (IS_REX (rip [0])) {
5567                 reg = amd64_rex_r (rip [0]);
5568                 rip ++;
5569         }
5570         else
5571                 reg = 0;
5572
5573         if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
5574                 /* idiv REG */
5575                 reg += x86_modrm_rm (rip [1]);
5576
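		/* A divisor of -1 means the #DE trap came from INT_MIN / -1
		 * overflow rather than from a division by zero */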
5577                 if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
5578                         return TRUE;
5579         }
5580
5581         return FALSE;
5582 }
5583
5584 guint32
5585 mono_arch_get_patch_offset (guint8 *code)
5586 {
5587         return 3;
5588 }
5589
5590 gpointer*
5591 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
5592 {
5593         guint32 reg;
5594         guint32 disp;
5595         guint8 rex = 0;
5596
5597         /* go to the start of the call instruction
5598          *
5599          * address_byte = (m << 6) | (o << 3) | reg
5600          * call opcode: 0xff address_byte displacement
5601          * 0xff m=1,o=2 imm8
5602          * 0xff m=2,o=2 imm32
5603          */
5604         code -= 7;
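	/* The longest call encoding handled below (REX + ff /2 + disp32) is
	 * 7 bytes, so code - 7 is the earliest possible instruction start;
	 * the patterns below index relative to that. */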
5605
5606         if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
5607                 /* call *%reg */
5608                 return NULL;
5609         }
5610         else if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
5611                 /* call OFFSET(%rip) */
5612                 return NULL;
5613         }
5614         else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
5615                 /* call *[reg+disp32] */
5616                 if (IS_REX (code [0]))
5617                         rex = code [0];
5618                 reg = amd64_modrm_rm (code [2]);
5619                 disp = *(guint32*)(code + 3);
5620                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5621         }
5622         else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
5623                 /* call *[reg+disp8] */
5624                 if (IS_REX (code [3]))
5625                         rex = code [3];
5626                 reg = amd64_modrm_rm (code [5]);
5627                 disp = *(guint8*)(code + 6);
5628                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5629         }
5630         else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
5631                 /*
5632                  * This is an interface call; we should check that the cases above cannot catch it earlier.
5633                  * 8b 40 30   mov    0x30(%eax),%eax
5634                  * ff 10      call   *(%eax)
5635                  */
5636                 if (IS_REX (code [4]))
5637                         rex = code [4];
5638                 reg = amd64_modrm_rm (code [6]);
5639                 disp = 0;
5640         }
	else if (code [2] == 0xe8)
		/* call <ADDR> */
		return NULL;
	else
		g_assert_not_reached ();

	reg += amd64_rex_b (rex);

	/* R11 is clobbered by the trampoline code */
	g_assert (reg != AMD64_R11);

	return (gpointer)(((guint64)(regs [reg])) + disp);
}

gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
{
	guint32 reg;
	guint32 disp;

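	/*
	 * Step back over the 10 byte sequence matched below:
	 * mov REG, %rax (3 bytes); mov <disp8>(%rax), %rax (4 bytes);
	 * call *%rax (3 bytes).
	 */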
	code -= 10;

	if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
		/* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
		reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
		disp = code [6];

		if (reg == AMD64_RAX)
			return NULL;
		else
			return (gpointer*)(((guint64)(regs [reg])) + disp);
	}

	return NULL;
}

/*
 * Support for fast access to the thread-local lmf structure using the FS
 * segment register on NPTL + kernel 2.6.x.
 */

static gboolean tls_offset_inited = FALSE;

/* METHOD's code should be simply return <tls var>; */
static int
read_tls_offset_from_method (void* method)
{
	guint8 *code = (guint8*)method;

	/*
	 * Determine the offset of the TLS variable returned by METHOD inside
	 * the TLS structures by disassembling its code.
	 */
	/* This is generated by gcc 3.3.2 */
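	/*
	 *   55                            push   %rbp
	 *   48 89 e5                      mov    %rsp,%rbp
	 *   64 48 8b 04 25 00 00 00 00    mov    %fs:0x0,%rax
	 *   48 8b 80 xx xx xx xx          mov    <offset>(%rax),%rax
	 */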
	if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
		(code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
		(code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
		(code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
		(code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
		(code [15] == 0x80)) {
		return *(gint32*)&(code [16]);
	} else if
		/* This is generated by gcc-3.3.2 with -O2 */
		/* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
		((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
		 (code [3] == 0x04) && (code [4] == 0x25) &&
		 (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
		 (code [16] == 0xc3)) {
			return *(gint32*)&(code [12]);
	} else if
		/* This is generated by gcc-3.4.1 */
		((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
		 (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
		 (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
		 (code [13] == 0xc9) && (code [14] == 0xc3)) {
			return *(gint32*)&(code [9]);
	} else if
		/* This is generated by gcc-3.4.1 with -O2 */
		((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
		 (code [3] == 0x04) && (code [4] == 0x25)) {
		return *(gint32*)&(code [5]);
	}

	return -1;
}

void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
	pthread_t self = pthread_self();
	pthread_attr_t attr;
	void *staddr = NULL;
	size_t stsize = 0;
	struct sigaltstack sa;
#endif

	if (!tls_offset_inited) {
		tls_offset_inited = TRUE;

		lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
		appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
		//thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
	}

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK

	/* Determine stack boundaries */
	if (!mono_running_on_valgrind ()) {
#ifdef HAVE_PTHREAD_GETATTR_NP
		pthread_getattr_np( self, &attr );
#else
#ifdef HAVE_PTHREAD_ATTR_GET_NP
		pthread_attr_get_np( self, &attr );
#elif defined(sun)
		pthread_attr_init( &attr );
		pthread_attr_getstacksize( &attr, &stsize );
#else
#error "Not implemented"
#endif
#endif
#ifndef sun
		pthread_attr_getstack( &attr, &staddr, &stsize );
#endif
	}

	/*
	 * staddr seems to be wrong for the main thread, so we keep the value in
	 * tls->end_of_stack
	 */
	tls->stack_size = stsize;

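	/*
	 * The SIGSEGV handler must run on its own stack: on a stack overflow the
	 * faulting thread has no usable stack left for the signal frame.
	 */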
	/* Setup an alternate signal stack */
	tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
	tls->signal_stack_size = SIGNAL_STACK_SIZE;

	sa.ss_sp = tls->signal_stack;
	sa.ss_size = SIGNAL_STACK_SIZE;
	sa.ss_flags = SS_ONSTACK;
	sigaltstack (&sa, NULL);
#endif
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
	struct sigaltstack sa;

	sa.ss_sp = tls->signal_stack;
	sa.ss_size = SIGNAL_STACK_SIZE;
	sa.ss_flags = SS_DISABLE;
	sigaltstack (&sa, NULL);

	if (tls->signal_stack)
		g_free (tls->signal_stack);
#endif
}

void
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
	MonoCallInst *call = (MonoCallInst*)inst;
	int out_reg = param_regs [0];
	guint64 regpair;

	if (vt_reg != -1) {
		CallInfo * cinfo = get_call_info (inst->signature, FALSE);
		MonoInst *vtarg;

		if (cinfo->ret.storage == ArgValuetypeInReg) {
			/*
			 * The valuetype is in RAX:RDX after the call and needs to be
			 * copied to the stack. Push the address here, so the call
			 * instruction can access it.
			 */
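			/*
			 * Note that the x86 push opcode and X86_ESP are reused here:
			 * AMD64_RSP has the same register number (4) as X86_ESP, so
			 * these macros end up operating on %rsp.
			 */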
			MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
			vtarg->sreg1 = vt_reg;
			mono_bblock_add_inst (cfg->cbb, vtarg);

			/* Align stack */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
		}
		else {
			MONO_INST_NEW (cfg, vtarg, OP_SETREG);
			vtarg->sreg1 = vt_reg;
			vtarg->dreg = mono_regstate_next_int (cfg->rs);
			mono_bblock_add_inst (cfg->cbb, vtarg);

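			/*
			 * Encode the (hard reg, vreg) argument pair: the hard register
			 * in the upper 32 bits, the virtual register in the lower 32.
			 */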
			regpair = (((guint64)out_reg) << 32) + vtarg->dreg;
			call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));

			out_reg = param_regs [1];
		}

		g_free (cinfo);
	}

	/* add the this argument */
	if (this_reg != -1) {
		MonoInst *this;
		MONO_INST_NEW (cfg, this, OP_SETREG);
		this->type = this_type;
		this->sreg1 = this_reg;
		this->dreg = mono_regstate_next_int (cfg->rs);
		mono_bblock_add_inst (cfg->cbb, this);

		regpair = (((guint64)out_reg) << 32) + this->dreg;
		call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
	}
}

MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

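	/*
	 * The math intrinsics below map to x87 instructions (fsin, fcos,
	 * fsqrt, ...), so they are unavailable when fp arithmetic is done in
	 * SSE2 registers.
	 */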
	if (use_sse2)
		return NULL;

	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
			MONO_INST_NEW (cfg, ins, OP_SIN);
			ins->inst_i0 = args [0];
		} else if (strcmp (cmethod->name, "Cos") == 0) {
			MONO_INST_NEW (cfg, ins, OP_COS);
			ins->inst_i0 = args [0];
		} else if (strcmp (cmethod->name, "Tan") == 0) {
			MONO_INST_NEW (cfg, ins, OP_TAN);
			ins->inst_i0 = args [0];
		} else if (strcmp (cmethod->name, "Atan") == 0) {
			MONO_INST_NEW (cfg, ins, OP_ATAN);
			ins->inst_i0 = args [0];
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
			MONO_INST_NEW (cfg, ins, OP_SQRT);
			ins->inst_i0 = args [0];
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
			MONO_INST_NEW (cfg, ins, OP_ABS);
			ins->inst_i0 = args [0];
		}
#if 0
		/* OP_FREM is not IEEE compatible */
		else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
			MONO_INST_NEW (cfg, ins, OP_FREM);
			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		}
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {

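		/*
		 * Increment and Decrement become an atomic add of +1/-1 that
		 * yields the new value; Add and Exchange map the managed arguments
		 * directly onto the corresponding atomic opcodes.
		 */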
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
			ins_iconst->inst_c0 = 1;

			ins->inst_i0 = args [0];
			ins->inst_i1 = ins_iconst;
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
			ins_iconst->inst_c0 = -1;

			ins->inst_i0 = args [0];
			ins->inst_i1 = ins_iconst;
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_I8;
			else
				g_assert_not_reached ();

			MONO_INST_NEW (cfg, ins, opcode);

			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		} else if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
			else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
					 (fsig->params [0]->type == MONO_TYPE_I) ||
					 (fsig->params [0]->type == MONO_TYPE_OBJECT))
				opcode = OP_ATOMIC_EXCHANGE_I8;
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);

			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		} else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* Aligned 64 bit reads are already atomic on amd64, so a plain load suffices */
			MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
			ins->inst_i0 = args [0];
		}

		/*
		 * Can't implement CompareExchange methods this way since they have
		 * three arguments.
		 */
	}

	return ins;
}

gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return FALSE;
}

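/*
 * Return an instruction computing the current domain/thread from TLS, using
 * the offsets discovered in mono_arch_setup_jit_tls_data (), or NULL when the
 * offset could not be determined, in which case the caller is expected to
 * fall back to the non-intrinsic path.
 */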
MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (appdomain_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = appdomain_tls_offset;
	return ins;
}

MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (thread_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = thread_tls_offset;
	return ins;
}