Don't run test-318 with gmcs.
[mono.git] / mono / mini / mini-amd64.c
1 /*
2  * mini-amd64.c: AMD64 backend for the Mono code generator
3  *
4  * Based on mini-x86.c.
5  *
6  * Authors:
7  *   Paolo Molaro (lupus@ximian.com)
8  *   Dietmar Maurer (dietmar@ximian.com)
9  *   Patrik Torstensson
10  *
11  * (C) 2003 Ximian, Inc.
12  */
13 #include "mini.h"
14 #include <string.h>
15 #include <math.h>
16
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/metadata/profiler-private.h>
21 #include <mono/utils/mono-math.h>
22
23 #include "trace.h"
24 #include "mini-amd64.h"
25 #include "inssel.h"
26 #include "cpu-amd64.h"
27
28 static gint lmf_tls_offset = -1;
29 static gint appdomain_tls_offset = -1;
30 static gint thread_tls_offset = -1;
31
32 /* Use SSE2 instructions for fp arithmetic */
33 static gboolean use_sse2 = FALSE;
34
35 /* xmm15 is reserved for use by some opcodes */
36 #define AMD64_CALLEE_FREGS 0xef
37
38 #define FPSTACK_SIZE 6
39
40 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
41
42 #define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
43
44 #ifdef PLATFORM_WIN32
45 /* Under windows, the default pinvoke calling convention is stdcall */
46 #define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
47 #else
48 #define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
49 #endif
50
51 #define SIGNAL_STACK_SIZE (64 * 1024)
52
53 #define ARGS_OFFSET 16
54 #define GP_SCRATCH_REG AMD64_R11
55
56 /*
57  * AMD64 register usage:
58  * - callee saved registers are used for global register allocation
59  * - %r11 is used for materializing 64 bit constants in opcodes
60  * - the rest is used for local allocation
61  */
62
63 /*
64  * FIXME: 
65  * - Use xmm registers instead of the x87 stack
66  * - Allocate arguments to global registers
67  * - implement emulated opcodes
68  * - (all archs) do not store trampoline addresses in method->info since they
69  *   are domain specific.   
70  */
71
72 #define NOT_IMPLEMENTED g_assert_not_reached ()
73
74 const char*
75 mono_arch_regname (int reg) {
76         switch (reg) {
77         case AMD64_RAX: return "%rax";
78         case AMD64_RBX: return "%rbx";
79         case AMD64_RCX: return "%rcx";
80         case AMD64_RDX: return "%rdx";
81         case AMD64_RSP: return "%rsp";  
82         case AMD64_RBP: return "%rbp";
83         case AMD64_RDI: return "%rdi";
84         case AMD64_RSI: return "%rsi";
85         case AMD64_R8: return "%r8";
86         case AMD64_R9: return "%r9";
87         case AMD64_R10: return "%r10";
88         case AMD64_R11: return "%r11";
89         case AMD64_R12: return "%r12";
90         case AMD64_R13: return "%r13";
91         case AMD64_R14: return "%r14";
92         case AMD64_R15: return "%r15";
93         }
94         return "unknown";
95 }
96
97 static const char * xmmregs [] = {
98         "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
99         "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
100 };
101
102 static const char*
103 mono_arch_fregname (int reg)
104 {
105         if (reg < AMD64_XMM_NREG)
106                 return xmmregs [reg];
107         else
108                 return "unknown";
109 }
110
111 static const char*
112 mono_amd64_regname (int reg, gboolean fp)
113 {
114         if (fp)
115                 return mono_arch_fregname (reg);
116         else
117                 return mono_arch_regname (reg);
118 }
119
120 static inline void 
121 amd64_patch (unsigned char* code, gpointer target)
122 {
123         /* Skip REX */
124         if ((code [0] >= 0x40) && (code [0] <= 0x4f))
125                 code += 1;
126
127         if (code [0] == 0xbb) {
128                 /* amd64_set_reg_template */
129                 *(guint64*)(code + 1) = (guint64)target;
130         }
131         else
132                 x86_patch (code, (unsigned char*)target);
133 }
134
135 typedef enum {
136         ArgInIReg,
137         ArgInFloatSSEReg,
138         ArgInDoubleSSEReg,
139         ArgOnStack,
140         ArgValuetypeInReg,
141         ArgNone /* only in pair_storage */
142 } ArgStorage;
143
144 typedef struct {
145         gint16 offset;
146         gint8  reg;
147         ArgStorage storage;
148
149         /* Only if storage == ArgValuetypeInReg */
150         ArgStorage pair_storage [2];
151         gint8 pair_regs [2];
152 } ArgInfo;
153
154 typedef struct {
155         int nargs;
156         guint32 stack_usage;
157         guint32 reg_usage;
158         guint32 freg_usage;
159         gboolean need_stack_align;
160         ArgInfo ret;
161         ArgInfo sig_cookie;
162         ArgInfo args [1];
163 } CallInfo;
164
165 #define DEBUG(a) if (cfg->verbose_level > 1) a
166
167 #define NEW_ICONST(cfg,dest,val) do {   \
168                 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
169                 (dest)->opcode = OP_ICONST;     \
170                 (dest)->inst_c0 = (val);        \
171                 (dest)->type = STACK_I4;        \
172         } while (0)
173
174 #define PARAM_REGS 6
175
176 static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };
177
178 static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
179
180 static void inline
181 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
182 {
183     ainfo->offset = *stack_size;
184
185     if (*gr >= PARAM_REGS) {
186                 ainfo->storage = ArgOnStack;
187                 (*stack_size) += sizeof (gpointer);
188     }
189     else {
190                 ainfo->storage = ArgInIReg;
191                 ainfo->reg = param_regs [*gr];
192                 (*gr) ++;
193     }
194 }
195
196 #define FLOAT_PARAM_REGS 8
197
198 static void inline
199 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
200 {
201     ainfo->offset = *stack_size;
202
203     if (*gr >= FLOAT_PARAM_REGS) {
204                 ainfo->storage = ArgOnStack;
205                 (*stack_size) += sizeof (gpointer);
206     }
207     else {
208                 /* A double register */
209                 if (is_double)
210                         ainfo->storage = ArgInDoubleSSEReg;
211                 else
212                         ainfo->storage = ArgInFloatSSEReg;
213                 ainfo->reg = *gr;
214                 (*gr) += 1;
215     }
216 }
217
218 typedef enum ArgumentClass {
219         ARG_CLASS_NO_CLASS,
220         ARG_CLASS_MEMORY,
221         ARG_CLASS_INTEGER,
222         ARG_CLASS_SSE
223 } ArgumentClass;
224
225 static ArgumentClass
226 merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
227 {
228         ArgumentClass class2;
229         MonoType *ptype;
230
231         ptype = mono_type_get_underlying_type (type);
232         switch (ptype->type) {
233         case MONO_TYPE_BOOLEAN:
234         case MONO_TYPE_CHAR:
235         case MONO_TYPE_I1:
236         case MONO_TYPE_U1:
237         case MONO_TYPE_I2:
238         case MONO_TYPE_U2:
239         case MONO_TYPE_I4:
240         case MONO_TYPE_U4:
241         case MONO_TYPE_I:
242         case MONO_TYPE_U:
243         case MONO_TYPE_STRING:
244         case MONO_TYPE_OBJECT:
245         case MONO_TYPE_CLASS:
246         case MONO_TYPE_SZARRAY:
247         case MONO_TYPE_PTR:
248         case MONO_TYPE_FNPTR:
249         case MONO_TYPE_ARRAY:
250         case MONO_TYPE_I8:
251         case MONO_TYPE_U8:
252                 class2 = ARG_CLASS_INTEGER;
253                 break;
254         case MONO_TYPE_R4:
255         case MONO_TYPE_R8:
256                 class2 = ARG_CLASS_SSE;
257                 break;
258
259         case MONO_TYPE_TYPEDBYREF:
260                 g_assert_not_reached ();
261
262         case MONO_TYPE_VALUETYPE: {
263                 MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
264                 int i;
265
266                 for (i = 0; i < info->num_fields; ++i) {
267                         class2 = class1;
268                         class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
269                 }
270                 break;
271         }
272         default:
273                 g_assert_not_reached ();
274         }
275
276         /* Merge */
277         if (class1 == class2)
278                 ;
279         else if (class1 == ARG_CLASS_NO_CLASS)
280                 class1 = class2;
281         else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
282                 class1 = ARG_CLASS_MEMORY;
283         else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
284                 class1 = ARG_CLASS_INTEGER;
285         else
286                 class1 = ARG_CLASS_SSE;
287
288         return class1;
289 }
290
291 static void
292 add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
293                gboolean is_return,
294                guint32 *gr, guint32 *fr, guint32 *stack_size)
295 {
296         guint32 size, quad, nquads, i;
297         ArgumentClass args [2];
298         MonoMarshalType *info;
299         MonoClass *klass;
300
301         klass = mono_class_from_mono_type (type);
302         if (sig->pinvoke) 
303                 size = mono_type_native_stack_size (&klass->byval_arg, NULL);
304         else 
305                 size = mono_type_stack_size (&klass->byval_arg, NULL);
306
307         if (!sig->pinvoke || (size == 0) || (size > 16)) {
308                 /* Allways pass in memory */
309                 ainfo->offset = *stack_size;
310                 *stack_size += ALIGN_TO (size, 8);
311                 ainfo->storage = ArgOnStack;
312
313                 return;
314         }
315
316         /* FIXME: Handle structs smaller than 8 bytes */
317         //if ((size % 8) != 0)
318         //      NOT_IMPLEMENTED;
319
320         if (size > 8)
321                 nquads = 2;
322         else
323                 nquads = 1;
324
325         /*
326          * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
327          * The X87 and SSEUP stuff is left out since there are no such types in
328          * the CLR.
329          */
330         info = mono_marshal_load_type_info (klass);
331         g_assert (info);
332         if (info->native_size > 16) {
333                 ainfo->offset = *stack_size;
334                 *stack_size += ALIGN_TO (info->native_size, 8);
335                 ainfo->storage = ArgOnStack;
336
337                 return;
338         }
339
340         for (quad = 0; quad < nquads; ++quad) {
341                 int size, align;
342                 ArgumentClass class1;
343                 
344                 class1 = ARG_CLASS_NO_CLASS;
345                 for (i = 0; i < info->num_fields; ++i) {
346                         size = mono_marshal_type_size (info->fields [i].field->type, 
347                                                                                    info->fields [i].mspec, 
348                                                                                    &align, TRUE, klass->unicode);
349                         if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
350                                 /* Unaligned field */
351                                 NOT_IMPLEMENTED;
352                         }
353
354                         /* Skip fields in other quad */
355                         if ((quad == 0) && (info->fields [i].offset >= 8))
356                                 continue;
357                         if ((quad == 1) && (info->fields [i].offset < 8))
358                                 continue;
359
360                         class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
361                 }
362                 g_assert (class1 != ARG_CLASS_NO_CLASS);
363                 args [quad] = class1;
364         }
365
366         /* Post merger cleanup */
367         if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
368                 args [0] = args [1] = ARG_CLASS_MEMORY;
369
370         /* Allocate registers */
371         {
372                 int orig_gr = *gr;
373                 int orig_fr = *fr;
374
375                 ainfo->storage = ArgValuetypeInReg;
376                 ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
377                 for (quad = 0; quad < nquads; ++quad) {
378                         switch (args [quad]) {
379                         case ARG_CLASS_INTEGER:
380                                 if (*gr >= PARAM_REGS)
381                                         args [quad] = ARG_CLASS_MEMORY;
382                                 else {
383                                         ainfo->pair_storage [quad] = ArgInIReg;
384                                         if (is_return)
385                                                 ainfo->pair_regs [quad] = return_regs [*gr];
386                                         else
387                                                 ainfo->pair_regs [quad] = param_regs [*gr];
388                                         (*gr) ++;
389                                 }
390                                 break;
391                         case ARG_CLASS_SSE:
392                                 if (*fr >= FLOAT_PARAM_REGS)
393                                         args [quad] = ARG_CLASS_MEMORY;
394                                 else {
395                                         ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
396                                         ainfo->pair_regs [quad] = *fr;
397                                         (*fr) ++;
398                                 }
399                                 break;
400                         case ARG_CLASS_MEMORY:
401                                 break;
402                         default:
403                                 g_assert_not_reached ();
404                         }
405                 }
406
407                 if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
408                         /* Revert possible register assignments */
409                         *gr = orig_gr;
410                         *fr = orig_fr;
411
412                         ainfo->offset = *stack_size;
413                         *stack_size += ALIGN_TO (info->native_size, 8);
414                         ainfo->storage = ArgOnStack;
415                 }
416         }
417 }
418
419 /*
420  * get_call_info:
421  *
422  *  Obtain information about a call according to the calling convention.
423  * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement 
424  * Draft Version 0.23" document for more information.
425  */
426 static CallInfo*
427 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
428 {
429         guint32 i, gr, fr;
430         MonoType *ret_type;
431         int n = sig->hasthis + sig->param_count;
432         guint32 stack_size = 0;
433         CallInfo *cinfo;
434
435         cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
436
437         gr = 0;
438         fr = 0;
439
440         /* return value */
441         {
442                 ret_type = mono_type_get_underlying_type (sig->ret);
443                 switch (ret_type->type) {
444                 case MONO_TYPE_BOOLEAN:
445                 case MONO_TYPE_I1:
446                 case MONO_TYPE_U1:
447                 case MONO_TYPE_I2:
448                 case MONO_TYPE_U2:
449                 case MONO_TYPE_CHAR:
450                 case MONO_TYPE_I4:
451                 case MONO_TYPE_U4:
452                 case MONO_TYPE_I:
453                 case MONO_TYPE_U:
454                 case MONO_TYPE_PTR:
455                 case MONO_TYPE_CLASS:
456                 case MONO_TYPE_OBJECT:
457                 case MONO_TYPE_SZARRAY:
458                 case MONO_TYPE_ARRAY:
459                 case MONO_TYPE_STRING:
460                         cinfo->ret.storage = ArgInIReg;
461                         cinfo->ret.reg = AMD64_RAX;
462                         break;
463                 case MONO_TYPE_U8:
464                 case MONO_TYPE_I8:
465                         cinfo->ret.storage = ArgInIReg;
466                         cinfo->ret.reg = AMD64_RAX;
467                         break;
468                 case MONO_TYPE_R4:
469                         cinfo->ret.storage = ArgInFloatSSEReg;
470                         cinfo->ret.reg = AMD64_XMM0;
471                         break;
472                 case MONO_TYPE_R8:
473                         cinfo->ret.storage = ArgInDoubleSSEReg;
474                         cinfo->ret.reg = AMD64_XMM0;
475                         break;
476                 case MONO_TYPE_VALUETYPE: {
477                         guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
478
479                         add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
480                         if (cinfo->ret.storage == ArgOnStack)
481                                 /* The caller passes the address where the value is stored */
482                                 add_general (&gr, &stack_size, &cinfo->ret);
483                         break;
484                 }
485                 case MONO_TYPE_TYPEDBYREF:
486                         /* Same as a valuetype with size 24 */
487                         add_general (&gr, &stack_size, &cinfo->ret);
488                         ;
489                         break;
490                 case MONO_TYPE_VOID:
491                         break;
492                 default:
493                         g_error ("Can't handle as return value 0x%x", sig->ret->type);
494                 }
495         }
496
497         /* this */
498         if (sig->hasthis)
499                 add_general (&gr, &stack_size, cinfo->args + 0);
500
501         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
502                 gr = PARAM_REGS;
503                 fr = FLOAT_PARAM_REGS;
504                 
505                 /* Emit the signature cookie just before the implicit arguments */
506                 add_general (&gr, &stack_size, &cinfo->sig_cookie);
507         }
508
509         for (i = 0; i < sig->param_count; ++i) {
510                 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
511                 MonoType *ptype;
512
513                 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
514                         /* We allways pass the sig cookie on the stack for simplicity */
515                         /* 
516                          * Prevent implicit arguments + the sig cookie from being passed 
517                          * in registers.
518                          */
519                         gr = PARAM_REGS;
520                         fr = FLOAT_PARAM_REGS;
521
522                         /* Emit the signature cookie just before the implicit arguments */
523                         add_general (&gr, &stack_size, &cinfo->sig_cookie);
524                 }
525
526                 if (sig->params [i]->byref) {
527                         add_general (&gr, &stack_size, ainfo);
528                         continue;
529                 }
530                 ptype = mono_type_get_underlying_type (sig->params [i]);
531                 switch (ptype->type) {
532                 case MONO_TYPE_BOOLEAN:
533                 case MONO_TYPE_I1:
534                 case MONO_TYPE_U1:
535                         add_general (&gr, &stack_size, ainfo);
536                         break;
537                 case MONO_TYPE_I2:
538                 case MONO_TYPE_U2:
539                 case MONO_TYPE_CHAR:
540                         add_general (&gr, &stack_size, ainfo);
541                         break;
542                 case MONO_TYPE_I4:
543                 case MONO_TYPE_U4:
544                         add_general (&gr, &stack_size, ainfo);
545                         break;
546                 case MONO_TYPE_I:
547                 case MONO_TYPE_U:
548                 case MONO_TYPE_PTR:
549                 case MONO_TYPE_CLASS:
550                 case MONO_TYPE_OBJECT:
551                 case MONO_TYPE_STRING:
552                 case MONO_TYPE_SZARRAY:
553                 case MONO_TYPE_ARRAY:
554                         add_general (&gr, &stack_size, ainfo);
555                         break;
556                 case MONO_TYPE_VALUETYPE:
557                         add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
558                         break;
559                 case MONO_TYPE_TYPEDBYREF:
560                         stack_size += sizeof (MonoTypedRef);
561                         ainfo->storage = ArgOnStack;
562                         break;
563                 case MONO_TYPE_U8:
564                 case MONO_TYPE_I8:
565                         add_general (&gr, &stack_size, ainfo);
566                         break;
567                 case MONO_TYPE_R4:
568                         add_float (&fr, &stack_size, ainfo, FALSE);
569                         break;
570                 case MONO_TYPE_R8:
571                         add_float (&fr, &stack_size, ainfo, TRUE);
572                         break;
573                 default:
574                         g_assert_not_reached ();
575                 }
576         }
577
578         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
579                 gr = PARAM_REGS;
580                 fr = FLOAT_PARAM_REGS;
581                 
582                 /* Emit the signature cookie just before the implicit arguments */
583                 add_general (&gr, &stack_size, &cinfo->sig_cookie);
584         }
585
586         if (stack_size & 0x8) {
587                 /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
588                 cinfo->need_stack_align = TRUE;
589                 stack_size += 8;
590         }
591
592         cinfo->stack_usage = stack_size;
593         cinfo->reg_usage = gr;
594         cinfo->freg_usage = fr;
595         return cinfo;
596 }
597
598 /*
599  * mono_arch_get_argument_info:
600  * @csig:  a method signature
601  * @param_count: the number of parameters to consider
602  * @arg_info: an array to store the result infos
603  *
604  * Gathers information on parameters such as size, alignment and
605  * padding. arg_info should be large enought to hold param_count + 1 entries. 
606  *
607  * Returns the size of the argument area on the stack.
608  */
609 int
610 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
611 {
612         int k;
613         CallInfo *cinfo = get_call_info (csig, FALSE);
614         guint32 args_size = cinfo->stack_usage;
615
616         /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
617         if (csig->hasthis) {
618                 arg_info [0].offset = 0;
619         }
620
621         for (k = 0; k < param_count; k++) {
622                 arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
623                 /* FIXME: */
624                 arg_info [k + 1].size = 0;
625         }
626
627         g_free (cinfo);
628
629         return args_size;
630 }
631
632 static int 
633 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
634 {
635         return 0;
636 }
637
638 /*
639  * Initialize the cpu to execute managed code.
640  */
641 void
642 mono_arch_cpu_init (void)
643 {
644         guint16 fpcw;
645
646         /* spec compliance requires running with double precision */
647         __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
648         fpcw &= ~X86_FPCW_PRECC_MASK;
649         fpcw |= X86_FPCW_PREC_DOUBLE;
650         __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
651         __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
652
653         mono_amd64_exceptions_init ();
654         mono_amd64_tramp_init ();
655 }
656
657 /*
658  * This function returns the optimizations supported on this cpu.
659  */
660 guint32
661 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
662 {
663         int eax, ebx, ecx, edx;
664         guint32 opts = 0;
665
666         /* FIXME: AMD64 */
667
668         *exclude_mask = 0;
669         /* Feature Flags function, flags returned in EDX. */
670         if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
671                 if (edx & (1 << 15)) {
672                         opts |= MONO_OPT_CMOV;
673                         if (edx & 1)
674                                 opts |= MONO_OPT_FCMOV;
675                         else
676                                 *exclude_mask |= MONO_OPT_FCMOV;
677                 } else
678                         *exclude_mask |= MONO_OPT_CMOV;
679         }
680         return opts;
681 }
682
683 static gboolean
684 is_regsize_var (MonoType *t) {
685         if (t->byref)
686                 return TRUE;
687         t = mono_type_get_underlying_type (t);
688         switch (t->type) {
689         case MONO_TYPE_I4:
690         case MONO_TYPE_U4:
691         case MONO_TYPE_I:
692         case MONO_TYPE_U:
693         case MONO_TYPE_PTR:
694                 return TRUE;
695         case MONO_TYPE_OBJECT:
696         case MONO_TYPE_STRING:
697         case MONO_TYPE_CLASS:
698         case MONO_TYPE_SZARRAY:
699         case MONO_TYPE_ARRAY:
700                 return TRUE;
701         case MONO_TYPE_VALUETYPE:
702                 return FALSE;
703         }
704         return FALSE;
705 }
706
707 GList *
708 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
709 {
710         GList *vars = NULL;
711         int i;
712
713         for (i = 0; i < cfg->num_varinfo; i++) {
714                 MonoInst *ins = cfg->varinfo [i];
715                 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
716
717                 /* unused vars */
718                 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
719                         continue;
720
721                 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || 
722                     (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
723                         continue;
724
725                 /* we dont allocate I1 to registers because there is no simply way to sign extend 
726                  * 8bit quantities in caller saved registers on x86 */
727                 if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) || 
728                     (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2)||
729                     (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
730                         g_assert (MONO_VARINFO (cfg, i)->reg == -1);
731                         g_assert (i == vmv->idx);
732                         vars = g_list_prepend (vars, vmv);
733                 }
734         }
735
736         vars = mono_varlist_sort (cfg, vars, 0);
737
738         return vars;
739 }
740
741 GList *
742 mono_arch_get_global_int_regs (MonoCompile *cfg)
743 {
744         GList *regs = NULL;
745
746         /* We use the callee saved registers for global allocation */
747         regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
748         regs = g_list_prepend (regs, (gpointer)AMD64_R12);
749         regs = g_list_prepend (regs, (gpointer)AMD64_R13);
750         regs = g_list_prepend (regs, (gpointer)AMD64_R14);
751         regs = g_list_prepend (regs, (gpointer)AMD64_R15);
752
753         return regs;
754 }
755
756 /*
757  * mono_arch_regalloc_cost:
758  *
759  *  Return the cost, in number of memory references, of the action of 
760  * allocating the variable VMV into a register during global register
761  * allocation.
762  */
763 guint32
764 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
765 {
766         MonoInst *ins = cfg->varinfo [vmv->idx];
767
768         if (cfg->method->save_lmf)
769                 /* The register is already saved */
770                 /* substract 1 for the invisible store in the prolog */
771                 return (ins->opcode == OP_ARG) ? 0 : 1;
772         else
773                 /* push+pop */
774                 return (ins->opcode == OP_ARG) ? 1 : 2;
775 }
776  
777 void
778 mono_arch_allocate_vars (MonoCompile *m)
779 {
780         MonoMethodSignature *sig;
781         MonoMethodHeader *header;
782         MonoInst *inst;
783         int i, offset, size, align, curinst;
784         CallInfo *cinfo;
785
786         header = mono_method_get_header (m->method);
787
788         sig = m->method->signature;
789
790         cinfo = get_call_info (sig, FALSE);
791
792         /*
793          * We use the ABI calling conventions for managed code as well.
794          * Exception: valuetypes are never passed or returned in registers.
795          */
796
797         /* Locals are allocated backwards from %fp */
798         m->frame_reg = AMD64_RBP;
799         offset = 0;
800
801         /* Reserve space for caller saved registers */
802         for (i = 0; i < AMD64_NREG; ++i)
803                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
804                         offset += sizeof (gpointer);
805                 }
806
807         if (m->method->save_lmf) {
808                 /* Reserve stack space for saving LMF + argument regs */
809                 offset += sizeof (MonoLMF);
810                 if (lmf_tls_offset == -1)
811                         /* Need to save argument regs too */
812                         offset += (AMD64_NREG * 8) + (8 * 8);
813                 m->arch.lmf_offset = offset;
814         }
815
816         if (sig->ret->type != MONO_TYPE_VOID) {
817                 switch (cinfo->ret.storage) {
818                 case ArgInIReg:
819                 case ArgInFloatSSEReg:
820                 case ArgInDoubleSSEReg:
821                         if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
822                                 /* The register is volatile */
823                                 m->ret->opcode = OP_REGOFFSET;
824                                 m->ret->inst_basereg = AMD64_RBP;
825                                 offset += 8;
826                                 m->ret->inst_offset = - offset;
827                         }
828                         else {
829                                 m->ret->opcode = OP_REGVAR;
830                                 m->ret->inst_c0 = cinfo->ret.reg;
831                         }
832                         break;
833                 default:
834                         g_assert_not_reached ();
835                 }
836                 m->ret->dreg = m->ret->inst_c0;
837         }
838
839         curinst = m->locals_start;
840         for (i = curinst; i < m->num_varinfo; ++i) {
841                 inst = m->varinfo [i];
842
843                 if (inst->opcode == OP_REGVAR) {
844                         //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
845                         continue;
846                 }
847
848                 /* inst->unused indicates native sized value types, this is used by the
849                 * pinvoke wrappers when they call functions returning structure */
850                 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
851                         size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
852                 else
853                         size = mono_type_stack_size (inst->inst_vtype, &align);
854
855                 /*
856                  * variables are accessed as negative offsets from %fp, so increase
857                  * the offset before assigning it to a variable
858                  */
859                 offset += size;
860
861                 offset += align - 1;
862                 offset &= ~(align - 1);
863                 inst->opcode = OP_REGOFFSET;
864                 inst->inst_basereg = AMD64_RBP;
865                 inst->inst_offset = - offset;
866
867                 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
868         }
869
870         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
871                 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
872                 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
873         }
874
875         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
876                 inst = m->varinfo [i];
877                 if (inst->opcode != OP_REGVAR) {
878                         ArgInfo *ainfo = &cinfo->args [i];
879                         gboolean inreg = TRUE;
880                         MonoType *arg_type;
881
882                         if (sig->hasthis && (i == 0))
883                                 arg_type = &mono_defaults.object_class->byval_arg;
884                         else
885                                 arg_type = sig->params [i - sig->hasthis];
886
887                         /* FIXME: Allocate volatile arguments to registers */
888                         if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
889                                 inreg = FALSE;
890
891                         /* 
892                          * Under AMD64, all registers used to pass arguments to functions
893                          * are volatile across calls.
894                          * FIXME: Optimize this.
895                          */
896                         if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg))
897                                 inreg = FALSE;
898
899                         inst->opcode = OP_REGOFFSET;
900
901                         switch (ainfo->storage) {
902                         case ArgInIReg:
903                         case ArgInFloatSSEReg:
904                         case ArgInDoubleSSEReg:
905                                 inst->opcode = OP_REGVAR;
906                                 inst->dreg = ainfo->reg;
907                                 break;
908                         case ArgOnStack:
909                                 inst->opcode = OP_REGOFFSET;
910                                 inst->inst_basereg = AMD64_RBP;
911                                 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
912                                 break;
913                         default:
914                                 NOT_IMPLEMENTED;
915                         }
916
917                         if (!inreg && (ainfo->storage != ArgOnStack)) {
918                                 inst->opcode = OP_REGOFFSET;
919                                 inst->inst_basereg = AMD64_RBP;
920                                 /* These arguments are saved to the stack in the prolog */
921                                 offset += 8;
922                                 inst->inst_offset = - offset;
923                         }
924                 }
925         }
926
927         m->stack_offset = offset;
928
929         g_free (cinfo);
930 }
931
932 static void
933 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
934 {
935         switch (storage) {
936         case ArgInIReg:
937                 arg->opcode = OP_OUTARG_REG;
938                 arg->inst_left = tree;
939                 arg->inst_right = (MonoInst*)call;
940                 arg->unused = reg;
941                 call->used_iregs |= 1 << reg;
942                 break;
943         case ArgInFloatSSEReg:
944                 arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
945                 arg->inst_left = tree;
946                 arg->inst_right = (MonoInst*)call;
947                 arg->unused = reg;
948                 call->used_fregs |= 1 << reg;
949                 break;
950         case ArgInDoubleSSEReg:
951                 arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
952                 arg->inst_left = tree;
953                 arg->inst_right = (MonoInst*)call;
954                 arg->unused = reg;
955                 call->used_fregs |= 1 << reg;
956                 break;
957         default:
958                 g_assert_not_reached ();
959         }
960 }
961
962 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
963  * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info 
964  */
965
966 static int
967 arg_storage_to_ldind (ArgStorage storage)
968 {
969         switch (storage) {
970         case ArgInIReg:
971                 return CEE_LDIND_I;
972         case ArgInDoubleSSEReg:
973                 return CEE_LDIND_R8;
974         case ArgInFloatSSEReg:
975                 return CEE_LDIND_R4;
976         default:
977                 g_assert_not_reached ();
978         }
979
980         return -1;
981 }
982
983 /* 
984  * take the arguments and generate the arch-specific
985  * instructions to properly call the function in call.
986  * This includes pushing, moving arguments to the right register
987  * etc.
988  * Issue: who does the spilling if needed, and when?
989  */
990 MonoCallInst*
991 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
992         MonoInst *arg, *in;
993         MonoMethodSignature *sig;
994         int i, n, stack_size;
995         CallInfo *cinfo;
996         ArgInfo *ainfo;
997
998         stack_size = 0;
999
1000         sig = call->signature;
1001         n = sig->param_count + sig->hasthis;
1002
1003         cinfo = get_call_info (sig, sig->pinvoke);
1004
1005         for (i = 0; i < n; ++i) {
1006                 ainfo = cinfo->args + i;
1007
1008                 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1009                         MonoMethodSignature *tmp_sig;
1010                         
1011                         /* Emit the signature cookie just before the implicit arguments */
1012                         MonoInst *sig_arg;
1013                         /* FIXME: Add support for signature tokens to AOT */
1014                         cfg->disable_aot = TRUE;
1015
1016                         g_assert (cinfo->sig_cookie.storage == ArgOnStack);
1017
1018                         /*
1019                          * mono_ArgIterator_Setup assumes the signature cookie is 
1020                          * passed first and all the arguments which were before it are
1021                          * passed on the stack after the signature. So compensate by 
1022                          * passing a different signature.
1023                          */
1024                         tmp_sig = mono_metadata_signature_dup (call->signature);
1025                         tmp_sig->param_count -= call->signature->sentinelpos;
1026                         tmp_sig->sentinelpos = 0;
1027                         memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1028
1029                         MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1030                         sig_arg->inst_p0 = tmp_sig;
1031
1032                         MONO_INST_NEW (cfg, arg, OP_OUTARG);
1033                         arg->inst_left = sig_arg;
1034                         arg->type = STACK_PTR;
1035
1036                         /* prepend, so they get reversed */
1037                         arg->next = call->out_args;
1038                         call->out_args = arg;
1039                 }
1040
1041                 if (is_virtual && i == 0) {
1042                         /* the argument will be attached to the call instruction */
1043                         in = call->args [i];
1044                 } else {
1045                         MONO_INST_NEW (cfg, arg, OP_OUTARG);
1046                         in = call->args [i];
1047                         arg->cil_code = in->cil_code;
1048                         arg->inst_left = in;
1049                         arg->type = in->type;
1050                         /* prepend, so they get reversed */
1051                         arg->next = call->out_args;
1052                         call->out_args = arg;
1053
1054                         if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1055                                 gint align;
1056                                 guint32 size;
1057
1058                                 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1059                                         size = sizeof (MonoTypedRef);
1060                                         align = sizeof (gpointer);
1061                                 }
1062                                 else
1063                                 if (sig->pinvoke)
1064                                         size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1065                                 else
1066                                         size = mono_type_stack_size (&in->klass->byval_arg, &align);
1067                                 if (ainfo->storage == ArgValuetypeInReg) {
1068                                         if (ainfo->pair_storage [1] == ArgNone) {
1069                                                 MonoInst *load;
1070
1071                                                 /* Simpler case */
1072
1073                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
1074                                                 load->inst_left = in;
1075
1076                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
1077                                         }
1078                                         else {
1079                                                 /* Trees can't be shared so make a copy */
1080                                                 MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1081                                                 MonoInst *load, *load2, *offset_ins;
1082
1083                                                 /* Reg1 */
1084                                                 MONO_INST_NEW (cfg, load, CEE_LDIND_I);
1085                                                 load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
1086
1087                                                 NEW_ICONST (cfg, offset_ins, 0);
1088                                                 MONO_INST_NEW (cfg, load2, CEE_ADD);
1089                                                 load2->inst_left = load;
1090                                                 load2->inst_right = offset_ins;
1091
1092                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
1093                                                 load->inst_left = load2;
1094
1095                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
1096
1097                                                 /* Reg2 */
1098                                                 MONO_INST_NEW (cfg, load, CEE_LDIND_I);
1099                                                 load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
1100
1101                                                 NEW_ICONST (cfg, offset_ins, 8);
1102                                                 MONO_INST_NEW (cfg, load2, CEE_ADD);
1103                                                 load2->inst_left = load;
1104                                                 load2->inst_right = offset_ins;
1105
1106                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
1107                                                 load->inst_left = load2;
1108
1109                                                 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1110                                                 arg->cil_code = in->cil_code;
1111                                                 arg->type = in->type;
1112                                                 /* prepend, so they get reversed */
1113                                                 arg->next = call->out_args;
1114                                                 call->out_args = arg;
1115
1116                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);
1117
1118                                                 /* Prepend a copy inst */
1119                                                 MONO_INST_NEW (cfg, arg, CEE_STIND_I);
1120                                                 arg->cil_code = in->cil_code;
1121                                                 arg->inst_left = vtaddr;
1122                                                 arg->inst_right = in;
1123                                                 arg->type = in->type;
1124
1125                                                 /* prepend, so they get reversed */
1126                                                 arg->next = call->out_args;
1127                                                 call->out_args = arg;
1128                                         }
1129                                 }
1130                                 else {
1131                                         arg->opcode = OP_OUTARG_VT;
1132                                         arg->klass = in->klass;
1133                                         arg->unused = sig->pinvoke;
1134                                         arg->inst_imm = size;
1135                                 }
1136                         }
1137                         else {
1138                                 switch (ainfo->storage) {
1139                                 case ArgInIReg:
1140                                         add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
1141                                         break;
1142                                 case ArgInFloatSSEReg:
1143                                 case ArgInDoubleSSEReg:
1144                                         add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
1145                                         break;
1146                                 case ArgOnStack:
1147                                         arg->opcode = OP_OUTARG;
1148                                         if (!sig->params [i - sig->hasthis]->byref) {
1149                                                 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
1150                                                         arg->opcode = OP_OUTARG_R4;
1151                                                 else
1152                                                         if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
1153                                                                 arg->opcode = OP_OUTARG_R8;
1154                                         }
1155                                         break;
1156                                 default:
1157                                         g_assert_not_reached ();
1158                                 }
1159                         }
1160                 }
1161         }
1162
1163         if (cinfo->need_stack_align) {
1164                 MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
1165                 /* prepend, so they get reversed */
1166                 arg->next = call->out_args;
1167                 call->out_args = arg;
1168         }
1169
1170         call->stack_usage = cinfo->stack_usage;
1171         cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1172         cfg->flags |= MONO_CFG_HAS_CALLS;
1173
1174         g_free (cinfo);
1175
1176         return call;
1177 }
1178
1179 #define EMIT_COND_BRANCH(ins,cond,sign) \
1180 if (ins->flags & MONO_INST_BRLABEL) { \
1181         if (ins->inst_i0->inst_c0) { \
1182                 x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
1183         } else { \
1184                 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1185                 if ((cfg->opt & MONO_OPT_BRANCH) && \
1186                     x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
1187                         x86_branch8 (code, cond, 0, sign); \
1188                 else \
1189                         x86_branch32 (code, cond, 0, sign); \
1190         } \
1191 } else { \
1192         if (ins->inst_true_bb->native_offset) { \
1193                 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1194         } else { \
1195                 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1196                 if ((cfg->opt & MONO_OPT_BRANCH) && \
1197                     x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1198                         x86_branch8 (code, cond, 0, sign); \
1199                 else \
1200                         x86_branch32 (code, cond, 0, sign); \
1201         } \
1202 }
1203
1204 /* emit an exception if condition is fail */
1205 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
1206         do {                                                        \
1207                 mono_add_patch_info (cfg, code - cfg->native_code,   \
1208                                     MONO_PATCH_INFO_EXC, exc_name);  \
1209                 x86_branch32 (code, cond, 0, signed);               \
1210         } while (0); 
1211
1212 #define EMIT_FPCOMPARE(code) do { \
1213         amd64_fcompp (code); \
1214         amd64_fnstsw (code); \
1215 } while (0); 
1216
1217 /*
1218  * Emitting a call and patching it later is expensive on amd64, so try to
1219  * determine the patch target immediately, and emit more efficient code if
1220  * possible.
1221  */
1222 static guint8*
1223 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1224 {
1225         /* FIXME: */
1226         mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1227         amd64_set_reg_template (code, GP_SCRATCH_REG);
1228         amd64_call_reg (code, GP_SCRATCH_REG);
1229
1230         return code;
1231 }
1232
1233 #define EMIT_CALL() do { \
1234     amd64_set_reg_template (code, GP_SCRATCH_REG); \
1235     amd64_call_reg (code, GP_SCRATCH_REG); \
1236 } while (0);
1237
1238 /* FIXME: Add more instructions */
1239 #define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG))
1240
1241 static void
1242 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1243 {
1244         MonoInst *ins, *last_ins = NULL;
1245         ins = bb->code;
1246
1247         while (ins) {
1248
1249                 switch (ins->opcode) {
1250                 case OP_ICONST:
1251                 case OP_I8CONST:
1252                         /* reg = 0 -> XOR (reg, reg) */
1253                         /* XOR sets cflags on x86, so we cant do it always */
1254                         if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
1255                                 ins->opcode = CEE_XOR;
1256                                 ins->sreg1 = ins->dreg;
1257                                 ins->sreg2 = ins->dreg;
1258                         }
1259                         break;
1260                 case OP_MUL_IMM: 
1261                         /* remove unnecessary multiplication with 1 */
1262                         if (ins->inst_imm == 1) {
1263                                 if (ins->dreg != ins->sreg1) {
1264                                         ins->opcode = OP_MOVE;
1265                                 } else {
1266                                         last_ins->next = ins->next;
1267                                         ins = ins->next;
1268                                         continue;
1269                                 }
1270                         }
1271                         break;
1272                 case OP_COMPARE_IMM:
1273                         /* OP_COMPARE_IMM (reg, 0) 
1274                          * --> 
1275                          * OP_AMD64_TEST_NULL (reg) 
1276                          */
1277                         if (!ins->inst_imm)
1278                                 ins->opcode = OP_AMD64_TEST_NULL;
1279                         break;
1280                 case OP_ICOMPARE_IMM:
1281                         if (!ins->inst_imm)
1282                                 ins->opcode = OP_X86_TEST_NULL;
1283                         break;
1284                 case OP_X86_COMPARE_MEMBASE_IMM:
1285                         /* 
1286                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1287                          * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1288                          * -->
1289                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1290                          * OP_COMPARE_IMM reg, imm
1291                          *
1292                          * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
1293                          */
1294                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1295                             ins->inst_basereg == last_ins->inst_destbasereg &&
1296                             ins->inst_offset == last_ins->inst_offset) {
1297                                         ins->opcode = OP_COMPARE_IMM;
1298                                         ins->sreg1 = last_ins->sreg1;
1299
1300                                         /* check if we can remove cmp reg,0 with test null */
1301                                         if (!ins->inst_imm)
1302                                                 ins->opcode = OP_X86_TEST_NULL;
1303                                 }
1304
1305                         break;
1306                 case OP_LOAD_MEMBASE:
1307                 case OP_LOADI4_MEMBASE:
1308                         /* 
1309                          * Note: if reg1 = reg2 the load op is removed
1310                          *
1311                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1312                          * OP_LOAD_MEMBASE offset(basereg), reg2
1313                          * -->
1314                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1315                          * OP_MOVE reg1, reg2
1316                          */
1317                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1318                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1319                             ins->inst_basereg == last_ins->inst_destbasereg &&
1320                             ins->inst_offset == last_ins->inst_offset) {
1321                                 if (ins->dreg == last_ins->sreg1) {
1322                                         last_ins->next = ins->next;                             
1323                                         ins = ins->next;                                
1324                                         continue;
1325                                 } else {
1326                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1327                                         ins->opcode = OP_MOVE;
1328                                         ins->sreg1 = last_ins->sreg1;
1329                                 }
1330
1331                         /* 
1332                          * Note: reg1 must be different from the basereg in the second load
1333                          * Note: if reg1 = reg2 is equal then second load is removed
1334                          *
1335                          * OP_LOAD_MEMBASE offset(basereg), reg1
1336                          * OP_LOAD_MEMBASE offset(basereg), reg2
1337                          * -->
1338                          * OP_LOAD_MEMBASE offset(basereg), reg1
1339                          * OP_MOVE reg1, reg2
1340                          */
1341                         } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1342                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1343                               ins->inst_basereg != last_ins->dreg &&
1344                               ins->inst_basereg == last_ins->inst_basereg &&
1345                               ins->inst_offset == last_ins->inst_offset) {
1346
1347                                 if (ins->dreg == last_ins->dreg) {
1348                                         last_ins->next = ins->next;                             
1349                                         ins = ins->next;                                
1350                                         continue;
1351                                 } else {
1352                                         ins->opcode = OP_MOVE;
1353                                         ins->sreg1 = last_ins->dreg;
1354                                 }
1355
1356                                 //g_assert_not_reached ();
1357
1358 #if 0
1359                         /* 
1360                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1361                          * OP_LOAD_MEMBASE offset(basereg), reg
1362                          * -->
1363                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1364                          * OP_ICONST reg, imm
1365                          */
1366                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1367                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1368                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1369                                    ins->inst_offset == last_ins->inst_offset) {
1370                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1371                                 ins->opcode = OP_ICONST;
1372                                 ins->inst_c0 = last_ins->inst_imm;
1373                                 g_assert_not_reached (); // check this rule
1374 #endif
1375                         }
1376                         break;
1377                 case OP_LOADU1_MEMBASE:
1378                 case OP_LOADI1_MEMBASE:
1379                         /* 
1380                          * Note: if reg1 = reg2 the load op is removed
1381                          *
1382                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1383                          * OP_LOAD_MEMBASE offset(basereg), reg2
1384                          * -->
1385                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1386                          * OP_MOVE reg1, reg2
1387                          */
1388                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1389                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1390                                         ins->inst_offset == last_ins->inst_offset) {
1391                                 if (ins->dreg == last_ins->sreg1) {
1392                                         last_ins->next = ins->next;                             
1393                                         ins = ins->next;                                
1394                                         continue;
1395                                 } else {
1396                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1397                                         ins->opcode = OP_MOVE;
1398                                         ins->sreg1 = last_ins->sreg1;
1399                                 }
1400                         }
1401                         break;
1402                 case OP_LOADU2_MEMBASE:
1403                 case OP_LOADI2_MEMBASE:
1404                         /* 
1405                          * Note: if reg1 = reg2 the load op is removed
1406                          *
1407                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1408                          * OP_LOAD_MEMBASE offset(basereg), reg2
1409                          * -->
1410                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1411                          * OP_MOVE reg1, reg2
1412                          */
1413                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1414                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1415                                         ins->inst_offset == last_ins->inst_offset) {
1416                                 if (ins->dreg == last_ins->sreg1) {
1417                                         last_ins->next = ins->next;                             
1418                                         ins = ins->next;                                
1419                                         continue;
1420                                 } else {
1421                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1422                                         ins->opcode = OP_MOVE;
1423                                         ins->sreg1 = last_ins->sreg1;
1424                                 }
1425                         }
1426                         break;
1427                 case CEE_CONV_I4:
1428                 case CEE_CONV_U4:
1429                 case OP_MOVE:
1430                         /*
1431                          * Removes:
1432                          *
1433                          * OP_MOVE reg, reg 
1434                          */
1435                         if (ins->dreg == ins->sreg1) {
1436                                 if (last_ins)
1437                                         last_ins->next = ins->next;                             
1438                                 ins = ins->next;
1439                                 continue;
1440                         }
1441                         /* 
1442                          * Removes:
1443                          *
1444                          * OP_MOVE sreg, dreg 
1445                          * OP_MOVE dreg, sreg
1446                          */
1447                         if (last_ins && last_ins->opcode == OP_MOVE &&
1448                             ins->sreg1 == last_ins->dreg &&
1449                             ins->dreg == last_ins->sreg1) {
1450                                 last_ins->next = ins->next;                             
1451                                 ins = ins->next;                                
1452                                 continue;
1453                         }
1454                         break;
1455                 }
1456                 last_ins = ins;
1457                 ins = ins->next;
1458         }
1459         bb->last_ins = last_ins;
1460 }
1461
1462 static const int 
1463 branch_cc_table [] = {
1464         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1465         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1466         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1467 };
1468
1469 static int
1470 opcode_to_x86_cond (int opcode)
1471 {
1472         switch (opcode) {
1473         case OP_IBEQ:
1474                 return X86_CC_EQ;
1475         case OP_IBNE_UN:
1476                 return X86_CC_NE;
1477         case OP_IBLT:
1478                 return X86_CC_LT;
1479         case OP_IBLT_UN:
1480                 return X86_CC_LT;
1481         case OP_IBGT:
1482                 return X86_CC_GT;
1483         case OP_IBGT_UN:
1484                 return X86_CC_GT;
1485         case OP_IBGE:
1486                 return X86_CC_GE;
1487         case OP_IBGE_UN:
1488                 return X86_CC_GE;
1489         case OP_IBLE:
1490                 return X86_CC_LE;
1491         case OP_IBLE_UN:
1492                 return X86_CC_LE;
1493         case OP_COND_EXC_IOV:
1494                 return X86_CC_O;
1495         case OP_COND_EXC_IC:
1496                 return X86_CC_C;
1497         default:
1498                 g_assert_not_reached ();
1499         }
1500
1501         return -1;
1502 }
1503
1504 /*
1505  * returns the offset used by spillvar. It allocates a new
1506  * spill variable if necessary. 
1507  */
1508 static int
1509 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1510 {
1511         MonoSpillInfo **si, *info;
1512         int i = 0;
1513
1514         si = &cfg->spill_info; 
1515         
1516         while (i <= spillvar) {
1517
1518                 if (!*si) {
1519                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1520                         info->next = NULL;
1521                         cfg->stack_offset += sizeof (gpointer);
1522                         info->offset = - cfg->stack_offset;
1523                 }
1524
1525                 if (i == spillvar)
1526                         return (*si)->offset;
1527
1528                 i++;
1529                 si = &(*si)->next;
1530         }
1531
1532         g_assert_not_reached ();
1533         return 0;
1534 }
1535
1536 /*
1537  * returns the offset used by spillvar. It allocates a new
1538  * spill float variable if necessary. 
1539  * (same as mono_spillvar_offset but for float)
1540  */
1541 static int
1542 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1543 {
1544         MonoSpillInfo **si, *info;
1545         int i = 0;
1546
1547         si = &cfg->spill_info_float; 
1548         
1549         while (i <= spillvar) {
1550
1551                 if (!*si) {
1552                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1553                         info->next = NULL;
1554                         cfg->stack_offset += sizeof (double);
1555                         info->offset = - cfg->stack_offset;
1556                 }
1557
1558                 if (i == spillvar)
1559                         return (*si)->offset;
1560
1561                 i++;
1562                 si = &(*si)->next;
1563         }
1564
1565         g_assert_not_reached ();
1566         return 0;
1567 }
1568
1569 /*
1570  * Creates a store for spilled floating point items
1571  */
1572 static MonoInst*
1573 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1574 {
1575         MonoInst *store;
1576         MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1577         store->sreg1 = reg;
1578         store->inst_destbasereg = AMD64_RBP;
1579         store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1580
1581         DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)store->inst_offset, reg));
1582         return store;
1583 }
1584
1585 /*
1586  * Creates a load for spilled floating point items 
1587  */
1588 static MonoInst*
1589 create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1590 {
1591         MonoInst *load;
1592         MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
1593         load->dreg = reg;
1594         load->inst_basereg = AMD64_RBP;
1595         load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1596
1597         DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)load->inst_offset, reg));
1598         return load;
1599 }
1600
1601 #define ireg_is_freeable(r) ((r) >= 0 && (r) <= 7 && AMD64_IS_CALLEE_REG ((r)))
1602 #define freg_is_freeable(r) ((r) >= 0 && (r) <= AMD64_XMM_NREG)
1603
1604 #define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
1605 #define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
1606 #define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
1607 #define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
1608 #define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
1609 #define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
1610 #define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
1611
1612 typedef struct {
1613         int born_in;
1614         int killed_in;
1615         int last_use;
1616         int prev_use;
1617         int flags;              /* used to track fp spill/load */
1618 } RegTrack;
1619
1620 static const char*const * ins_spec = amd64_desc;
1621
1622 static void
1623 print_ins (int i, MonoInst *ins)
1624 {
1625         const char *spec = ins_spec [ins->opcode];
1626         g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1627         if (!spec)
1628                 g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
1629         if (spec [MONO_INST_DEST]) {
1630                 gboolean fp = (spec [MONO_INST_DEST] == 'f');
1631                 if (reg_is_soft (ins->dreg, fp))
1632                         g_print (" R%d <-", ins->dreg);
1633                 else
1634                         g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
1635         }
1636         if (spec [MONO_INST_SRC1]) {
1637                 gboolean fp = (spec [MONO_INST_SRC1] == 'f');
1638                 if (reg_is_soft (ins->sreg1, fp))
1639                         g_print (" R%d", ins->sreg1);
1640                 else
1641                         g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
1642         }
1643         if (spec [MONO_INST_SRC2]) {
1644                 gboolean fp = (spec [MONO_INST_SRC2] == 'f');
1645                 if (reg_is_soft (ins->sreg2, fp))
1646                         g_print (" R%d", ins->sreg2);
1647                 else
1648                         g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
1649         }
1650         if (spec [MONO_INST_CLOB])
1651                 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1652         g_print ("\n");
1653 }
1654
1655 static void
1656 print_regtrack (RegTrack *t, int num)
1657 {
1658         int i;
1659         char buf [32];
1660         const char *r;
1661         
1662         for (i = 0; i < num; ++i) {
1663                 if (!t [i].born_in)
1664                         continue;
1665                 if (i >= MONO_MAX_IREGS) {
1666                         g_snprintf (buf, sizeof(buf), "R%d", i);
1667                         r = buf;
1668                 } else
1669                         r = mono_arch_regname (i);
1670                 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1671         }
1672 }
1673
1674 typedef struct InstList InstList;
1675
1676 struct InstList {
1677         InstList *prev;
1678         InstList *next;
1679         MonoInst *data;
1680 };
1681
1682 static inline InstList*
1683 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1684 {
1685         InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1686         item->data = data;
1687         item->prev = NULL;
1688         item->next = list;
1689         if (list)
1690                 list->prev = item;
1691         return item;
1692 }
1693
1694 /*
1695  * Force the spilling of the variable in the symbolic register 'reg'.
1696  */
1697 static int
1698 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
1699 {
1700         MonoInst *load;
1701         int i, sel, spill;
1702         int *assign, *symbolic;
1703
1704         if (fp) {
1705                 assign = cfg->rs->fassign;
1706                 symbolic = cfg->rs->fsymbolic;
1707         }
1708         else {
1709                 assign = cfg->rs->iassign;
1710                 symbolic = cfg->rs->isymbolic;
1711         }       
1712         
1713         sel = assign [reg];
1714         /*i = cfg->rs->isymbolic [sel];
1715         g_assert (i == reg);*/
1716         i = reg;
1717         spill = ++cfg->spill_count;
1718         assign [i] = -spill - 1;
1719         if (fp)
1720                 mono_regstate_free_float (cfg->rs, sel);
1721         else
1722                 mono_regstate_free_int (cfg->rs, sel);
1723         /* we need to create a spill var and insert a load to sel after the current instruction */
1724         if (fp)
1725                 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1726         else
1727                 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1728         load->dreg = sel;
1729         load->inst_basereg = AMD64_RBP;
1730         load->inst_offset = mono_spillvar_offset (cfg, spill);
1731         if (item->prev) {
1732                 while (ins->next != item->prev->data)
1733                         ins = ins->next;
1734         }
1735         load->next = ins->next;
1736         ins->next = load;
1737         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1738         if (fp)
1739                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1740         else
1741                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1742         g_assert (i == sel);
1743
1744         return sel;
1745 }
1746
1747 static int
1748 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
1749 {
1750         MonoInst *load;
1751         int i, sel, spill;
1752         int *assign, *symbolic;
1753
1754         if (fp) {
1755                 assign = cfg->rs->fassign;
1756                 symbolic = cfg->rs->fsymbolic;
1757         }
1758         else {
1759                 assign = cfg->rs->iassign;
1760                 symbolic = cfg->rs->isymbolic;
1761         }
1762
1763         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1764         /* exclude the registers in the current instruction */
1765         if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
1766                 if (reg_is_soft (ins->sreg1, fp))
1767                         regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
1768                 else
1769                         regmask &= ~ (1 << ins->sreg1);
1770                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
1771         }
1772         if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
1773                 if (reg_is_soft (ins->sreg2, fp))
1774                         regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
1775                 else
1776                         regmask &= ~ (1 << ins->sreg2);
1777                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
1778         }
1779         if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
1780                 regmask &= ~ (1 << ins->dreg);
1781                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
1782         }
1783
1784         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1785         g_assert (regmask); /* need at least a register we can free */
1786         sel = -1;
1787         /* we should track prev_use and spill the register that's farther */
1788         if (fp) {
1789                 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1790                         if (regmask & (1 << i)) {
1791                                 sel = i;
1792                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
1793                                 break;
1794                         }
1795                 }
1796
1797                 i = cfg->rs->fsymbolic [sel];
1798                 spill = ++cfg->spill_count;
1799                 cfg->rs->fassign [i] = -spill - 1;
1800                 mono_regstate_free_float (cfg->rs, sel);
1801         }
1802         else {
1803                 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1804                         if (regmask & (1 << i)) {
1805                                 sel = i;
1806                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1807                                 break;
1808                         }
1809                 }
1810
1811                 i = cfg->rs->isymbolic [sel];
1812                 spill = ++cfg->spill_count;
1813                 cfg->rs->iassign [i] = -spill - 1;
1814                 mono_regstate_free_int (cfg->rs, sel);
1815         }
1816
1817         /* we need to create a spill var and insert a load to sel after the current instruction */
1818         MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
1819         load->dreg = sel;
1820         load->inst_basereg = AMD64_RBP;
1821         load->inst_offset = mono_spillvar_offset (cfg, spill);
1822         if (item->prev) {
1823                 while (ins->next != item->prev->data)
1824                         ins = ins->next;
1825         }
1826         load->next = ins->next;
1827         ins->next = load;
1828         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1829         if (fp)
1830                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1831         else
1832                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1833         g_assert (i == sel);
1834         
1835         return sel;
1836 }
1837
1838 static MonoInst*
1839 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
1840 {
1841         MonoInst *copy;
1842
1843         if (fp)
1844                 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1845         else
1846                 MONO_INST_NEW (cfg, copy, OP_MOVE);
1847
1848         copy->dreg = dest;
1849         copy->sreg1 = src;
1850         if (ins) {
1851                 copy->next = ins->next;
1852                 ins->next = copy;
1853         }
1854         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
1855         return copy;
1856 }
1857
1858 static MonoInst*
1859 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
1860 {
1861         MonoInst *store;
1862         MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
1863         store->sreg1 = reg;
1864         store->inst_destbasereg = AMD64_RBP;
1865         store->inst_offset = mono_spillvar_offset (cfg, spill);
1866         if (ins) {
1867                 store->next = ins->next;
1868                 ins->next = store;
1869         }
1870         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
1871         return store;
1872 }
1873
1874 static void
1875 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1876 {
1877         MonoInst *prev;
1878         if (item->next) {
1879                 prev = item->next->data;
1880
1881                 while (prev->next != ins)
1882                         prev = prev->next;
1883                 to_insert->next = ins;
1884                 prev->next = to_insert;
1885         } else {
1886                 to_insert->next = ins;
1887         }
1888         /* 
1889          * needed otherwise in the next instruction we can add an ins to the 
1890          * end and that would get past this instruction.
1891          */
1892         item->data = to_insert; 
1893 }
1894
1895 /* flags used in reginfo->flags */
1896 enum {
1897         MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
1898         MONO_X86_FP_NEEDS_SPILL                 = 1 << 1,
1899         MONO_X86_FP_NEEDS_LOAD                  = 1 << 2,
1900         MONO_X86_REG_NOT_ECX                    = 1 << 3,
1901         MONO_X86_REG_EAX                                = 1 << 4,
1902         MONO_X86_REG_EDX                                = 1 << 5,
1903         MONO_X86_REG_ECX                                = 1 << 6
1904 };
1905
1906 static int
1907 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
1908 {
1909         int val;
1910         int test_mask = dest_mask;
1911
1912         if (flags & MONO_X86_REG_EAX)
1913                 test_mask &= (1 << AMD64_RAX);
1914         else if (flags & MONO_X86_REG_EDX)
1915                 test_mask &= (1 << AMD64_RDX);
1916         else if (flags & MONO_X86_REG_ECX)
1917                 test_mask &= (1 << AMD64_RCX);
1918         else if (flags & MONO_X86_REG_NOT_ECX)
1919                 test_mask &= ~ (1 << AMD64_RCX);
1920
1921         val = mono_regstate_alloc_int (cfg->rs, test_mask);
1922         if (val >= 0 && test_mask != dest_mask)
1923                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
1924
1925         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
1926                 DEBUG(g_print ("\tFailed to allocate flag suggested mask (%u) but exluding ECX\n", test_mask));
1927                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & (~1 << AMD64_RCX)));
1928         }
1929
1930         if (val < 0) {
1931                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
1932                 if (val < 0)
1933                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
1934         }
1935
1936         return val;
1937 }
1938
1939 static int
1940 mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
1941 {
1942         int val;
1943
1944         val = mono_regstate_alloc_float (cfg->rs, dest_mask);
1945
1946         if (val < 0) {
1947                 val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
1948         }
1949
1950         return val;
1951 }
1952
1953
1954 /*#include "cprop.c"*/
1955
1956 /*
1957  * Local register allocation.
1958  * We first scan the list of instructions and we save the liveness info of
1959  * each register (when the register is first used, when it's value is set etc.).
1960  * We also reverse the list of instructions (in the InstList list) because assigning
1961  * registers backwards allows for more tricks to be used.
1962  */
1963 void
1964 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1965 {
1966         MonoInst *ins;
1967         MonoRegState *rs = cfg->rs;
1968         int i, val, fpcount;
1969         RegTrack *reginfo, *reginfof;
1970         RegTrack *reginfo1, *reginfo2, *reginfod;
1971         InstList *tmp, *reversed = NULL;
1972         const char *spec;
1973         guint32 src1_mask, src2_mask, dest_mask;
1974         GList *fspill_list = NULL;
1975         int fspill = 0;
1976
1977         if (!bb->code)
1978                 return;
1979         rs->next_vireg = bb->max_ireg;
1980         rs->next_vfreg = bb->max_freg;
1981         mono_regstate_assign (rs);
1982         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
1983         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
1984         rs->ifree_mask = AMD64_CALLEE_REGS;
1985         rs->ffree_mask = AMD64_CALLEE_FREGS;
1986
1987         if (!use_sse2)
1988                 /* The fp stack is 6 entries deep */
1989                 rs->ffree_mask = 0x3f;
1990
1991         ins = bb->code;
1992
1993         /*if (cfg->opt & MONO_OPT_COPYPROP)
1994                 local_copy_prop (cfg, ins);*/
1995
1996         i = 1;
1997         fpcount = 0;
1998         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
1999         /* forward pass on the instructions to collect register liveness info */
2000         while (ins) {
2001                 spec = ins_spec [ins->opcode];
2002                 
2003                 DEBUG (print_ins (i, ins));
2004
2005                 if (spec [MONO_INST_SRC1]) {
2006                         if (spec [MONO_INST_SRC1] == 'f') {
2007                                 reginfo1 = reginfof;
2008
2009                                 if (!use_sse2) {
2010                                         GList *spill;
2011
2012                                         spill = g_list_first (fspill_list);
2013                                         if (spill && fpcount < FPSTACK_SIZE) {
2014                                                 reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
2015                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2016                                         } else
2017                                                 fpcount--;
2018                                 }
2019                         }
2020                         else
2021                                 reginfo1 = reginfo;
2022                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2023                         reginfo1 [ins->sreg1].last_use = i;
2024                         if (spec [MONO_INST_SRC1] == 'L') {
2025                                 /* The virtual register is allocated sequentially */
2026                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
2027                                 reginfo1 [ins->sreg1 + 1].last_use = i;
2028                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
2029                                         reginfo1 [ins->sreg1 + 1].born_in = i;
2030
2031                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
2032                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
2033                         }
2034                 } else {
2035                         ins->sreg1 = -1;
2036                 }
2037                 if (spec [MONO_INST_SRC2]) {
2038                         if (spec [MONO_INST_SRC2] == 'f') {
2039                                 reginfo2 = reginfof;
2040
2041                                 if (!use_sse2) {
2042                                         GList *spill;
2043
2044                                         spill = g_list_first (fspill_list);
2045                                         if (spill) {
2046                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
2047                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2048                                                 if (fpcount >= FPSTACK_SIZE) {
2049                                                         fspill++;
2050                                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2051                                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
2052                                                 }
2053                                         } else
2054                                                 fpcount--;
2055                                 }
2056                         }
2057                         else
2058                                 reginfo2 = reginfo;
2059                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2060                         reginfo2 [ins->sreg2].last_use = i;
2061                         if (spec [MONO_INST_SRC2] == 'L') {
2062                                 /* The virtual register is allocated sequentially */
2063                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
2064                                 reginfo2 [ins->sreg2 + 1].last_use = i;
2065                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
2066                                         reginfo2 [ins->sreg2 + 1].born_in = i;
2067                         }
2068                         if (spec [MONO_INST_CLOB] == 's') {
2069                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
2070                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
2071                         }
2072                 } else {
2073                         ins->sreg2 = -1;
2074                 }
2075                 if (spec [MONO_INST_DEST]) {
2076                         if (spec [MONO_INST_DEST] == 'f') {
2077                                 reginfod = reginfof;
2078                                 if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
2079                                         if (fpcount >= FPSTACK_SIZE) {
2080                                                 reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
2081                                                 fspill++;
2082                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2083                                                 fpcount--;
2084                                         }
2085                                         fpcount++;
2086                                 }
2087                         }
2088                         else
2089                                 reginfod = reginfo;
2090                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2091                                 reginfod [ins->dreg].killed_in = i;
2092                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2093                         reginfod [ins->dreg].last_use = i;
2094                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2095                                 reginfod [ins->dreg].born_in = i;
2096                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
2097                                 /* The virtual register is allocated sequentially */
2098                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2099                                 reginfod [ins->dreg + 1].last_use = i;
2100                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2101                                         reginfod [ins->dreg + 1].born_in = i;
2102
2103                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
2104                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
2105                         }
2106                 } else {
2107                         ins->dreg = -1;
2108                 }
2109
2110                 if (spec [MONO_INST_CLOB] == 'c') {
2111                         /* A call instruction implicitly uses all registers in call->out_ireg_args */
2112
2113                         MonoCallInst *call = (MonoCallInst*)ins;
2114                         GSList *list;
2115
2116                         list = call->out_ireg_args;
2117                         if (list) {
2118                                 while (list) {
2119                                         guint64 regpair;
2120                                         int reg, hreg;
2121
2122                                         regpair = (guint64) (list->data);
2123                                         hreg = regpair >> 32;
2124                                         reg = regpair & 0xffffffff;
2125
2126                                         reginfo [reg].prev_use = reginfo [reg].last_use;
2127                                         reginfo [reg].last_use = i;
2128
2129                                         list = g_slist_next (list);
2130                                 }
2131                         }
2132
2133                         list = call->out_freg_args;
2134                         if (use_sse2 && list) {
2135                                 while (list) {
2136                                         guint64 regpair;
2137                                         int reg, hreg;
2138
2139                                         regpair = (guint64) (list->data);
2140                                         hreg = regpair >> 32;
2141                                         reg = regpair & 0xffffffff;
2142
2143                                         reginfof [reg].prev_use = reginfof [reg].last_use;
2144                                         reginfof [reg].last_use = i;
2145
2146                                         list = g_slist_next (list);
2147                                 }
2148                         }
2149                 }
2150
2151                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2152                 ++i;
2153                 ins = ins->next;
2154         }
2155
2156         // todo: check if we have anything left on fp stack, in verify mode?
2157         fspill = 0;
2158
2159         DEBUG (print_regtrack (reginfo, rs->next_vireg));
2160         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2161         tmp = reversed;
2162         while (tmp) {
2163                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
2164                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
2165                 --i;
2166                 ins = tmp->data;
2167                 spec = ins_spec [ins->opcode];
2168                 prev_dreg = -1;
2169                 clob_dreg = -1;
2170                 DEBUG (g_print ("processing:"));
2171                 DEBUG (print_ins (i, ins));
2172                 if (spec [MONO_INST_CLOB] == 's') {
2173                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
2174                                 DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
2175                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2176                                         /* Argument already in hard reg, need to copy */
2177                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2178                                         insert_before_ins (ins, tmp, copy);
2179                                 }
2180                                 rs->iassign [ins->sreg2] = AMD64_RCX;
2181                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
2182                                 ins->sreg2 = AMD64_RCX;
2183                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
2184                         } else {
2185                                 int need_ecx_spill = TRUE;
2186                                 /* 
2187                                  * we first check if src1/dreg is already assigned a register
2188                                  * and then we force a spill of the var assigned to ECX.
2189                                  */
2190                                 /* the destination register can't be ECX */
2191                                 dest_mask &= ~ (1 << AMD64_RCX);
2192                                 src1_mask &= ~ (1 << AMD64_RCX);
2193                                 val = rs->iassign [ins->dreg];
2194                                 /* 
2195                                  * the destination register is already assigned to ECX:
2196                                  * we need to allocate another register for it and then
2197                                  * copy from this to ECX.
2198                                  */
2199                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
2200                                         int new_dest;
2201                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2202                                         g_assert (new_dest >= 0);
2203                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
2204
2205                                         rs->isymbolic [new_dest] = ins->dreg;
2206                                         rs->iassign [ins->dreg] = new_dest;
2207                                         clob_dreg = ins->dreg;
2208                                         ins->dreg = new_dest;
2209                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
2210                                         need_ecx_spill = FALSE;
2211                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
2212                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
2213                                         rs->iassign [ins->dreg] = val;
2214                                         rs->isymbolic [val] = prev_dreg;
2215                                         ins->dreg = val;*/
2216                                 }
2217                                 val = rs->iassign [ins->sreg2];
2218                                 if (val >= 0 && val != AMD64_RCX) {
2219                                         MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
2220                                         DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
2221                                         move->next = ins;
2222                                         g_assert_not_reached ();
2223                                         /* FIXME: where is move connected to the instruction list? */
2224                                         //tmp->prev->data->next = move;
2225                                 }
2226                                 else 
2227                                         if (val == AMD64_RCX) {
2228                                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2229                                                         /* sreg2 is already assigned to a hard reg, need to copy */
2230                                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2231                                                         insert_before_ins (ins, tmp, copy);
2232                                                 }
2233                                                 need_ecx_spill = FALSE;
2234                                         }
2235                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
2236                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
2237                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
2238                                         mono_regstate_free_int (rs, AMD64_RCX);
2239                                 }
2240                                 /* force-set sreg2 */
2241                                 rs->iassign [ins->sreg2] = AMD64_RCX;
2242                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
2243                                 ins->sreg2 = AMD64_RCX;
2244                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
2245                         }
2246                 } else if (spec [MONO_INST_CLOB] == 'd') { /* division */
2247                         int dest_reg = AMD64_RAX;
2248                         int clob_reg = AMD64_RDX;
2249                         if (spec [MONO_INST_DEST] == 'd') {
2250                                 dest_reg = AMD64_RDX; /* reminder */
2251                                 clob_reg = AMD64_RAX;
2252                         }
2253                         val = rs->iassign [ins->dreg];
2254                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
2255                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2256                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2257                                 mono_regstate_free_int (rs, dest_reg);
2258                         }
2259                         if (val < 0) {
2260                                 if (val < -1) {
2261                                         /* the register gets spilled after this inst */
2262                                         int spill = -val -1;
2263                                         dest_mask = 1 << clob_reg;
2264                                         prev_dreg = ins->dreg;
2265                                         val = mono_regstate_alloc_int (rs, dest_mask);
2266                                         if (val < 0)
2267                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
2268                                         rs->iassign [ins->dreg] = val;
2269                                         if (spill)
2270                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2271                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2272                                         rs->isymbolic [val] = prev_dreg;
2273                                         ins->dreg = val;
2274                                         if (val != dest_reg) { /* force a copy */
2275                                                 create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2276                                         }
2277                                 } else {
2278                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
2279                                         prev_dreg = ins->dreg;
2280                                         rs->iassign [ins->dreg] = dest_reg;
2281                                         rs->isymbolic [dest_reg] = ins->dreg;
2282                                         ins->dreg = dest_reg;
2283                                         rs->ifree_mask &= ~ (1 << dest_reg);
2284                                 }
2285                         } else {
2286                                 //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
2287                                 if (val != dest_reg) { /* force a copy */
2288                                         create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2289                                         if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
2290                                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2291                                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2292                                                 mono_regstate_free_int (rs, dest_reg);
2293                                         }
2294                                 }
2295                         }
2296                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= 8)) {
2297                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
2298                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
2299                                 mono_regstate_free_int (rs, clob_reg);
2300                         }
2301                         src1_mask = 1 << AMD64_RAX;
2302                         src2_mask = 1 << AMD64_RCX;
2303                 }
2304                 if (spec [MONO_INST_DEST] == 'l') {
2305                         int hreg;
2306                         val = rs->iassign [ins->dreg];
2307                         /* check special case when dreg have been moved from ecx (clob shift) */
2308                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2309                                 hreg = clob_dreg + 1;
2310                         else
2311                                 hreg = ins->dreg + 1;
2312
2313                         /* base prev_dreg on fixed hreg, handle clob case */
2314                         val = hreg - 1;
2315
2316                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
2317                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2318                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2319                                 mono_regstate_free_int (rs, AMD64_RAX);
2320                         }
2321                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
2322                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
2323                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
2324                                 mono_regstate_free_int (rs, AMD64_RDX);
2325                         }
2326                 }
2327
2328                 /*
2329                  * TRACK DREG
2330                  */
2331                 if (spec [MONO_INST_DEST] == 'f') {
2332                         if (use_sse2) {
2333                                 /* Allocate an XMM reg the same way as an int reg */
2334                                 if (reg_is_soft (ins->dreg, TRUE)) {
2335                                         val = rs->fassign [ins->dreg];
2336                                         prev_dreg = ins->dreg;
2337                                         
2338                                         if (val < 0) {
2339                                                 int spill = 0;
2340                                                 if (val < -1) {
2341                                                         /* the register gets spilled after this inst */
2342                                                         spill = -val -1;
2343                                                 }
2344                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
2345                                                 rs->fassign [ins->dreg] = val;
2346                                                 if (spill)
2347                                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
2348                                         }
2349                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
2350                                         rs->fsymbolic [val] = prev_dreg;
2351                                         ins->dreg = val;
2352                                 }
2353                         }
2354                         else if (spec [MONO_INST_CLOB] != 'm') {
2355                                 if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
2356                                         GList *spill_node;
2357                                         MonoInst *store;
2358                                         spill_node = g_list_first (fspill_list);
2359                                         g_assert (spill_node);
2360
2361                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
2362                                         insert_before_ins (ins, tmp, store);
2363                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2364                                         fspill--;
2365                                 }
2366                         }
2367                 } else if (spec [MONO_INST_DEST] == 'L') {
2368                         int hreg;
2369                         val = rs->iassign [ins->dreg];
2370                         /* check special case when dreg have been moved from ecx (clob shift) */
2371                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2372                                 hreg = clob_dreg + 1;
2373                         else
2374                                 hreg = ins->dreg + 1;
2375
2376                         /* base prev_dreg on fixed hreg, handle clob case */
2377                         prev_dreg = hreg - 1;
2378
2379                         if (val < 0) {
2380                                 int spill = 0;
2381                                 if (val < -1) {
2382                                         /* the register gets spilled after this inst */
2383                                         spill = -val -1;
2384                                 }
2385                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2386                                 rs->iassign [ins->dreg] = val;
2387                                 if (spill)
2388                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2389                         }
2390
2391                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
2392  
2393                         rs->isymbolic [val] = hreg - 1;
2394                         ins->dreg = val;
2395                         
2396                         val = rs->iassign [hreg];
2397                         if (val < 0) {
2398                                 int spill = 0;
2399                                 if (val < -1) {
2400                                         /* the register gets spilled after this inst */
2401                                         spill = -val -1;
2402                                 }
2403                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2404                                 rs->iassign [hreg] = val;
2405                                 if (spill)
2406                                         create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2407                         }
2408
2409                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
2410                         rs->isymbolic [val] = hreg;
2411                         /* save reg allocating into unused */
2412                         ins->unused = val;
2413
2414                         /* check if we can free our long reg */
2415                         if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2416                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
2417                                 mono_regstate_free_int (rs, val);
2418                         }
2419                 }
2420                 else if (ins->dreg >= MONO_MAX_IREGS) {
2421                         int hreg;
2422                         val = rs->iassign [ins->dreg];
2423                         if (spec [MONO_INST_DEST] == 'l') {
2424                                 /* check special case when dreg have been moved from ecx (clob shift) */
2425                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2426                                         hreg = clob_dreg + 1;
2427                                 else
2428                                         hreg = ins->dreg + 1;
2429
2430                                 /* base prev_dreg on fixed hreg, handle clob case */
2431                                 prev_dreg = hreg - 1;
2432                         } else
2433                                 prev_dreg = ins->dreg;
2434
2435                         if (val < 0) {
2436                                 int spill = 0;
2437                                 if (val < -1) {
2438                                         /* the register gets spilled after this inst */
2439                                         spill = -val -1;
2440                                 }
2441                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2442                                 rs->iassign [ins->dreg] = val;
2443                                 if (spill)
2444                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2445                         }
2446                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2447                         rs->isymbolic [val] = prev_dreg;
2448                         ins->dreg = val;
2449                         /* handle cases where lreg needs to be eax:edx */
2450                         if (spec [MONO_INST_DEST] == 'l') {
2451                                 /* check special case when dreg have been moved from ecx (clob shift) */
2452                                 int hreg = prev_dreg + 1;
2453                                 val = rs->iassign [hreg];
2454                                 if (val < 0) {
2455                                         int spill = 0;
2456                                         if (val < -1) {
2457                                                 /* the register gets spilled after this inst */
2458                                                 spill = -val -1;
2459                                         }
2460                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2461                                         rs->iassign [hreg] = val;
2462                                         if (spill)
2463                                                 create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2464                                 }
2465                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2466                                 rs->isymbolic [val] = hreg;
2467                                 if (ins->dreg == AMD64_RAX) {
2468                                         if (val != AMD64_RDX)
2469                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2470                                 } else if (ins->dreg == AMD64_RDX) {
2471                                         if (val == AMD64_RAX) {
2472                                                 /* swap */
2473                                                 g_assert_not_reached ();
2474                                         } else {
2475                                                 /* two forced copies */
2476                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2477                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2478                                         }
2479                                 } else {
2480                                         if (val == AMD64_RDX) {
2481                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2482                                         } else {
2483                                                 /* two forced copies */
2484                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2485                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2486                                         }
2487                                 }
2488                                 if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2489                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2490                                         mono_regstate_free_int (rs, val);
2491                                 }
2492                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2493                                 /* this instruction only outputs to EAX, need to copy */
2494                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2495                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2496                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
2497                         }
2498                 }
2499
2500                 if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
2501                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
2502                         mono_regstate_free_float (rs, ins->dreg);
2503                 }
2504                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2505                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2506                         mono_regstate_free_int (rs, ins->dreg);
2507                 }
2508
2509                 /* put src1 in EAX if it needs to be */
2510                 if (spec [MONO_INST_SRC1] == 'a') {
2511                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2512                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2513                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2514                                 mono_regstate_free_int (rs, AMD64_RAX);
2515                         }
2516                         if (ins->sreg1 < MONO_MAX_IREGS) {
2517                                 /* The argument is already in a hard reg, need to copy */
2518                                 MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
2519                                 insert_before_ins (ins, tmp, copy);
2520                         }
2521                         /* force-set sreg1 */
2522                         rs->iassign [ins->sreg1] = AMD64_RAX;
2523                         rs->isymbolic [AMD64_RAX] = ins->sreg1;
2524                         ins->sreg1 = AMD64_RAX;
2525                         rs->ifree_mask &= ~ (1 << AMD64_RAX);
2526                 }
2527
2528                 /*
2529                  * TRACK SREG1
2530                  */
2531                 if (spec [MONO_INST_SRC1] == 'f') {
2532                         if (use_sse2) {
2533                                 if (reg_is_soft (ins->sreg1, TRUE)) {
2534                                         val = rs->fassign [ins->sreg1];
2535                                         prev_sreg1 = ins->sreg1;
2536                                         if (val < 0) {
2537                                                 int spill = 0;
2538                                                 if (val < -1) {
2539                                                         /* the register gets spilled after this inst */
2540                                                         spill = -val -1;
2541                                                 }
2542                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
2543                                                 rs->fassign [ins->sreg1] = val;
2544                                                 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
2545                                                 if (spill) {
2546                                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
2547                                                         insert_before_ins (ins, tmp, store);
2548                                                 }
2549                                         }
2550                                         rs->fsymbolic [val] = prev_sreg1;
2551                                         ins->sreg1 = val;
2552                                 } else {
2553                                         prev_sreg1 = -1;
2554                                 }
2555                         }
2556                         else
2557                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2558                                 MonoInst *load;
2559                                 MonoInst *store = NULL;
2560
2561                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2562                                         GList *spill_node;
2563                                         spill_node = g_list_first (fspill_list);
2564                                         g_assert (spill_node);
2565
2566                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2567                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2568                                 }
2569
2570                                 fspill++;
2571                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2572                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2573                                 insert_before_ins (ins, tmp, load);
2574                                 if (store) 
2575                                         insert_before_ins (load, tmp, store);
2576                         }
2577                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2578                         /* force source to be same as dest */
2579                         rs->iassign [ins->sreg1] = ins->dreg;
2580                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2581
2582                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2583                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2584
2585                         ins->sreg1 = ins->dreg;
2586                         /* 
2587                          * No need for saving the reg, we know that src1=dest in this cases
2588                          * ins->inst_c0 = ins->unused;
2589                          */
2590
2591                         /* make sure that we remove them from free mask */
2592                         rs->ifree_mask &= ~ (1 << ins->dreg);
2593                         rs->ifree_mask &= ~ (1 << ins->unused);
2594                 }
2595                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2596                         val = rs->iassign [ins->sreg1];
2597                         prev_sreg1 = ins->sreg1;
2598                         if (val < 0) {
2599                                 int spill = 0;
2600                                 if (val < -1) {
2601                                         /* the register gets spilled after this inst */
2602                                         spill = -val -1;
2603                                 }
2604                                 if (0 && (ins->opcode == OP_MOVE)) {
2605                                         /* 
2606                                          * small optimization: the dest register is already allocated
2607                                          * but the src one is not: we can simply assign the same register
2608                                          * here and peephole will get rid of the instruction later.
2609                                          * This optimization may interfere with the clobbering handling:
2610                                          * it removes a mov operation that will be added again to handle clobbering.
2611                                          * There are also some other issues that should with make testjit.
2612                                          */
2613                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2614                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2615                                         //g_assert (val >= 0);
2616                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2617                                 } else {
2618                                         //g_assert (val == -1); /* source cannot be spilled */
2619                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2620                                         rs->iassign [ins->sreg1] = val;
2621                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2622                                 }
2623                                 if (spill) {
2624                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
2625                                         insert_before_ins (ins, tmp, store);
2626                                 }
2627                         }
2628                         rs->isymbolic [val] = prev_sreg1;
2629                         ins->sreg1 = val;
2630                 } else {
2631                         prev_sreg1 = -1;
2632                 }
2633
2634                 /* handle clobbering of sreg1 */
2635                 if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
2636                         MonoInst *sreg2_copy = NULL;
2637
2638                         gboolean fp = (spec [MONO_INST_SRC1] == 'f');
2639
2640                         if (ins->dreg == ins->sreg2) {
2641                                 /* 
2642                                  * copying sreg1 to dreg could clobber sreg2, so allocate a new
2643                                  * register for it.
2644                                  */
2645                                 int reg2 = 0;
2646
2647                                 if (fp)
2648                                         reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2649                                 else
2650                                         reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
2651
2652                                 DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
2653                                 sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
2654                                 prev_sreg2 = ins->sreg2 = reg2;
2655                         }
2656
2657                         MonoInst *copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
2658                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
2659                         insert_before_ins (ins, tmp, copy);
2660
2661                         if (sreg2_copy)
2662                                 insert_before_ins (copy, tmp, sreg2_copy);
2663
2664                         /* we set sreg1 to dest as well */
2665                         prev_sreg1 = ins->sreg1 = ins->dreg;
2666                         src2_mask &= ~ (1 << ins->dreg);
2667                 }
2668
2669                 /*
2670                  * TRACK SREG2
2671                  */
2672                 if (spec [MONO_INST_SRC2] == 'f') {
2673                         if (use_sse2) {
2674                                 if (reg_is_soft (ins->sreg2, TRUE)) {
2675                                         val = rs->fassign [ins->sreg2];
2676                                         prev_sreg2 = ins->sreg2;
2677                                         if (val < 0) {
2678                                                 int spill = 0;
2679                                                 if (val < -1) {
2680                                                         /* the register gets spilled after this inst */
2681                                                         spill = -val -1;
2682                                                 }
2683                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2684                                                 rs->fassign [ins->sreg2] = val;
2685                                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
2686                                                 if (spill)
2687                                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
2688                                         }
2689                                         rs->fsymbolic [val] = prev_sreg2;
2690                                         ins->sreg2 = val;
2691                                 } else {
2692                                         prev_sreg2 = -1;
2693                                 }
2694                         }
2695                         else
2696                         if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2697                                 MonoInst *load;
2698                                 MonoInst *store = NULL;
2699
2700                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2701                                         GList *spill_node;
2702
2703                                         spill_node = g_list_first (fspill_list);
2704                                         g_assert (spill_node);
2705                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2706                                                 spill_node = g_list_next (spill_node);
2707         
2708                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2709                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2710                                 } 
2711                                 
2712                                 fspill++;
2713                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2714                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2715                                 insert_before_ins (ins, tmp, load);
2716                                 if (store) 
2717                                         insert_before_ins (load, tmp, store);
2718                         }
2719                 } 
2720                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2721                         val = rs->iassign [ins->sreg2];
2722                         prev_sreg2 = ins->sreg2;
2723                         if (val < 0) {
2724                                 int spill = 0;
2725                                 if (val < -1) {
2726                                         /* the register gets spilled after this inst */
2727                                         spill = -val -1;
2728                                 }
2729                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2730                                 rs->iassign [ins->sreg2] = val;
2731                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2732                                 if (spill)
2733                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
2734                         }
2735                         rs->isymbolic [val] = prev_sreg2;
2736                         ins->sreg2 = val;
2737                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2738                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but ECX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2739                         }
2740                 } else {
2741                         prev_sreg2 = -1;
2742                 }
2743
2744                 if (spec [MONO_INST_CLOB] == 'c') {
2745                         int j, s;
2746                         MonoCallInst *call = (MonoCallInst*)ins;
2747                         GSList *list;
2748                         guint32 clob_mask = AMD64_CALLEE_REGS;
2749
2750                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2751                                 s = 1 << j;
2752                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2753                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
2754                                         mono_regstate_free_int (rs, j);
2755                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2756                                 }
2757                         }
2758
2759                         if (use_sse2) {
2760                                 clob_mask = AMD64_CALLEE_FREGS;
2761
2762                                 for (j = 0; j < MONO_MAX_FREGS; ++j) {
2763                                         s = 1 << j;
2764                                         if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
2765                                                 get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
2766                                                 mono_regstate_free_float (rs, j);
2767                                                 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2768                                         }
2769                                 }
2770                         }
2771
2772                         /* 
2773                          * Assign all registers in call->out_reg_args to the proper 
2774                          * argument registers.
2775                          */
2776
2777                         list = call->out_ireg_args;
2778                         if (list) {
2779                                 while (list) {
2780                                         guint64 regpair;
2781                                         int reg, hreg;
2782
2783                                         regpair = (guint64) (list->data);
2784                                         hreg = regpair >> 32;
2785                                         reg = regpair & 0xffffffff;
2786
2787                                         rs->iassign [reg] = hreg;
2788                                         rs->isymbolic [hreg] = reg;
2789                                         rs->ifree_mask &= ~ (1 << hreg);
2790
2791                                         list = g_slist_next (list);
2792                                 }
2793                                 g_slist_free (call->out_ireg_args);
2794                         }
2795
2796                         list = call->out_freg_args;
2797                         if (list && use_sse2) {
2798                                 while (list) {
2799                                         guint64 regpair;
2800                                         int reg, hreg;
2801
2802                                         regpair = (guint64) (list->data);
2803                                         hreg = regpair >> 32;
2804                                         reg = regpair & 0xffffffff;
2805
2806                                         rs->fassign [reg] = hreg;
2807                                         rs->fsymbolic [hreg] = reg;
2808                                         rs->ffree_mask &= ~ (1 << hreg);
2809
2810                                         list = g_slist_next (list);
2811                                 }
2812                         }
2813                         if (call->out_freg_args)
2814                                 g_slist_free (call->out_freg_args);
2815                 }
2816
2817                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2818                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2819                         mono_regstate_free_int (rs, ins->sreg1);
2820                 }
2821                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2822                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2823                         mono_regstate_free_int (rs, ins->sreg2);
2824                 }*/
2825         
2826                 DEBUG (print_ins (i, ins));
2827                 /* this may result from a insert_before call */
2828                 if (!tmp->next)
2829                         bb->code = tmp->data;
2830                 tmp = tmp->next;
2831         }
2832
2833         g_free (reginfo);
2834         g_free (reginfof);
2835         g_list_free (fspill_list);
2836 }
2837
2838 static unsigned char*
2839 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2840 {
2841         if (use_sse2) {
2842                 amd64_sse_cvtsd2si_reg_reg (code, dreg, sreg);
2843         }
2844         else {
2845                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2846                 x86_fnstcw_membase(code, AMD64_RSP, 0);
2847                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2848                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2849                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2850                 amd64_fldcw_membase (code, AMD64_RSP, 2);
2851                 amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
2852                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2853                 amd64_pop_reg (code, dreg);
2854                 amd64_fldcw_membase (code, AMD64_RSP, 0);
2855                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2856         }
2857
2858         if (size == 1)
2859                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2860         else if (size == 2)
2861                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2862         return code;
2863 }
2864
2865 static unsigned char*
2866 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2867 {
2868         int sreg = tree->sreg1;
2869 #ifdef PLATFORM_WIN32
2870         guint8* br[5];
2871
2872         NOT_IMPLEMENTED;
2873
2874         /*
2875          * Under Windows:
2876          * If requested stack size is larger than one page,
2877          * perform stack-touch operation
2878          */
2879         /*
2880          * Generate stack probe code.
2881          * Under Windows, it is necessary to allocate one page at a time,
2882          * "touching" stack after each successful sub-allocation. This is
2883          * because of the way stack growth is implemented - there is a
2884          * guard page before the lowest stack page that is currently commited.
2885          * Stack normally grows sequentially so OS traps access to the
2886          * guard page and commits more pages when needed.
2887          */
2888         amd64_test_reg_imm (code, sreg, ~0xFFF);
2889         br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2890
2891         br[2] = code; /* loop */
2892         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
2893         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
2894         amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
2895         amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
2896         br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
2897         amd64_patch (br[3], br[2]);
2898         amd64_test_reg_reg (code, sreg, sreg);
2899         br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2900         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2901
2902         br[1] = code; x86_jump8 (code, 0);
2903
2904         amd64_patch (br[0], code);
2905         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2906         amd64_patch (br[1], code);
2907         amd64_patch (br[4], code);
2908 #else /* PLATFORM_WIN32 */
2909         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
2910 #endif
2911         if (tree->flags & MONO_INST_INIT) {
2912                 int offset = 0;
2913                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
2914                         amd64_push_reg (code, AMD64_RAX);
2915                         offset += 8;
2916                 }
2917                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
2918                         amd64_push_reg (code, AMD64_RCX);
2919                         offset += 8;
2920                 }
2921                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
2922                         amd64_push_reg (code, AMD64_RDI);
2923                         offset += 8;
2924                 }
2925                 
2926                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
2927                 if (sreg != AMD64_RCX)
2928                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
2929                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2930                                 
2931                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
2932                 amd64_cld (code);
2933                 amd64_prefix (code, X86_REP_PREFIX);
2934                 amd64_stosl (code);
2935                 
2936                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
2937                         amd64_pop_reg (code, AMD64_RDI);
2938                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
2939                         amd64_pop_reg (code, AMD64_RCX);
2940                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
2941                         amd64_pop_reg (code, AMD64_RAX);
2942         }
2943         return code;
2944 }
2945
2946 static guint8*
2947 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
2948 {
2949         CallInfo *cinfo;
2950         guint32 offset, quad;
2951
2952         /* Move return value to the target register */
2953         /* FIXME: do this in the local reg allocator */
2954         switch (ins->opcode) {
2955         case CEE_CALL:
2956         case OP_CALL_REG:
2957         case OP_CALL_MEMBASE:
2958         case OP_LCALL:
2959         case OP_LCALL_REG:
2960         case OP_LCALL_MEMBASE:
2961                 if (ins->dreg != AMD64_RAX)
2962                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
2963                 break;
2964         case OP_FCALL:
2965         case OP_FCALL_REG:
2966         case OP_FCALL_MEMBASE:
2967                 /* FIXME: optimize this */
2968                 offset = mono_spillvar_offset_float (cfg, 0);
2969                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2970                         if (use_sse2)
2971                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
2972                         else {
2973                                 amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
2974                                 amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
2975                         }
2976                 }
2977                 else {
2978                         if (use_sse2) {
2979                                 if (ins->dreg != AMD64_XMM0)
2980                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
2981                         }
2982                         else {
2983                                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
2984                                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
2985                         }
2986                 }
2987                 break;
2988         case OP_VCALL:
2989         case OP_VCALL_REG:
2990         case OP_VCALL_MEMBASE:
2991                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
2992                 if (cinfo->ret.storage == ArgValuetypeInReg) {
2993                         /* Pop the destination address from the stack */
2994                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2995                         amd64_pop_reg (code, AMD64_RCX);
2996                         
2997                         for (quad = 0; quad < 2; quad ++) {
2998                                 switch (cinfo->ret.pair_storage [quad]) {
2999                                 case ArgInIReg:
3000                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
3001                                         break;
3002                                 case ArgInFloatSSEReg:
3003                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3004                                         break;
3005                                 case ArgInDoubleSSEReg:
3006                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3007                                         break;
3008                                 case ArgNone:
3009                                         break;
3010                                 default:
3011                                         NOT_IMPLEMENTED;
3012                                 }
3013                         }
3014                 }
3015                 break;
3016         }
3017
3018         return code;
3019 }
3020
3021 /*
3022  * emit_load_volatile_arguments:
3023  *
3024  *  Load volatile arguments from the stack to the original input registers.
3025  * Required before a tail call.
3026  */
3027 static guint8*
3028 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3029 {
3030         MonoMethod *method = cfg->method;
3031         MonoMethodSignature *sig;
3032         MonoInst *inst;
3033         CallInfo *cinfo;
3034         guint32 i;
3035
3036         /* FIXME: Generate intermediate code instead */
3037
3038         sig = method->signature;
3039
3040         cinfo = get_call_info (sig, FALSE);
3041         
3042         /* This is the opposite of the code in emit_prolog */
3043
3044         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3045                 ArgInfo *ainfo = cinfo->args + i;
3046                 MonoType *arg_type;
3047                 inst = cfg->varinfo [i];
3048
3049                 if (sig->hasthis && (i == 0))
3050                         arg_type = &mono_defaults.object_class->byval_arg;
3051                 else
3052                         arg_type = sig->params [i - sig->hasthis];
3053
3054                 if (inst->opcode != OP_REGVAR) {
3055                         switch (ainfo->storage) {
3056                         case ArgInIReg: {
3057                                 guint32 size = 8;
3058
3059                                 /* FIXME: I1 etc */
3060                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
3061                                 break;
3062                         }
3063                         case ArgInFloatSSEReg:
3064                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3065                                 break;
3066                         case ArgInDoubleSSEReg:
3067                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3068                                 break;
3069                         default:
3070                                 break;
3071                         }
3072                 }
3073         }
3074
3075         g_free (cinfo);
3076
3077         return code;
3078 }
3079
3080 #define REAL_PRINT_REG(text,reg) \
3081 mono_assert (reg >= 0); \
3082 amd64_push_reg (code, AMD64_RAX); \
3083 amd64_push_reg (code, AMD64_RDX); \
3084 amd64_push_reg (code, AMD64_RCX); \
3085 amd64_push_reg (code, reg); \
3086 amd64_push_imm (code, reg); \
3087 amd64_push_imm (code, text " %d %p\n"); \
3088 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
3089 amd64_call_reg (code, AMD64_RAX); \
3090 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*4); \
3091 amd64_pop_reg (code, AMD64_RCX); \
3092 amd64_pop_reg (code, AMD64_RDX); \
3093 amd64_pop_reg (code, AMD64_RAX);
3094
3095 /* benchmark and set based on cpu */
3096 #define LOOP_ALIGNMENT 8
3097 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
3098
3099 void
3100 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3101 {
3102         MonoInst *ins;
3103         MonoCallInst *call;
3104         guint offset;
3105         guint8 *code = cfg->native_code + cfg->code_len;
3106         MonoInst *last_ins = NULL;
3107         guint last_offset = 0;
3108         int max_len, cpos;
3109
3110         if (cfg->opt & MONO_OPT_PEEPHOLE)
3111                 peephole_pass (cfg, bb);
3112
3113         if (cfg->opt & MONO_OPT_LOOP) {
3114                 int pad, align = LOOP_ALIGNMENT;
3115                 /* set alignment depending on cpu */
3116                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
3117                         pad = align - pad;
3118                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3119                         amd64_padding (code, pad);
3120                         cfg->code_len += pad;
3121                         bb->native_offset = cfg->code_len;
3122                 }
3123         }
3124
3125         if (cfg->verbose_level > 2)
3126                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3127
3128         cpos = bb->max_offset;
3129
3130         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3131                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
3132                 g_assert (!mono_compile_aot);
3133                 cpos += 6;
3134
3135                 cov->data [bb->dfn].cil_code = bb->cil_code;
3136                 /* this is not thread save, but good enough */
3137                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
3138         }
3139
3140         offset = code - cfg->native_code;
3141
3142         ins = bb->code;
3143         while (ins) {
3144                 offset = code - cfg->native_code;
3145
3146                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3147
3148                 if (offset > (cfg->code_size - max_len - 16)) {
3149                         cfg->code_size *= 2;
3150                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3151                         code = cfg->native_code + offset;
3152                         mono_jit_stats.code_reallocs++;
3153                 }
3154
3155                 mono_debug_record_line_number (cfg, ins, offset);
3156
3157                 switch (ins->opcode) {
3158                 case OP_BIGMUL:
3159                         amd64_mul_reg (code, ins->sreg2, TRUE);
3160                         break;
3161                 case OP_BIGMUL_UN:
3162                         amd64_mul_reg (code, ins->sreg2, FALSE);
3163                         break;
3164                 case OP_X86_SETEQ_MEMBASE:
3165                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
3166                         break;
3167                 case OP_STOREI1_MEMBASE_IMM:
3168                         g_assert (amd64_is_imm32 (ins->inst_imm));
3169                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
3170                         break;
3171                 case OP_STOREI2_MEMBASE_IMM:
3172                         g_assert (amd64_is_imm32 (ins->inst_imm));
3173                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
3174                         break;
3175                 case OP_STOREI4_MEMBASE_IMM:
3176                         g_assert (amd64_is_imm32 (ins->inst_imm));
3177                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
3178                         break;
3179                 case OP_STOREI1_MEMBASE_REG:
3180                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
3181                         break;
3182                 case OP_STOREI2_MEMBASE_REG:
3183                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
3184                         break;
3185                 case OP_STORE_MEMBASE_REG:
3186                 case OP_STOREI8_MEMBASE_REG:
3187                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
3188                         break;
3189                 case OP_STOREI4_MEMBASE_REG:
3190                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
3191                         break;
3192                 case OP_STORE_MEMBASE_IMM:
3193                 case OP_STOREI8_MEMBASE_IMM:
3194                         if (amd64_is_imm32 (ins->inst_imm))
3195                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
3196                         else {
3197                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
3198                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
3199                         }
3200                         break;
3201                 case CEE_LDIND_I:
3202                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
3203                         break;
3204                 case CEE_LDIND_I4:
3205                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3206                         break;
3207                 case CEE_LDIND_U4:
3208                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3209                         break;
3210                 case OP_LOADU4_MEM:
3211                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
3212                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
3213                         break;
3214                 case OP_LOAD_MEMBASE:
3215                 case OP_LOADI8_MEMBASE:
3216                         if (amd64_is_imm32 (ins->inst_offset)) {
3217                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
3218                         }
3219                         else {
3220                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
3221                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
3222                         }
3223                         break;
3224                 case OP_LOADI4_MEMBASE:
3225                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3226                         break;
3227                 case OP_LOADU4_MEMBASE:
3228                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
3229                         break;
3230                 case OP_LOADU1_MEMBASE:
3231                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
3232                         break;
3233                 case OP_LOADI1_MEMBASE:
3234                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
3235                         break;
3236                 case OP_LOADU2_MEMBASE:
3237                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
3238                         break;
3239                 case OP_LOADI2_MEMBASE:
3240                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
3241                         break;
3242                 case CEE_CONV_I1:
3243                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3244                         break;
3245                 case CEE_CONV_I2:
3246                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3247                         break;
3248                 case CEE_CONV_U1:
3249                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
3250                         break;
3251                 case CEE_CONV_U2:
3252                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
3253                         break;
3254                 case CEE_CONV_U8:
3255                 case CEE_CONV_U:
3256                         /* Clean out the upper word */
3257                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3258                         break;
3259                 case CEE_CONV_I8:
3260                 case CEE_CONV_I:
3261                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
3262                         break;                  
3263                 case OP_COMPARE:
3264                 case OP_LCOMPARE:
3265                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3266                         break;
3267                 case OP_COMPARE_IMM:
3268                         if (!amd64_is_imm32 (ins->inst_imm)) {
3269                                 amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
3270                                 amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
3271                         } else {
3272                                 amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
3273                         }
3274                         break;
3275                 case OP_X86_COMPARE_MEMBASE_REG:
3276                         amd64_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
3277                         break;
3278                 case OP_X86_COMPARE_MEMBASE_IMM:
3279                         g_assert (amd64_is_imm32 (ins->inst_imm));
3280                         amd64_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
3281                         break;
3282                 case OP_X86_COMPARE_REG_MEMBASE:
3283                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
3284                         break;
3285                 case OP_X86_TEST_NULL:
3286                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
3287                         break;
3288                 case OP_AMD64_TEST_NULL:
3289                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3290                         break;
3291                 case OP_X86_ADD_MEMBASE_IMM:
3292                         /* FIXME: Make a 64 version too */
3293                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3294                         break;
3295                 case OP_X86_ADD_MEMBASE:
3296                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3297                         break;
3298                 case OP_X86_SUB_MEMBASE_IMM:
3299                         g_assert (amd64_is_imm32 (ins->inst_imm));
3300                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3301                         break;
3302                 case OP_X86_SUB_MEMBASE:
3303                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3304                         break;
3305                 case OP_X86_INC_MEMBASE:
3306                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3307                         break;
3308                 case OP_X86_INC_REG:
3309                         amd64_inc_reg_size (code, ins->dreg, 4);
3310                         break;
3311                 case OP_X86_DEC_MEMBASE:
3312                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3313                         break;
3314                 case OP_X86_DEC_REG:
3315                         amd64_dec_reg_size (code, ins->dreg, 4);
3316                         break;
3317                 case OP_X86_MUL_MEMBASE:
3318                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3319                         break;
3320                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
3321                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
3322                         break;
3323                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
3324                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3325                         break;
3326                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
3327                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3328                         break;
3329                 case CEE_BREAK:
3330                         amd64_breakpoint (code);
3331                         break;
3332
3333                 case OP_ADDCC:
3334                 case CEE_ADD:
3335                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
3336                         break;
3337                 case OP_ADC:
3338                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
3339                         break;
3340                 case OP_ADD_IMM:
3341                         g_assert (amd64_is_imm32 (ins->inst_imm));
3342                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
3343                         break;
3344                 case OP_ADC_IMM:
3345                         g_assert (amd64_is_imm32 (ins->inst_imm));
3346                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
3347                         break;
3348                 case OP_SUBCC:
3349                 case CEE_SUB:
3350                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
3351                         break;
3352                 case OP_SBB:
3353                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
3354                         break;
3355                 case OP_SUB_IMM:
3356                         g_assert (amd64_is_imm32 (ins->inst_imm));
3357                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
3358                         break;
3359                 case OP_SBB_IMM:
3360                         g_assert (amd64_is_imm32 (ins->inst_imm));
3361                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
3362                         break;
3363                 case CEE_AND:
3364                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
3365                         break;
3366                 case OP_AND_IMM:
3367                         g_assert (amd64_is_imm32 (ins->inst_imm));
3368                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
3369                         break;
3370                 case CEE_MUL:
3371                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3372                         break;
3373                 case OP_MUL_IMM:
3374                         amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
3375                         break;
3376                 case CEE_DIV:
3377                         amd64_cdq (code);
3378                         amd64_div_reg (code, ins->sreg2, TRUE);
3379                         break;
3380                 case CEE_DIV_UN:
3381                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3382                         amd64_div_reg (code, ins->sreg2, FALSE);
3383                         break;
3384                 case OP_DIV_IMM:
3385                         g_assert (amd64_is_imm32 (ins->inst_imm));
3386                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3387                         amd64_cdq (code);
3388                         amd64_div_reg (code, ins->sreg2, TRUE);
3389                         break;
3390                 case CEE_REM:
3391                         amd64_cdq (code);
3392                         amd64_div_reg (code, ins->sreg2, TRUE);
3393                         break;
3394                 case CEE_REM_UN:
3395                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3396                         amd64_div_reg (code, ins->sreg2, FALSE);
3397                         break;
3398                 case OP_REM_IMM:
3399                         g_assert (amd64_is_imm32 (ins->inst_imm));
3400                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3401                         amd64_cdq (code);
3402                         amd64_div_reg (code, ins->sreg2, TRUE);
3403                         break;
3404                 case CEE_OR:
3405                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
3406                         break;
3407                 case OP_OR_IMM
3408 :                       g_assert (amd64_is_imm32 (ins->inst_imm));
3409                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
3410                         break;
3411                 case CEE_XOR:
3412                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
3413                         break;
3414                 case OP_XOR_IMM:
3415                         g_assert (amd64_is_imm32 (ins->inst_imm));
3416                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
3417                         break;
3418                 case CEE_SHL:
3419                 case OP_LSHL:
3420                         g_assert (ins->sreg2 == AMD64_RCX);
3421                         amd64_shift_reg (code, X86_SHL, ins->dreg);
3422                         break;
3423                 case CEE_SHR:
3424                 case OP_LSHR:
3425                         g_assert (ins->sreg2 == AMD64_RCX);
3426                         amd64_shift_reg (code, X86_SAR, ins->dreg);
3427                         break;
3428                 case OP_SHR_IMM:
3429                         g_assert (amd64_is_imm32 (ins->inst_imm));
3430                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3431                         break;
3432                 case OP_LSHR_IMM:
3433                         g_assert (amd64_is_imm32 (ins->inst_imm));
3434                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
3435                         break;
3436                 case OP_SHR_UN_IMM:
3437                         g_assert (amd64_is_imm32 (ins->inst_imm));
3438                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3439                         break;
3440                 case OP_LSHR_UN_IMM:
3441                         g_assert (amd64_is_imm32 (ins->inst_imm));
3442                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
3443                         break;
3444                 case CEE_SHR_UN:
3445                         g_assert (ins->sreg2 == AMD64_RCX);
3446                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3447                         break;
3448                 case OP_LSHR_UN:
3449                         g_assert (ins->sreg2 == AMD64_RCX);
3450                         amd64_shift_reg (code, X86_SHR, ins->dreg);
3451                         break;
3452                 case OP_SHL_IMM:
3453                         g_assert (amd64_is_imm32 (ins->inst_imm));
3454                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3455                         break;
3456                 case OP_LSHL_IMM:
3457                         g_assert (amd64_is_imm32 (ins->inst_imm));
3458                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
3459                         break;
3460
3461                 case OP_IADDCC:
3462                 case OP_IADD:
3463                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
3464                         break;
3465                 case OP_IADC:
3466                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
3467                         break;
3468                 case OP_IADD_IMM:
3469                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
3470                         break;
3471                 case OP_IADC_IMM:
3472                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
3473                         break;
3474                 case OP_ISUBCC:
3475                 case OP_ISUB:
3476                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
3477                         break;
3478                 case OP_ISBB:
3479                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
3480                         break;
3481                 case OP_ISUB_IMM:
3482                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
3483                         break;
3484                 case OP_ISBB_IMM:
3485                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
3486                         break;
3487                 case OP_IAND:
3488                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
3489                         break;
3490                 case OP_IAND_IMM:
3491                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
3492                         break;
3493                 case OP_IOR:
3494                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
3495                         break;
3496                 case OP_IOR_IMM:
3497                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
3498                         break;
3499                 case OP_IXOR:
3500                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
3501                         break;
3502                 case OP_IXOR_IMM:
3503                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
3504                         break;
3505                 case OP_INEG:
3506                         amd64_neg_reg_size (code, ins->sreg1, 4);
3507                         break;
3508                 case OP_INOT:
3509                         amd64_not_reg_size (code, ins->sreg1, 4);
3510                         break;
3511                 case OP_ISHL:
3512                         g_assert (ins->sreg2 == AMD64_RCX);
3513                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
3514                         break;
3515                 case OP_ISHR:
3516                         g_assert (ins->sreg2 == AMD64_RCX);
3517                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
3518                         break;
3519                 case OP_ISHR_IMM:
3520                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3521                         break;
3522                 case OP_ISHR_UN_IMM:
3523                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3524                         break;
3525                 case OP_ISHR_UN:
3526                         g_assert (ins->sreg2 == AMD64_RCX);
3527                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3528                         break;
3529                 case OP_ISHL_IMM:
3530                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3531                         break;
3532                 case OP_IMUL:
3533                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3534                         break;
3535                 case OP_IMUL_IMM:
3536                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
3537                         break;
3538                 case OP_IMUL_OVF:
3539                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3540                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3541                         break;
3542                 case OP_IMUL_OVF_UN: {
3543                         /* the mul operation and the exception check should most likely be split */
3544                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
3545                         /*g_assert (ins->sreg2 == X86_EAX);
3546                         g_assert (ins->dreg == X86_EAX);*/
3547                         if (ins->sreg2 == X86_EAX) {
3548                                 non_eax_reg = ins->sreg1;
3549                         } else if (ins->sreg1 == X86_EAX) {
3550                                 non_eax_reg = ins->sreg2;
3551                         } else {
3552                                 /* no need to save since we're going to store to it anyway */
3553                                 if (ins->dreg != X86_EAX) {
3554                                         saved_eax = TRUE;
3555                                         amd64_push_reg (code, X86_EAX);
3556                                 }
3557                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
3558                                 non_eax_reg = ins->sreg2;
3559                         }
3560                         if (ins->dreg == X86_EDX) {
3561                                 if (!saved_eax) {
3562                                         saved_eax = TRUE;
3563                                         amd64_push_reg (code, X86_EAX);
3564                                 }
3565                         } else if (ins->dreg != X86_EAX) {
3566                                 saved_edx = TRUE;
3567                                 amd64_push_reg (code, X86_EDX);
3568                         }
3569                         amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
3570                         /* save before the check since pop and mov don't change the flags */
3571                         if (ins->dreg != X86_EAX)
3572                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
3573                         if (saved_edx)
3574                                 amd64_pop_reg (code, X86_EDX);
3575                         if (saved_eax)
3576                                 amd64_pop_reg (code, X86_EAX);
3577                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3578                         break;
3579                 }
3580                 case OP_IDIV:
3581                         amd64_cdq_size (code, 4);
3582                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3583                         break;
3584                 case OP_IDIV_UN:
3585                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3586                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3587                         break;
3588                 case OP_IDIV_IMM:
3589                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3590                         amd64_cdq_size (code, 4);
3591                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3592                         break;
3593                 case OP_IREM:
3594                         amd64_cdq_size (code, 4);
3595                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3596                         break;
3597                 case OP_IREM_UN:
3598                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3599                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3600                         break;
3601                 case OP_IREM_IMM:
3602                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3603                         amd64_cdq_size (code, 4);
3604                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3605                         break;
3606
3607                 case OP_ICOMPARE:
3608                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
3609                         break;
3610                 case OP_ICOMPARE_IMM:
3611                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
3612                         break;
3613
3614                 case OP_IBEQ:
3615                 case OP_IBLT:
3616                 case OP_IBGT:
3617                 case OP_IBGE:
3618                 case OP_IBLE:
3619                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
3620                         break;
3621                 case OP_IBNE_UN:
3622                 case OP_IBLT_UN:
3623                 case OP_IBGT_UN:
3624                 case OP_IBGE_UN:
3625                 case OP_IBLE_UN:
3626                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
3627                         break;
3628                 case OP_COND_EXC_IOV:
3629                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3630                                                                                 TRUE, ins->inst_p1);
3631                         break;
3632                 case OP_COND_EXC_IC:
3633                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3634                                                                                 FALSE, ins->inst_p1);
3635                         break;
3636                 case CEE_NOT:
3637                         amd64_not_reg (code, ins->sreg1);
3638                         break;
3639                 case CEE_NEG:
3640                         amd64_neg_reg (code, ins->sreg1);
3641                         break;
3642                 case OP_SEXT_I1:
3643                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3644                         break;
3645                 case OP_SEXT_I2:
3646                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3647                         break;
3648                 case OP_ICONST:
3649                 case OP_I8CONST:
3650                         if ((((guint64)ins->inst_c0) >> 32) == 0)
3651                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
3652                         else
3653                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
3654                         break;
3655                 case OP_AOTCONST:
3656                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3657                         amd64_set_reg_template (code, ins->dreg);
3658                         break;
3659                 case CEE_CONV_I4:
3660                 case CEE_CONV_U4:
3661                 case OP_MOVE:
3662                 case OP_SETREG:
3663                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
3664                         break;
3665                 case OP_AMD64_SET_XMMREG_R4: {
3666                         if (use_sse2) {
3667                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
3668                         }
3669                         else {
3670                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
3671                                 /* ins->dreg is set to -1 by the reg allocator */
3672                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
3673                         }
3674                         break;
3675                 }
3676                 case OP_AMD64_SET_XMMREG_R8: {
3677                         if (use_sse2) {
3678                                 if (ins->dreg != ins->sreg1)
3679                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3680                         }
3681                         else {
3682                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
3683                                 /* ins->dreg is set to -1 by the reg allocator */
3684                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
3685                         }
3686                         break;
3687                 }
3688                 case CEE_JMP: {
3689                         /*
3690                          * Note: this 'frame destruction' logic is useful for tail calls, too.
3691                          * Keep in sync with the code in emit_epilog.
3692                          */
3693                         int pos = 0, i;
3694
3695                         /* FIXME: no tracing support... */
3696                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3697                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
3698
3699                         g_assert (!cfg->method->save_lmf);
3700
3701                         code = emit_load_volatile_arguments (cfg, code);
3702
3703                         for (i = 0; i < AMD64_NREG; ++i)
3704                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
3705                                         pos -= sizeof (gpointer);
3706                         
3707                         if (pos)
3708                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
3709
3710                         /* Pop registers in reverse order */
3711                         for (i = AMD64_NREG - 1; i > 0; --i)
3712                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3713                                         amd64_pop_reg (code, i);
3714                                 }
3715
3716                         amd64_leave (code);
3717                         offset = code - cfg->native_code;
3718                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3719                         amd64_set_reg_template (code, AMD64_R11);
3720                         amd64_jump_reg (code, AMD64_R11);
3721                         break;
3722                 }
3723                 case OP_CHECK_THIS:
3724                         /* ensure ins->sreg1 is not NULL */
3725                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
3726                         break;
3727                 case OP_ARGLIST: {
3728                         amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
3729                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
3730                         break;
3731                 }
3732                 case OP_FCALL:
3733                 case OP_LCALL:
3734                 case OP_VCALL:
3735                 case OP_VOIDCALL:
3736                 case CEE_CALL:
3737                         call = (MonoCallInst*)ins;
3738                         /*
3739                          * The AMD64 ABI forces callers to know about varargs.
3740                          */
3741                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
3742                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3743
3744                         if (ins->flags & MONO_INST_HAS_METHOD)
3745                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3746                         else
3747                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3748                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3749                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3750                         code = emit_move_return_value (cfg, ins, code);
3751                         break;
3752                 case OP_FCALL_REG:
3753                 case OP_LCALL_REG:
3754                 case OP_VCALL_REG:
3755                 case OP_VOIDCALL_REG:
3756                 case OP_CALL_REG:
3757                         call = (MonoCallInst*)ins;
3758
3759                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3760                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3761                                 ins->sreg1 = AMD64_R11;
3762                         }
3763
3764                         /*
3765                          * The AMD64 ABI forces callers to know about varargs.
3766                          */
3767                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
3768                                 if (ins->sreg1 == AMD64_RAX) {
3769                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
3770                                         ins->sreg1 = AMD64_R11;
3771                                 }
3772                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3773                         }
3774                         amd64_call_reg (code, ins->sreg1);
3775                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3776                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3777                         code = emit_move_return_value (cfg, ins, code);
3778                         break;
3779                 case OP_FCALL_MEMBASE:
3780                 case OP_LCALL_MEMBASE:
3781                 case OP_VCALL_MEMBASE:
3782                 case OP_VOIDCALL_MEMBASE:
3783                 case OP_CALL_MEMBASE:
3784                         call = (MonoCallInst*)ins;
3785
3786                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3787                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3788                                 ins->sreg1 = AMD64_R11;
3789                         }
3790
3791                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3792                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3793                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3794                         code = emit_move_return_value (cfg, ins, code);
3795                         break;
3796                 case OP_OUTARG:
3797                 case OP_X86_PUSH:
3798                         amd64_push_reg (code, ins->sreg1);
3799                         break;
3800                 case OP_X86_PUSH_IMM:
3801                         g_assert (amd64_is_imm32 (ins->inst_imm));
3802                         amd64_push_imm (code, ins->inst_imm);
3803                         break;
3804                 case OP_X86_PUSH_MEMBASE:
3805                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3806                         break;
3807                 case OP_X86_PUSH_OBJ: 
3808                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3809                         amd64_push_reg (code, AMD64_RDI);
3810                         amd64_push_reg (code, AMD64_RSI);
3811                         amd64_push_reg (code, AMD64_RCX);
3812                         if (ins->inst_offset)
3813                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3814                         else
3815                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3816                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
3817                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
3818                         amd64_cld (code);
3819                         amd64_prefix (code, X86_REP_PREFIX);
3820                         amd64_movsd (code);
3821                         amd64_pop_reg (code, AMD64_RCX);
3822                         amd64_pop_reg (code, AMD64_RSI);
3823                         amd64_pop_reg (code, AMD64_RDI);
3824                         break;
3825                 case OP_X86_LEA:
3826                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3827                         break;
3828                 case OP_X86_LEA_MEMBASE:
3829                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3830                         break;
3831                 case OP_X86_XCHG:
3832                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3833                         break;
3834                 case OP_LOCALLOC:
3835                         /* keep alignment */
3836                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3837                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3838                         code = mono_emit_stack_alloc (code, ins);
3839                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3840                         break;
3841                 case CEE_RET:
3842                         amd64_ret (code);
3843                         break;
3844                 case CEE_THROW: {
3845                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3846                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3847                                              (gpointer)"mono_arch_throw_exception");
3848                         break;
3849                 }
3850                 case OP_RETHROW: {
3851                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3852                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3853                                              (gpointer)"mono_arch_rethrow_exception");
3854                         break;
3855                 }
3856                 case OP_CALL_HANDLER: 
3857                         /* Align stack */
3858                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
3859                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3860                         amd64_call_imm (code, 0);
3861                         /* Restore stack alignment */
3862                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3863                         break;
3864                 case OP_LABEL:
3865                         ins->inst_c0 = code - cfg->native_code;
3866                         break;
3867                 case CEE_BR:
3868                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3869                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3870                         //break;
3871                         if (ins->flags & MONO_INST_BRLABEL) {
3872                                 if (ins->inst_i0->inst_c0) {
3873                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3874                                 } else {
3875                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3876                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3877                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3878                                                 x86_jump8 (code, 0);
3879                                         else 
3880                                                 x86_jump32 (code, 0);
3881                                 }
3882                         } else {
3883                                 if (ins->inst_target_bb->native_offset) {
3884                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
3885                                 } else {
3886                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3887                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3888                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3889                                                 x86_jump8 (code, 0);
3890                                         else 
3891                                                 x86_jump32 (code, 0);
3892                                 } 
3893                         }
3894                         break;
3895                 case OP_BR_REG:
3896                         amd64_jump_reg (code, ins->sreg1);
3897                         break;
3898                 case OP_CEQ:
3899                 case OP_ICEQ:
3900                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3901                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3902                         break;
3903                 case OP_CLT:
3904                 case OP_ICLT:
3905                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
3906                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3907                         break;
3908                 case OP_CLT_UN:
3909                 case OP_ICLT_UN:
3910                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3911                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3912                         break;
3913                 case OP_CGT:
3914                 case OP_ICGT:
3915                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
3916                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3917                         break;
3918                 case OP_CGT_UN:
3919                 case OP_ICGT_UN:
3920                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3921                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3922                         break;
3923                 case OP_COND_EXC_EQ:
3924                 case OP_COND_EXC_NE_UN:
3925                 case OP_COND_EXC_LT:
3926                 case OP_COND_EXC_LT_UN:
3927                 case OP_COND_EXC_GT:
3928                 case OP_COND_EXC_GT_UN:
3929                 case OP_COND_EXC_GE:
3930                 case OP_COND_EXC_GE_UN:
3931                 case OP_COND_EXC_LE:
3932                 case OP_COND_EXC_LE_UN:
3933                 case OP_COND_EXC_OV:
3934                 case OP_COND_EXC_NO:
3935                 case OP_COND_EXC_C:
3936                 case OP_COND_EXC_NC:
3937                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
3938                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
3939                         break;
3940                 case CEE_BEQ:
3941                 case CEE_BNE_UN:
3942                 case CEE_BLT:
3943                 case CEE_BLT_UN:
3944                 case CEE_BGT:
3945                 case CEE_BGT_UN:
3946                 case CEE_BGE:
3947                 case CEE_BGE_UN:
3948                 case CEE_BLE:
3949                 case CEE_BLE_UN:
3950                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
3951                         break;
3952
3953                 /* floating point opcodes */
3954                 case OP_R8CONST: {
3955                         double d = *(double *)ins->inst_p0;
3956
3957                         if (use_sse2) {
3958                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
3959                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3960                                 }
3961                                 else {
3962                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3963                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3964                                 }
3965                         }
3966                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
3967                                 amd64_fldz (code);
3968                         } else if (d == 1.0) {
3969                                 x86_fld1 (code);
3970                         } else {
3971                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3972                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
3973                         }
3974                         break;
3975                 }
3976                 case OP_R4CONST: {
3977                         float f = *(float *)ins->inst_p0;
3978
3979                         if (use_sse2) {
3980                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
3981                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3982                                 }
3983                                 else {
3984                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3985                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3986                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
3987                                 }
3988                         }
3989                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
3990                                 amd64_fldz (code);
3991                         } else if (f == 1.0) {
3992                                 x86_fld1 (code);
3993                         } else {
3994                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3995                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
3996                         }
3997                         break;
3998                 }
3999                 case OP_STORER8_MEMBASE_REG:
4000                         if (use_sse2)
4001                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
4002                         else
4003                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
4004                         break;
4005                 case OP_LOADR8_SPILL_MEMBASE:
4006                         if (use_sse2)
4007                                 g_assert_not_reached ();
4008                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4009                         amd64_fxch (code, 1);
4010                         break;
4011                 case OP_LOADR8_MEMBASE:
4012                         if (use_sse2)
4013                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4014                         else
4015                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4016                         break;
4017                 case OP_STORER4_MEMBASE_REG:
4018                         if (use_sse2) {
4019                                 /* This requires a double->single conversion */
4020                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
4021                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
4022                         }
4023                         else
4024                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
4025                         break;
4026                 case OP_LOADR4_MEMBASE:
4027                         if (use_sse2) {
4028                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4029                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4030                         }
4031                         else
4032                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4033                         break;
4034                 case CEE_CONV_R4: /* FIXME: change precision */
4035                 case CEE_CONV_R8:
4036                         if (use_sse2)
4037                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4038                         else {
4039                                 amd64_push_reg (code, ins->sreg1);
4040                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
4041                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4042                         }
4043                         break;
4044                 case CEE_CONV_R_UN:
4045                         /* Emulated */
4046                         g_assert_not_reached ();
4047                         break;
4048                 case OP_LCONV_TO_R4: /* FIXME: change precision */
4049                 case OP_LCONV_TO_R8:
4050                         if (use_sse2)
4051                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4052                         else {
4053                                 amd64_push_reg (code, ins->sreg1);
4054                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4055                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4056                         }
4057                         break;
4058                 case OP_X86_FP_LOAD_I8:
4059                         if (use_sse2)
4060                                 g_assert_not_reached ();
4061                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4062                         break;
4063                 case OP_X86_FP_LOAD_I4:
4064                         if (use_sse2)
4065                                 g_assert_not_reached ();
4066                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4067                         break;
4068                 case OP_FCONV_TO_I1:
4069                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4070                         break;
4071                 case OP_FCONV_TO_U1:
4072                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4073                         break;
4074                 case OP_FCONV_TO_I2:
4075                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4076                         break;
4077                 case OP_FCONV_TO_U2:
4078                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4079                         break;
4080                 case OP_FCONV_TO_I4:
4081                 case OP_FCONV_TO_I:
4082                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4083                         break;
4084                 case OP_FCONV_TO_I8:
4085                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4086                         break;
4087                 case OP_LCONV_TO_R_UN: { 
4088                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
4089                         guint8 *br;
4090
4091                         if (use_sse2)
4092                                 g_assert_not_reached ();
4093
4094                         /* load 64bit integer to FP stack */
4095                         amd64_push_imm (code, 0);
4096                         amd64_push_reg (code, ins->sreg2);
4097                         amd64_push_reg (code, ins->sreg1);
4098                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4099                         /* store as 80bit FP value */
4100                         x86_fst80_membase (code, AMD64_RSP, 0);
4101                         
4102                         /* test if lreg is negative */
4103                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4104                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
4105         
4106                         /* add correction constant mn */
4107                         x86_fld80_mem (code, mn);
4108                         x86_fld80_membase (code, AMD64_RSP, 0);
4109                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4110                         x86_fst80_membase (code, AMD64_RSP, 0);
4111
4112                         amd64_patch (br, code);
4113
4114                         x86_fld80_membase (code, AMD64_RSP, 0);
4115                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 12);
4116
4117                         break;
4118                 }
4119                 case OP_LCONV_TO_OVF_I: {
4120                         guint8 *br [3], *label [1];
4121
4122                         if (use_sse2)
4123                                 g_assert_not_reached ();
4124
4125                         /* 
4126                          * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4127                          */
4128                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
4129
4130                         /* If the low word top bit is set, see if we are negative */
4131                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
4132                         /* We are not negative (no top bit set, check for our top word to be zero */
4133                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4134                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
4135                         label [0] = code;
4136
4137                         /* throw exception */
4138                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
4139                         x86_jump32 (code, 0);
4140         
4141                         amd64_patch (br [0], code);
4142                         /* our top bit is set, check that top word is 0xfffffff */
4143                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
4144                 
4145                         amd64_patch (br [1], code);
4146                         /* nope, emit exception */
4147                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
4148                         amd64_patch (br [2], label [0]);
4149
4150                         if (ins->dreg != ins->sreg1)
4151                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
4152                         break;
4153                 }
4154                 case CEE_CONV_OVF_U4:
4155                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
4156                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
4157                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4158                         break;
4159                 case CEE_CONV_OVF_I4_UN:
4160                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
4161                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
4162                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4163                         break;
4164                 case OP_FMOVE:
4165                         if (use_sse2 && (ins->dreg != ins->sreg1))
4166                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
4167                         break;
4168                 case OP_FADD:
4169                         if (use_sse2)
4170                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
4171                         else
4172                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4173                         break;
4174                 case OP_FSUB:
4175                         if (use_sse2)
4176                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
4177                         else
4178                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
4179                         break;          
4180                 case OP_FMUL:
4181                         if (use_sse2)
4182                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
4183                         else
4184                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
4185                         break;          
4186                 case OP_FDIV:
4187                         if (use_sse2)
4188                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
4189                         else
4190                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
4191                         break;          
4192                 case OP_FNEG:
4193                         if (use_sse2) {
4194                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
4195                                 amd64_push_reg (code, AMD64_R11);
4196                                 amd64_push_reg (code, AMD64_R11);
4197                                 amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
4198                         }
4199                         else
4200                                 amd64_fchs (code);
4201                         break;          
4202                 case OP_SIN:
4203                         if (use_sse2)
4204                                 g_assert_not_reached ();
4205                         amd64_fsin (code);
4206                         amd64_fldz (code);
4207                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4208                         break;          
4209                 case OP_COS:
4210                         if (use_sse2)
4211                                 g_assert_not_reached ();
4212                         amd64_fcos (code);
4213                         amd64_fldz (code);
4214                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4215                         break;          
4216                 case OP_ABS:
4217                         if (use_sse2)
4218                                 g_assert_not_reached ();
4219                         amd64_fabs (code);
4220                         break;          
4221                 case OP_TAN: {
4222                         /* 
4223                          * it really doesn't make sense to inline all this code,
4224                          * it's here just to show that things may not be as simple 
4225                          * as they appear.
4226                          */
4227                         guchar *check_pos, *end_tan, *pop_jump;
4228                         if (use_sse2)
4229                                 g_assert_not_reached ();
4230                         amd64_push_reg (code, AMD64_RAX);
4231                         amd64_fptan (code);
4232                         amd64_fnstsw (code);
4233                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4234                         check_pos = code;
4235                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4236                         amd64_fstp (code, 0); /* pop the 1.0 */
4237                         end_tan = code;
4238                         x86_jump8 (code, 0);
4239                         amd64_fldpi (code);
4240                         amd64_fp_op (code, X86_FADD, 0);
4241                         amd64_fxch (code, 1);
4242                         x86_fprem1 (code);
4243                         amd64_fstsw (code);
4244                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4245                         pop_jump = code;
4246                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4247                         amd64_fstp (code, 1);
4248                         amd64_fptan (code);
4249                         amd64_patch (pop_jump, code);
4250                         amd64_fstp (code, 0); /* pop the 1.0 */
4251                         amd64_patch (check_pos, code);
4252                         amd64_patch (end_tan, code);
4253                         amd64_fldz (code);
4254                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4255                         amd64_pop_reg (code, AMD64_RAX);
4256                         break;
4257                 }
4258                 case OP_ATAN:
4259                         if (use_sse2)
4260                                 g_assert_not_reached ();
4261                         x86_fld1 (code);
4262                         amd64_fpatan (code);
4263                         amd64_fldz (code);
4264                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4265                         break;          
4266                 case OP_SQRT:
4267                         if (use_sse2)
4268                                 g_assert_not_reached ();
4269                         amd64_fsqrt (code);
4270                         break;          
4271                 case OP_X86_FPOP:
4272                         if (!use_sse2)
4273                                 amd64_fstp (code, 0);
4274                         break;          
4275                 case OP_FREM: {
4276                         guint8 *l1, *l2;
4277
4278                         if (use_sse2)
4279                                 g_assert_not_reached ();
4280                         amd64_push_reg (code, AMD64_RAX);
4281                         /* we need to exchange ST(0) with ST(1) */
4282                         amd64_fxch (code, 1);
4283
4284                         /* this requires a loop, because fprem somtimes 
4285                          * returns a partial remainder */
4286                         l1 = code;
4287                         /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
4288                         /* x86_fprem1 (code); */
4289                         amd64_fprem (code);
4290                         amd64_fnstsw (code);
4291                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
4292                         l2 = code + 2;
4293                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
4294
4295                         /* pop result */
4296                         amd64_fstp (code, 1);
4297
4298                         amd64_pop_reg (code, AMD64_RAX);
4299                         break;
4300                 }
4301                 case OP_FCOMPARE:
4302                         if (use_sse2) {
4303                                 amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4304                                 break;
4305                         }
4306                         if (cfg->opt & MONO_OPT_FCMOV) {
4307                                 amd64_fcomip (code, 1);
4308                                 amd64_fstp (code, 0);
4309                                 break;
4310                         }
4311                         /* this overwrites EAX */
4312                         EMIT_FPCOMPARE(code);
4313                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4314                         break;
4315                 case OP_FCEQ:
4316                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4317                                 /* zeroing the register at the start results in 
4318                                  * shorter and faster code (we can also remove the widening op)
4319                                  */
4320                                 guchar *unordered_check;
4321                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4322                                 
4323                                 if (use_sse2)
4324                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4325                                 else {
4326                                         amd64_fcomip (code, 1);
4327                                         amd64_fstp (code, 0);
4328                                 }
4329                                 unordered_check = code;
4330                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4331                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
4332                                 amd64_patch (unordered_check, code);
4333                                 break;
4334                         }
4335                         if (ins->dreg != AMD64_RAX) 
4336                                 amd64_push_reg (code, AMD64_RAX);
4337
4338                         EMIT_FPCOMPARE(code);
4339                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4340                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4341                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4342                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4343
4344                         if (ins->dreg != AMD64_RAX) 
4345                                 amd64_pop_reg (code, AMD64_RAX);
4346                         break;
4347                 case OP_FCLT:
4348                 case OP_FCLT_UN:
4349                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4350                                 /* zeroing the register at the start results in 
4351                                  * shorter and faster code (we can also remove the widening op)
4352                                  */
4353                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4354                                 if (use_sse2)
4355                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4356                                 else {
4357                                         amd64_fcomip (code, 1);
4358                                         amd64_fstp (code, 0);
4359                                 }
4360                                 if (ins->opcode == OP_FCLT_UN) {
4361                                         guchar *unordered_check = code;
4362                                         guchar *jump_to_end;
4363                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4364                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4365                                         jump_to_end = code;
4366                                         x86_jump8 (code, 0);
4367                                         amd64_patch (unordered_check, code);
4368                                         amd64_inc_reg (code, ins->dreg);
4369                                         amd64_patch (jump_to_end, code);
4370                                 } else {
4371                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4372                                 }
4373                                 break;
4374                         }
4375                         if (ins->dreg != AMD64_RAX) 
4376                                 amd64_push_reg (code, AMD64_RAX);
4377
4378                         EMIT_FPCOMPARE(code);
4379                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4380                         if (ins->opcode == OP_FCLT_UN) {
4381                                 guchar *is_not_zero_check, *end_jump;
4382                                 is_not_zero_check = code;
4383                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4384                                 end_jump = code;
4385                                 x86_jump8 (code, 0);
4386                                 amd64_patch (is_not_zero_check, code);
4387                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4388
4389                                 amd64_patch (end_jump, code);
4390                         }
4391                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4392                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4393
4394                         if (ins->dreg != AMD64_RAX) 
4395                                 amd64_pop_reg (code, AMD64_RAX);
4396                         break;
4397                 case OP_FCGT:
4398                 case OP_FCGT_UN:
4399                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4400                                 /* zeroing the register at the start results in 
4401                                  * shorter and faster code (we can also remove the widening op)
4402                                  */
4403                                 guchar *unordered_check;
4404                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4405                                 if (use_sse2)
4406                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4407                                 else {
4408                                         amd64_fcomip (code, 1);
4409                                         amd64_fstp (code, 0);
4410                                 }
4411                                 if (ins->opcode == OP_FCGT) {
4412                                         unordered_check = code;
4413                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4414                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4415                                         amd64_patch (unordered_check, code);
4416                                 } else {
4417                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4418                                 }
4419                                 break;
4420                         }
4421                         if (ins->dreg != AMD64_RAX) 
4422                                 amd64_push_reg (code, AMD64_RAX);
4423
4424                         EMIT_FPCOMPARE(code);
4425                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4426                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4427                         if (ins->opcode == OP_FCGT_UN) {
4428                                 guchar *is_not_zero_check, *end_jump;
4429                                 is_not_zero_check = code;
4430                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4431                                 end_jump = code;
4432                                 x86_jump8 (code, 0);
4433                                 amd64_patch (is_not_zero_check, code);
4434                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4435
4436                                 amd64_patch (end_jump, code);
4437                         }
4438                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4439                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4440
4441                         if (ins->dreg != AMD64_RAX) 
4442                                 amd64_pop_reg (code, AMD64_RAX);
4443                         break;
4444                 case OP_FBEQ:
4445                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4446                                 guchar *jump = code;
4447                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
4448                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4449                                 amd64_patch (jump, code);
4450                                 break;
4451                         }
4452                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4453                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
4454                         break;
4455                 case OP_FBNE_UN:
4456                         /* Branch if C013 != 100 */
4457                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4458                                 /* branch if !ZF or (PF|CF) */
4459                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4460                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4461                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
4462                                 break;
4463                         }
4464                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4465                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4466                         break;
4467                 case OP_FBLT:
4468                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4469                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4470                                 break;
4471                         }
4472                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4473                         break;
4474                 case OP_FBLT_UN:
4475                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4476                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4477                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4478                                 break;
4479                         }
4480                         if (ins->opcode == OP_FBLT_UN) {
4481                                 guchar *is_not_zero_check, *end_jump;
4482                                 is_not_zero_check = code;
4483                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4484                                 end_jump = code;
4485                                 x86_jump8 (code, 0);
4486                                 amd64_patch (is_not_zero_check, code);
4487                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4488
4489                                 amd64_patch (end_jump, code);
4490                         }
4491                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4492                         break;
4493                 case OP_FBGT:
4494                 case OP_FBGT_UN:
4495                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4496                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
4497                                 break;
4498                         }
4499                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4500                         if (ins->opcode == OP_FBGT_UN) {
4501                                 guchar *is_not_zero_check, *end_jump;
4502                                 is_not_zero_check = code;
4503                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4504                                 end_jump = code;
4505                                 x86_jump8 (code, 0);
4506                                 amd64_patch (is_not_zero_check, code);
4507                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4508
4509                                 amd64_patch (end_jump, code);
4510                         }
4511                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4512                         break;
4513                 case OP_FBGE:
4514                         /* Branch if C013 == 100 or 001 */
4515                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4516                                 guchar *br1;
4517
4518                                 /* skip branch if C1=1 */
4519                                 br1 = code;
4520                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4521                                 /* branch if (C0 | C3) = 1 */
4522                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
4523                                 amd64_patch (br1, code);
4524                                 break;
4525                         }
4526                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4527                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4528                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4529                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4530                         break;
4531                 case OP_FBGE_UN:
4532                         /* Branch if C013 == 000 */
4533                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4534                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
4535                                 break;
4536                         }
4537                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4538                         break;
4539                 case OP_FBLE:
4540                         /* Branch if C013=000 or 100 */
4541                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4542                                 guchar *br1;
4543
4544                                 /* skip branch if C1=1 */
4545                                 br1 = code;
4546                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4547                                 /* branch if C0=0 */
4548                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
4549                                 amd64_patch (br1, code);
4550                                 break;
4551                         }
4552                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
4553                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4554                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4555                         break;
4556                 case OP_FBLE_UN:
4557                         /* Branch if C013 != 001 */
4558                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4559                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4560                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
4561                                 break;
4562                         }
4563                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4564                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4565                         break;
4566                 case CEE_CKFINITE: {
4567                         if (use_sse2) {
4568                                 /* Transfer value to the fp stack */
4569                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4570                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
4571                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
4572                         }
4573                         amd64_push_reg (code, AMD64_RAX);
4574                         amd64_fxam (code);
4575                         amd64_fnstsw (code);
4576                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
4577                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4578                         amd64_pop_reg (code, AMD64_RAX);
4579                         if (use_sse2) {
4580                                 amd64_fstp (code, 0);
4581                         }                               
4582                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
4583                         break;
4584                 }
4585                 case OP_X86_TLS_GET: {
4586                         x86_prefix (code, X86_FS_PREFIX);
4587                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
4588                         break;
4589                 }
4590                 default:
4591                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4592                         g_assert_not_reached ();
4593                 }
4594
4595                 if ((code - cfg->native_code - offset) > max_len) {
4596                         g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4597                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4598                         g_assert_not_reached ();
4599                 }
4600                
4601                 cpos += max_len;
4602
4603                 last_ins = ins;
4604                 last_offset = offset;
4605                 
4606                 ins = ins->next;
4607         }
4608
4609         cfg->code_len = code - cfg->native_code;
4610 }
4611
4612 void
4613 mono_arch_register_lowlevel_calls (void)
4614 {
4615 }
4616
4617 void
4618 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4619 {
4620         MonoJumpInfo *patch_info;
4621
4622         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4623                 unsigned char *ip = patch_info->ip.i + code;
4624                 const unsigned char *target;
4625
4626                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4627
4628                 switch (patch_info->type) {
4629                 case MONO_PATCH_INFO_METHOD_REL:
4630                 case MONO_PATCH_INFO_METHOD_JUMP:
4631                         *((gconstpointer *)(ip + 2)) = target;
4632                         continue;
4633                 case MONO_PATCH_INFO_SWITCH: {
4634                         *((gconstpointer *)(ip + 2)) = target;
4635                         continue;
4636                 }
4637                 case MONO_PATCH_INFO_IID:
4638                         *((guint32 *)(ip + 2)) = (guint32)(guint64)target;
4639                         continue;                       
4640                 case MONO_PATCH_INFO_CLASS_INIT: {
4641                         /* Might already been changed to a nop */
4642                         guint8* ip2 = ip;
4643                         amd64_set_reg_template (ip2, GP_SCRATCH_REG);
4644                         amd64_call_reg (ip2, GP_SCRATCH_REG);                   
4645                         *((gconstpointer *)(ip + 2)) = target;
4646                         continue;
4647                 }
4648                 case MONO_PATCH_INFO_R8:
4649                 case MONO_PATCH_INFO_R4:
4650                         g_assert_not_reached ();
4651                         continue;
4652                 case MONO_PATCH_INFO_METHODCONST:
4653                 case MONO_PATCH_INFO_CLASS:
4654                 case MONO_PATCH_INFO_IMAGE:
4655                 case MONO_PATCH_INFO_FIELD:
4656                 case MONO_PATCH_INFO_VTABLE:
4657                 case MONO_PATCH_INFO_SFLDA:
4658                 case MONO_PATCH_INFO_EXC_NAME:
4659                 case MONO_PATCH_INFO_LDSTR:
4660                 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4661                 case MONO_PATCH_INFO_LDTOKEN:
4662                 case MONO_PATCH_INFO_IP:
4663                         *((gconstpointer *)(ip + 2)) = target;
4664                         continue;
4665                 case MONO_PATCH_INFO_METHOD:
4666                         *((gconstpointer *)(ip + 2)) = target;
4667                         continue;
4668                 case MONO_PATCH_INFO_ABS:
4669                 case MONO_PATCH_INFO_INTERNAL_METHOD:
4670                         break;
4671                 default:
4672                         break;
4673                 }
4674                 amd64_patch (ip, (gpointer)target);
4675         }
4676 }
4677
4678 guint8 *
4679 mono_arch_emit_prolog (MonoCompile *cfg)
4680 {
4681         MonoMethod *method = cfg->method;
4682         MonoBasicBlock *bb;
4683         MonoMethodSignature *sig;
4684         MonoInst *inst;
4685         int alloc_size, pos, max_offset, i;
4686         guint8 *code;
4687         CallInfo *cinfo;
4688
4689         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
4690         code = cfg->native_code = g_malloc (cfg->code_size);
4691
4692         amd64_push_reg (code, AMD64_RBP);
4693         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
4694
4695         /* Stack alignment check */
4696 #if 0
4697         {
4698                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4699                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4700                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4701                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4702                 amd64_breakpoint (code);
4703         }
4704 #endif
4705
4706         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
4707         pos = 0;
4708
4709         if (method->save_lmf) {
4710
4711                 pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
4712
4713                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
4714
4715                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4716
4717                 /* Save ip */
4718                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
4719                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
4720                 /* Save fp */
4721                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
4722                 /* Save method */
4723                 /* FIXME: add a relocation for this */
4724                 if (IS_IMM32 (cfg->method))
4725                         amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
4726                 else {
4727                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
4728                         amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
4729                 }
4730                 /* Save callee saved regs */
4731                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
4732                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
4733                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
4734                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
4735                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
4736         } else {
4737
4738                 for (i = 0; i < AMD64_NREG; ++i)
4739                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4740                                 amd64_push_reg (code, i);
4741                                 pos += sizeof (gpointer);
4742                         }
4743         }
4744
4745         alloc_size -= pos;
4746
4747         if (alloc_size) {
4748                 /* See mono_emit_stack_alloc */
4749 #ifdef PLATFORM_WIN32
4750                 guint32 remaining_size = alloc_size;
4751                 while (remaining_size >= 0x1000) {
4752                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
4753                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
4754                         remaining_size -= 0x1000;
4755                 }
4756                 if (remaining_size)
4757                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
4758 #else
4759                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
4760 #endif
4761         }
4762
4763         /* compute max_offset in order to use short forward jumps */
4764         max_offset = 0;
4765         if (cfg->opt & MONO_OPT_BRANCH) {
4766                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4767                         MonoInst *ins = bb->code;
4768                         bb->max_offset = max_offset;
4769
4770                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4771                                 max_offset += 6;
4772                         /* max alignment for loops */
4773                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4774                                 max_offset += LOOP_ALIGNMENT;
4775
4776                         while (ins) {
4777                                 if (ins->opcode == OP_LABEL)
4778                                         ins->inst_c1 = max_offset;
4779                                 
4780                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
4781                                 ins = ins->next;
4782                         }
4783                 }
4784         }
4785
4786         sig = method->signature;
4787         pos = 0;
4788
4789         cinfo = get_call_info (sig, FALSE);
4790
4791         if (sig->ret->type != MONO_TYPE_VOID) {
4792                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
4793                         /* Save volatile arguments to the stack */
4794                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
4795                 }
4796         }
4797
4798         /* Keep this in sync with emit_load_volatile_arguments */
4799         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4800                 ArgInfo *ainfo = cinfo->args + i;
4801                 gint32 stack_offset;
4802                 MonoType *arg_type;
4803                 inst = cfg->varinfo [i];
4804
4805                 if (sig->hasthis && (i == 0))
4806                         arg_type = &mono_defaults.object_class->byval_arg;
4807                 else
4808                         arg_type = sig->params [i - sig->hasthis];
4809
4810                 stack_offset = ainfo->offset + ARGS_OFFSET;
4811
4812                 /* Save volatile arguments to the stack */
4813                 if (inst->opcode != OP_REGVAR) {
4814                         switch (ainfo->storage) {
4815                         case ArgInIReg: {
4816                                 guint32 size = 8;
4817
4818                                 /* FIXME: I1 etc */
4819                                 /*
4820                                 if (stack_offset & 0x1)
4821                                         size = 1;
4822                                 else if (stack_offset & 0x2)
4823                                         size = 2;
4824                                 else if (stack_offset & 0x4)
4825                                         size = 4;
4826                                 else
4827                                         size = 8;
4828                                 */
4829                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
4830                                 break;
4831                         }
4832                         case ArgInFloatSSEReg:
4833                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4834                                 break;
4835                         case ArgInDoubleSSEReg:
4836                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4837                                 break;
4838                         default:
4839                                 break;
4840                         }
4841                 }
4842
4843                 if (inst->opcode == OP_REGVAR) {
4844                         /* Argument allocated to (non-volatile) register */
4845                         switch (ainfo->storage) {
4846                         case ArgInIReg:
4847                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
4848                                 break;
4849                         case ArgOnStack:
4850                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
4851                                 break;
4852                         default:
4853                                 g_assert_not_reached ();
4854                         }
4855                 }
4856         }
4857
4858         if (method->save_lmf) {
4859                 if (lmf_tls_offset != -1) {
4860                         /* Load lmf quicky using the FS register */
4861                         x86_prefix (code, X86_FS_PREFIX);
4862                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
4863                 }
4864                 else {
4865                         /* 
4866                          * The call might clobber argument registers, but they are already
4867                          * saved to the stack/global regs.
4868                          */
4869
4870                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
4871                                                                  (gpointer)"mono_get_lmf_addr");                
4872                 }
4873
4874                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4875
4876                 /* Save lmf_addr */
4877                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
4878                 /* Save previous_lmf */
4879                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
4880                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
4881                 /* Set new lmf */
4882                 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
4883                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
4884         }
4885
4886
4887         g_free (cinfo);
4888
4889         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4890                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4891
4892         cfg->code_len = code - cfg->native_code;
4893
4894         g_assert (cfg->code_len < cfg->code_size);
4895
4896         return code;
4897 }
4898
4899 void
4900 mono_arch_emit_epilog (MonoCompile *cfg)
4901 {
4902         MonoJumpInfo *patch_info;
4903         MonoMethod *method = cfg->method;
4904         int pos, i;
4905         guint8 *code;
4906
4907         code = cfg->native_code + cfg->code_len;
4908
4909         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4910                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4911
4912         /* the code restoring the registers must be kept in sync with CEE_JMP */
4913         pos = 0;
4914         
4915         if (method->save_lmf) {
4916                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4917
4918                 /* Restore previous lmf */
4919                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
4920                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
4921                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
4922
4923                 /* Restore caller saved regs */
4924                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
4925                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
4926                 }
4927                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
4928                         amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
4929                 }
4930                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
4931                         amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
4932                 }
4933                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
4934                         amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
4935                 }
4936                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
4937                         amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
4938                 }
4939         } else {
4940
4941                 for (i = 0; i < AMD64_NREG; ++i)
4942                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
4943                                 pos -= sizeof (gpointer);
4944
4945                 if (pos) {
4946                         if (pos == - sizeof (gpointer)) {
4947                                 /* Only one register, so avoid lea */
4948                                 for (i = AMD64_NREG - 1; i > 0; --i)
4949                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4950                                                 amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
4951                                         }
4952                         }
4953                         else {
4954                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
4955
4956                                 /* Pop registers in reverse order */
4957                                 for (i = AMD64_NREG - 1; i > 0; --i)
4958                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4959                                                 amd64_pop_reg (code, i);
4960                                         }
4961                         }
4962                 }
4963         }
4964
4965         amd64_leave (code);
4966         amd64_ret (code);
4967
4968         /* add code to raise exceptions */
4969         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4970                 switch (patch_info->type) {
4971                 case MONO_PATCH_INFO_EXC: {
4972                         guint64 offset;
4973
4974                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
4975                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
4976                         amd64_set_reg_template (code, AMD64_RDI);
4977                         /* 7 is the length of the lea */
4978                         offset = (((guint64)code + 7) - (guint64)cfg->native_code) - (guint64)patch_info->ip.i;
4979                         amd64_lea_membase (code, AMD64_RSI, AMD64_RIP, - offset);
4980                         patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4981                         patch_info->data.name = "mono_arch_throw_exception_by_name";
4982                         patch_info->ip.i = code - cfg->native_code;
4983                         EMIT_CALL ();
4984                         break;
4985                 }
4986                 default:
4987                         /* do nothing */
4988                         break;
4989                 }
4990         }
4991
4992         /* Handle relocations with RIP relative addressing */
4993         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4994                 gboolean remove = FALSE;
4995
4996                 switch (patch_info->type) {
4997                 case MONO_PATCH_INFO_R8: {
4998                         code = (guint8*)ALIGN_TO (code, 8);
4999
5000                         guint8* pos = cfg->native_code + patch_info->ip.i;
5001
5002                         *(double*)code = *(double*)patch_info->data.target;
5003
5004                         if (use_sse2)
5005                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5006                         else
5007                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5008                         code += 8;
5009
5010                         remove = TRUE;
5011                         break;
5012                 }
5013                 case MONO_PATCH_INFO_R4: {
5014                         code = (guint8*)ALIGN_TO (code, 8);
5015
5016                         guint8* pos = cfg->native_code + patch_info->ip.i;
5017
5018                         *(float*)code = *(float*)patch_info->data.target;
5019
5020                         if (use_sse2)
5021                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5022                         else
5023                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5024                         code += 4;
5025
5026                         remove = TRUE;
5027                         break;
5028                 }
5029                 default:
5030                         break;
5031                 }
5032
5033                 if (remove) {
5034                         if (patch_info == cfg->patch_info)
5035                                 cfg->patch_info = patch_info->next;
5036                         else {
5037                                 MonoJumpInfo *tmp;
5038
5039                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
5040                                         ;
5041                                 tmp->next = patch_info->next;
5042                         }
5043                 }
5044         }
5045
5046         cfg->code_len = code - cfg->native_code;
5047
5048         g_assert (cfg->code_len < cfg->code_size);
5049
5050 }
5051
5052 /*
5053  * Allow tracing to work with this interface (with an optional argument)
5054  */
5055
5056 /*
5057  * This may be needed on some archs or for debugging support.
5058  */
5059 void
5060 mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
5061 {
5062         /* no stack room needed now (may be needed for FASTCALL-trace support) */
5063         *stack = 0;
5064         /* split prolog-epilog requirements? */
5065         *code = 50; /* max bytes needed: check this number */
5066 }
5067
5068 void*
5069 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5070 {
5071         guchar *code = p;
5072         CallInfo *cinfo;
5073         MonoMethodSignature *sig;
5074         MonoInst *inst;
5075         int i, n, stack_area = 0;
5076
5077         /* Keep this in sync with mono_arch_get_argument_info */
5078
5079         if (enable_arguments) {
5080                 /* Allocate a new area on the stack and save arguments there */
5081                 sig = cfg->method->signature;
5082
5083                 cinfo = get_call_info (sig, FALSE);
5084
5085                 n = sig->param_count + sig->hasthis;
5086
5087                 stack_area = ALIGN_TO (n * 8, 16);
5088
5089                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
5090
5091                 for (i = 0; i < n; ++i) {
5092                         inst = cfg->varinfo [i];
5093
5094                         if (inst->opcode == OP_REGVAR)
5095                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
5096                         else {
5097                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
5098                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
5099                         }
5100                 }
5101         }
5102
5103         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
5104         amd64_set_reg_template (code, AMD64_RDI);
5105         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
5106         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5107
5108         if (enable_arguments) {
5109                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
5110
5111                 g_free (cinfo);
5112         }
5113
5114         return code;
5115 }
5116
5117 enum {
5118         SAVE_NONE,
5119         SAVE_STRUCT,
5120         SAVE_EAX,
5121         SAVE_EAX_EDX,
5122         SAVE_XMM
5123 };
5124
5125 void*
5126 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5127 {
5128         guchar *code = p;
5129         int save_mode = SAVE_NONE;
5130         MonoMethod *method = cfg->method;
5131         int rtype = mono_type_get_underlying_type (method->signature->ret)->type;
5132         
5133         switch (rtype) {
5134         case MONO_TYPE_VOID:
5135                 /* special case string .ctor icall */
5136                 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
5137                         save_mode = SAVE_EAX;
5138                 else
5139                         save_mode = SAVE_NONE;
5140                 break;
5141         case MONO_TYPE_I8:
5142         case MONO_TYPE_U8:
5143                 save_mode = SAVE_EAX;
5144                 break;
5145         case MONO_TYPE_R4:
5146         case MONO_TYPE_R8:
5147                 save_mode = SAVE_XMM;
5148                 break;
5149         case MONO_TYPE_VALUETYPE:
5150                 save_mode = SAVE_STRUCT;
5151                 break;
5152         default:
5153                 save_mode = SAVE_EAX;
5154                 break;
5155         }
5156
5157         /* Save the result and copy it into the proper argument register */
5158         switch (save_mode) {
5159         case SAVE_EAX:
5160                 amd64_push_reg (code, AMD64_RAX);
5161                 /* Align stack */
5162                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5163                 if (enable_arguments)
5164                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
5165                 break;
5166         case SAVE_STRUCT:
5167                 /* FIXME: */
5168                 if (enable_arguments)
5169                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
5170                 break;
5171         case SAVE_XMM:
5172                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5173                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
5174                 /* Align stack */
5175                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5176                 /* 
5177                  * The result is already in the proper argument register so no copying
5178                  * needed.
5179                  */
5180                 break;
5181         case SAVE_NONE:
5182                 break;
5183         default:
5184                 g_assert_not_reached ();
5185         }
5186
5187         /* Set %al since this is a varargs call */
5188         if (save_mode == SAVE_XMM)
5189                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
5190         else
5191                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
5192
5193         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
5194         amd64_set_reg_template (code, AMD64_RDI);
5195         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5196
5197         /* Restore result */
5198         switch (save_mode) {
5199         case SAVE_EAX:
5200                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5201                 amd64_pop_reg (code, AMD64_RAX);
5202                 break;
5203         case SAVE_STRUCT:
5204                 /* FIXME: */
5205                 break;
5206         case SAVE_XMM:
5207                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5208                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
5209                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5210                 break;
5211         case SAVE_NONE:
5212                 break;
5213         default:
5214                 g_assert_not_reached ();
5215         }
5216
5217         return code;
5218 }
5219
5220 int
5221 mono_arch_max_epilog_size (MonoCompile *cfg)
5222 {
5223         int max_epilog_size = 16;
5224         MonoJumpInfo *patch_info;
5225         
5226         if (cfg->method->save_lmf)
5227                 max_epilog_size += 256;
5228         
5229         if (mono_jit_trace_calls != NULL)
5230                 max_epilog_size += 50;
5231
5232         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5233                 max_epilog_size += 50;
5234
5235         max_epilog_size += (AMD64_NREG * 2);
5236
5237         /* 
5238          * make sure we have enough space for exceptions
5239          */
5240         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5241                 if (patch_info->type == MONO_PATCH_INFO_EXC)
5242                         max_epilog_size += 40;
5243                 if (patch_info->type == MONO_PATCH_INFO_R8)
5244                         max_epilog_size += 8 + 7; /* sizeof (double) + alignment */
5245                 if (patch_info->type == MONO_PATCH_INFO_R4)
5246                         max_epilog_size += 4 + 7; /* sizeof (float) + alignment */
5247         }
5248
5249         return max_epilog_size;
5250 }
5251
5252 void
5253 mono_arch_flush_icache (guint8 *code, gint size)
5254 {
5255         /* Not needed */
5256 }
5257
5258 void
5259 mono_arch_flush_register_windows (void)
5260 {
5261 }
5262
5263 gboolean 
5264 mono_arch_is_inst_imm (gint64 imm)
5265 {
5266         return amd64_is_imm32 (imm);
5267 }
5268
5269 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
5270
5271 static int reg_to_ucontext_reg [] = {
5272         REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
5273         REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
5274         REG_RIP
5275 };
5276
5277 /*
5278  * Determine whenever the trap whose info is in SIGINFO is caused by
5279  * integer overflow.
5280  */
5281 gboolean
5282 mono_arch_is_int_overflow (void *sigctx, void *info)
5283 {
5284         ucontext_t *ctx = (ucontext_t*)sigctx;
5285         guint8* rip;
5286         int reg;
5287
5288         rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
5289
5290         if (IS_REX (rip [0])) {
5291                 reg = amd64_rex_r (rip [0]);
5292                 rip ++;
5293         }
5294         else
5295                 reg = 0;
5296
5297         if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
5298                 /* idiv REG */
5299                 reg += x86_modrm_rm (rip [1]);
5300
5301                 if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
5302                         return TRUE;
5303         }
5304
5305         return FALSE;
5306 }
5307
5308 gpointer*
5309 mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
5310 {
5311         guint32 reg;
5312         guint32 disp;
5313         guint8 rex = 0;
5314
5315         /* go to the start of the call instruction
5316          *
5317          * address_byte = (m << 6) | (o << 3) | reg
5318          * call opcode: 0xff address_byte displacement
5319          * 0xff m=1,o=2 imm8
5320          * 0xff m=2,o=2 imm32
5321          */
5322         code -= 7;
5323
5324         if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
5325                 /* call *%reg */
5326                 return NULL;
5327         }
5328         else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
5329                 /* call *[reg+disp32] */
5330                 if (IS_REX (code [0]))
5331                         rex = code [0];
5332                 reg = amd64_modrm_rm (code [2]);
5333                 disp = *(guint32*)(code + 3);
5334                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5335         }
5336         else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
5337                 /* call *[reg+disp8] */
5338                 if (IS_REX (code [3]))
5339                         rex = code [3];
5340                 reg = amd64_modrm_rm (code [5]);
5341                 disp = *(guint8*)(code + 6);
5342                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5343         }
5344         else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
5345                         /*
5346                          * This is a interface call: should check the above code can't catch it earlier 
5347                          * 8b 40 30   mov    0x30(%eax),%eax
5348                          * ff 10      call   *(%eax)
5349                          */
5350                 if (IS_REX (code [4]))
5351                         rex = code [4];
5352                 reg = amd64_modrm_rm (code [6]);
5353                 disp = 0;
5354         }
5355         else
5356                 g_assert_not_reached ();
5357
5358         reg += amd64_rex_b (rex);
5359
5360         return (gpointer)((regs [reg]) + disp);
5361 }
5362
5363 /*
5364  * Support for fast access to the thread-local lmf structure using the GS
5365  * segment register on NPTL + kernel 2.6.x.
5366  */
5367
5368 static gboolean tls_offset_inited = FALSE;
5369
5370 /* code should be simply return <tls var>; */
5371 static int 
5372 read_tls_offset_from_method (void* method)
5373 {
5374         guint8 *code = (guint8*)method;
5375
5376         /* 
5377          * Determine the offset of mono_lfm_addr inside the TLS structures
5378          * by disassembling the function above.
5379          */
5380         /* This is generated by gcc 3.3.2 */
5381         if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5382                 (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5383                 (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5384                 (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
5385                 (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
5386                 (code [15] == 0x80)) {
5387                 return *(gint32*)&(code [16]);
5388         } else if
5389                 /* This is generated by gcc-3.3.2 with -O=2 */
5390                 /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
5391                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5392                  (code [3] == 0x04) && (code [4] == 0x25) &&
5393                  (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
5394                  (code [16] == 0xc3)) {
5395                         return *(gint32*)&(code [12]);
5396         } else if 
5397                 /* This is generated by gcc-3.4.1 */
5398                 ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5399                  (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5400                  (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5401                  (code [13] == 0xc9) && (code [14] == 0xc3)) {
5402                         return *(gint32*)&(code [9]);
5403         } else if
5404                 /* This is generated by gcc-3.4.1 with -O=2 */
5405                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5406                  (code [3] == 0x04) && (code [4] == 0x25)) {
5407                 return *(gint32*)&(code [5]);
5408         }
5409
5410         return -1;
5411 }
5412
5413 void
5414 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5415 {
5416 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5417         pthread_t self = pthread_self();
5418         pthread_attr_t attr;
5419         void *staddr = NULL;
5420         size_t stsize = 0;
5421         struct sigaltstack sa;
5422 #endif
5423
5424         if (!tls_offset_inited) {
5425                 tls_offset_inited = TRUE;
5426
5427                 lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
5428                 appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
5429                 //thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
5430         }               
5431
5432 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5433
5434         /* Determine stack boundaries */
5435         if (!mono_running_on_valgrind ()) {
5436 #ifdef HAVE_PTHREAD_GETATTR_NP
5437                 pthread_getattr_np( self, &attr );
5438 #else
5439 #ifdef HAVE_PTHREAD_ATTR_GET_NP
5440                 pthread_attr_get_np( self, &attr );
5441 #elif defined(sun)
5442                 pthread_attr_init( &attr );
5443                 pthread_attr_getstacksize( &attr, &stsize );
5444 #else
5445 #error "Not implemented"
5446 #endif
5447 #endif
5448 #ifndef sun
5449                 pthread_attr_getstack( &attr, &staddr, &stsize );
5450 #endif
5451         }
5452
5453         /* 
5454          * staddr seems to be wrong for the main thread, so we keep the value in
5455          * tls->end_of_stack
5456          */
5457         tls->stack_size = stsize;
5458
5459         /* Setup an alternate signal stack */
5460         tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
5461         tls->signal_stack_size = SIGNAL_STACK_SIZE;
5462
5463         sa.ss_sp = tls->signal_stack;
5464         sa.ss_size = SIGNAL_STACK_SIZE;
5465         sa.ss_flags = SS_ONSTACK;
5466         sigaltstack (&sa, NULL);
5467 #endif
5468 }
5469
5470 void
5471 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5472 {
5473 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5474         struct sigaltstack sa;
5475
5476         sa.ss_sp = tls->signal_stack;
5477         sa.ss_size = SIGNAL_STACK_SIZE;
5478         sa.ss_flags = SS_DISABLE;
5479         sigaltstack  (&sa, NULL);
5480
5481         if (tls->signal_stack)
5482                 g_free (tls->signal_stack);
5483 #endif
5484 }
5485
5486 void
5487 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
5488 {
5489         int out_reg = param_regs [0];
5490
5491         /* FIXME: RDI and RSI might get clobbered */
5492
5493         if (vt_reg != -1) {
5494                 CallInfo * cinfo = get_call_info (inst->signature, FALSE);
5495                 MonoInst *vtarg;
5496
5497                 if (cinfo->ret.storage == ArgValuetypeInReg) {
5498                         /*
5499                          * The valuetype is in RAX:RDX after the call, need to be copied to
5500                          * the stack. Push the address here, so the call instruction can
5501                          * access it.
5502                          */
5503                         MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
5504                         vtarg->sreg1 = vt_reg;
5505                         mono_bblock_add_inst (cfg->cbb, vtarg);
5506
5507                         /* Align stack */
5508                         MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
5509                 }
5510                 else {
5511                         MONO_INST_NEW (cfg, vtarg, OP_SETREG);
5512                         vtarg->sreg1 = vt_reg;
5513                         vtarg->dreg = out_reg;
5514                         out_reg = param_regs [1];
5515                         mono_bblock_add_inst (cfg->cbb, vtarg);
5516                 }
5517
5518                 g_free (cinfo);
5519         }
5520
5521         /* add the this argument */
5522         if (this_reg != -1) {
5523                 MonoInst *this;
5524                 MONO_INST_NEW (cfg, this, OP_SETREG);
5525                 this->type = this_type;
5526                 this->sreg1 = this_reg;
5527                 this->dreg = out_reg;
5528                 mono_bblock_add_inst (cfg->cbb, this);
5529         }
5530 }
5531
5532 gint
5533 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5534 {
5535         if (use_sse2)
5536                 return -1;
5537
5538         if (cmethod->klass == mono_defaults.math_class) {
5539                 if (strcmp (cmethod->name, "Sin") == 0)
5540                         return OP_SIN;
5541                 else if (strcmp (cmethod->name, "Cos") == 0)
5542                         return OP_COS;
5543                 else if (strcmp (cmethod->name, "Tan") == 0)
5544                         return OP_TAN;
5545                 else if (strcmp (cmethod->name, "Atan") == 0)
5546                         return OP_ATAN;
5547                 else if (strcmp (cmethod->name, "Sqrt") == 0)
5548                         return OP_SQRT;
5549                 else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8)
5550                         return OP_ABS;
5551 #if 0
5552                 /* OP_FREM is not IEEE compatible */
5553                 else if (strcmp (cmethod->name, "IEEERemainder") == 0)
5554                         return OP_FREM;
5555 #endif
5556                 else
5557                         return -1;
5558         } else {
5559                 return -1;
5560         }
5561         return -1;
5562 }
5563
5564
5565 gboolean
5566 mono_arch_print_tree (MonoInst *tree, int arity)
5567 {
5568         return 0;
5569 }
5570
5571 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5572 {
5573         MonoInst* ins;
5574         
5575         if (appdomain_tls_offset == -1)
5576                 return NULL;
5577         
5578         MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
5579         ins->inst_offset = appdomain_tls_offset;
5580         return ins;
5581 }
5582
5583 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
5584 {
5585         MonoInst* ins;
5586         
5587         if (thread_tls_offset == -1)
5588                 return NULL;
5589         
5590         MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
5591         ins->inst_offset = thread_tls_offset;
5592         return ins;
5593 }