2004-09-24 Zoltan Varga <vargaz@freemail.hu>
[mono.git] / mono / mini / mini-amd64.c
1 /*
2  * mini-amd64.c: AMD64 backend for the Mono code generator
3  *
4  * Based on mini-x86.c.
5  *
6  * Authors:
7  *   Paolo Molaro (lupus@ximian.com)
8  *   Dietmar Maurer (dietmar@ximian.com)
9  *   Patrik Torstensson
10  *
11  * (C) 2003 Ximian, Inc.
12  */
13 #include "mini.h"
14 #include <string.h>
15 #include <math.h>
16
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/metadata/profiler-private.h>
21 #include <mono/utils/mono-math.h>
22
23 #include "trace.h"
24 #include "mini-amd64.h"
25 #include "inssel.h"
26 #include "cpu-amd64.h"
27
28 static gint lmf_tls_offset = -1;
29 static gint appdomain_tls_offset = -1;
30 static gint thread_tls_offset = -1;
31
32 /* Use SSE2 instructions for fp arithmetic */
33 static gboolean use_sse2 = FALSE;
34
35 /* xmm15 is reserved for use by some opcodes */
36 #define AMD64_CALLEE_FREGS 0xef
37
38 #define FPSTACK_SIZE 6
39
40 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
41
42 #define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
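
/*
 * Examples of the two helper macros above:
 *   ALIGN_TO (12, 8) == 16 and ALIGN_TO (16, 8) == 16 (round a value up to the
 *   next multiple of a power-of-two alignment);
 *   IS_IMM32 (0x7fffffffULL) is TRUE while IS_IMM32 (0x100000000ULL) is FALSE
 *   (the value fits in a 32 bit immediate exactly when its upper 32 bits are zero).
 */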
43
44 #ifdef PLATFORM_WIN32
45 /* Under Windows, the default pinvoke calling convention is stdcall */
46 #define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
47 #else
48 #define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
49 #endif
50
51 #define SIGNAL_STACK_SIZE (64 * 1024)
52
53 #define ARGS_OFFSET 16
54 #define GP_SCRATCH_REG AMD64_R11
55
56 /*
57  * AMD64 register usage:
58  * - callee saved registers are used for global register allocation
59  * - %r11 is used for materializing 64 bit constants in opcodes
60  * - the rest is used for local allocation
61  */
62
63 /*
64  * FIXME: 
65  * - Use xmm registers instead of the x87 stack
66  * - Allocate arguments to global registers
67  * - implement emulated opcodes
68  * - (all archs) do not store trampoline addresses in method->info since they
69  *   are domain specific.   
70  */
71
72 #define NOT_IMPLEMENTED g_assert_not_reached ()
73
74 const char*
75 mono_arch_regname (int reg) {
76         switch (reg) {
77         case AMD64_RAX: return "%rax";
78         case AMD64_RBX: return "%rbx";
79         case AMD64_RCX: return "%rcx";
80         case AMD64_RDX: return "%rdx";
81         case AMD64_RSP: return "%rsp";  
82         case AMD64_RBP: return "%rbp";
83         case AMD64_RDI: return "%rdi";
84         case AMD64_RSI: return "%rsi";
85         case AMD64_R8: return "%r8";
86         case AMD64_R9: return "%r9";
87         case AMD64_R10: return "%r10";
88         case AMD64_R11: return "%r11";
89         case AMD64_R12: return "%r12";
90         case AMD64_R13: return "%r13";
91         case AMD64_R14: return "%r14";
92         case AMD64_R15: return "%r15";
93         }
94         return "unknown";
95 }
96
97 static const char * xmmregs [] = {
98         "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
99         "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
100 };
101
102 static const char*
103 mono_arch_fregname (int reg)
104 {
105         if (reg < AMD64_XMM_NREG)
106                 return xmmregs [reg];
107         else
108                 return "unknown";
109 }
110
111 static const char*
112 mono_amd64_regname (int reg, gboolean fp)
113 {
114         if (fp)
115                 return mono_arch_fregname (reg);
116         else
117                 return mono_arch_regname (reg);
118 }
119
120 static inline void 
121 amd64_patch (unsigned char* code, gpointer target)
122 {
123         /* Skip REX */
124         if ((code [0] >= 0x40) && (code [0] <= 0x4f))
125                 code += 1;
126
127         if (code [0] == 0xbb) {
128                 /* amd64_set_reg_template */
129                 *(guint64*)(code + 1) = (guint64)target;
130         }
131         else
132                 x86_patch (code, (unsigned char*)target);
133 }
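
/*
 * Sketch of how the template/patch pair above is typically used (the target
 * value here is a placeholder): a caller that does not yet know the final
 * 64 bit address emits a 'mov reg, imm64' template plus an indirect call, and
 * the generic patching code later rewrites the immediate through amd64_patch,
 * which recognizes the 0xbb opcode after the optional REX prefix.
 *
 *   guint8 *start = code;
 *   amd64_set_reg_template (code, GP_SCRATCH_REG);  // placeholder mov into %r11
 *   amd64_call_reg (code, GP_SCRATCH_REG);          // call *%r11
 *   ...
 *   amd64_patch (start, final_target);              // fill in the real address
 */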
134
135 typedef enum {
136         ArgInIReg,
137         ArgInFloatSSEReg,
138         ArgInDoubleSSEReg,
139         ArgOnStack,
140         ArgValuetypeInReg,
141         ArgNone /* only in pair_storage */
142 } ArgStorage;
143
144 typedef struct {
145         gint16 offset;
146         gint8  reg;
147         ArgStorage storage;
148
149         /* Only if storage == ArgValuetypeInReg */
150         ArgStorage pair_storage [2];
151         gint8 pair_regs [2];
152 } ArgInfo;
153
154 typedef struct {
155         int nargs;
156         guint32 stack_usage;
157         guint32 reg_usage;
158         guint32 freg_usage;
159         gboolean need_stack_align;
160         ArgInfo ret;
161         ArgInfo sig_cookie;
162         ArgInfo args [1];
163 } CallInfo;
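
/*
 * Illustrative example of what get_call_info () below computes for a simple
 * non-pinvoke signature such as int f (int, long, double) under the System V
 * AMD64 convention used here (values shown are indicative):
 *
 *   ret:      storage = ArgInIReg,         reg = AMD64_RAX
 *   args [0]: storage = ArgInIReg,         reg = AMD64_RDI
 *   args [1]: storage = ArgInIReg,         reg = AMD64_RSI
 *   args [2]: storage = ArgInDoubleSSEReg, reg = 0 (xmm0)
 *   stack_usage = 0, reg_usage = 2, freg_usage = 1
 */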
164
165 #define DEBUG(a) if (cfg->verbose_level > 1) a
166
167 #define NEW_ICONST(cfg,dest,val) do {   \
168                 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
169                 (dest)->opcode = OP_ICONST;     \
170                 (dest)->inst_c0 = (val);        \
171                 (dest)->type = STACK_I4;        \
172         } while (0)
173
174 #define PARAM_REGS 6
175
176 static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };
177
178 static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
179
180 static void inline
181 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
182 {
183     ainfo->offset = *stack_size;
184
185     if (*gr >= PARAM_REGS) {
186                 ainfo->storage = ArgOnStack;
187                 (*stack_size) += sizeof (gpointer);
188     }
189     else {
190                 ainfo->storage = ArgInIReg;
191                 ainfo->reg = param_regs [*gr];
192                 (*gr) ++;
193     }
194 }
195
196 #define FLOAT_PARAM_REGS 8
197
198 static void inline
199 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
200 {
201     ainfo->offset = *stack_size;
202
203     if (*gr >= FLOAT_PARAM_REGS) {
204                 ainfo->storage = ArgOnStack;
205                 (*stack_size) += sizeof (gpointer);
206     }
207     else {
208                 /* A double register */
209                 if (is_double)
210                         ainfo->storage = ArgInDoubleSSEReg;
211                 else
212                         ainfo->storage = ArgInFloatSSEReg;
213                 ainfo->reg = *gr;
214                 (*gr) += 1;
215     }
216 }
217
218 typedef enum ArgumentClass {
219         ARG_CLASS_NO_CLASS,
220         ARG_CLASS_MEMORY,
221         ARG_CLASS_INTEGER,
222         ARG_CLASS_SSE
223 } ArgumentClass;
224
225 static ArgumentClass
226 merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
227 {
228         ArgumentClass class2;
229         MonoType *ptype;
230
231         ptype = mono_type_get_underlying_type (type);
232         switch (ptype->type) {
233         case MONO_TYPE_BOOLEAN:
234         case MONO_TYPE_CHAR:
235         case MONO_TYPE_I1:
236         case MONO_TYPE_U1:
237         case MONO_TYPE_I2:
238         case MONO_TYPE_U2:
239         case MONO_TYPE_I4:
240         case MONO_TYPE_U4:
241         case MONO_TYPE_I:
242         case MONO_TYPE_U:
243         case MONO_TYPE_STRING:
244         case MONO_TYPE_OBJECT:
245         case MONO_TYPE_CLASS:
246         case MONO_TYPE_SZARRAY:
247         case MONO_TYPE_PTR:
248         case MONO_TYPE_FNPTR:
249         case MONO_TYPE_ARRAY:
250         case MONO_TYPE_I8:
251         case MONO_TYPE_U8:
252                 class2 = ARG_CLASS_INTEGER;
253                 break;
254         case MONO_TYPE_R4:
255         case MONO_TYPE_R8:
256                 class2 = ARG_CLASS_SSE;
257                 break;
258
259         case MONO_TYPE_TYPEDBYREF:
260                 g_assert_not_reached ();
261
262         case MONO_TYPE_VALUETYPE: {
263                 MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
264                 int i;
265
266                 for (i = 0; i < info->num_fields; ++i) {
267                         class2 = class1;
268                         class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
269                 }
270                 break;
271         }
272         default:
273                 g_assert_not_reached ();
274         }
275
276         /* Merge */
277         if (class1 == class2)
278                 ;
279         else if (class1 == ARG_CLASS_NO_CLASS)
280                 class1 = class2;
281         else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
282                 class1 = ARG_CLASS_MEMORY;
283         else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
284                 class1 = ARG_CLASS_INTEGER;
285         else
286                 class1 = ARG_CLASS_SSE;
287
288         return class1;
289 }
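
/*
 * Classification examples for a single 8 byte quad, following the merge rules
 * above (INTEGER beats SSE, MEMORY beats everything):
 *
 *   struct { int a; int b; }     -> INTEGER + INTEGER -> INTEGER (GP register)
 *   struct { float a; float b; } -> SSE + SSE         -> SSE     (XMM register)
 *   struct { int a; float b; }   -> INTEGER + SSE     -> INTEGER (GP register)
 */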
290
291 static void
292 add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
293                gboolean is_return,
294                guint32 *gr, guint32 *fr, guint32 *stack_size)
295 {
296         guint32 size, quad, nquads, i;
297         ArgumentClass args [2];
298         MonoMarshalType *info;
299         MonoClass *klass;
300
301         klass = mono_class_from_mono_type (type);
302         if (sig->pinvoke) 
303                 size = mono_type_native_stack_size (&klass->byval_arg, NULL);
304         else 
305                 size = mono_type_stack_size (&klass->byval_arg, NULL);
306
307         if (!sig->pinvoke || (size == 0) || (size > 16)) {
308                 /* Always pass in memory */
309                 ainfo->offset = *stack_size;
310                 *stack_size += ALIGN_TO (size, 8);
311                 ainfo->storage = ArgOnStack;
312
313                 return;
314         }
315
316         /* FIXME: Handle structs smaller than 8 bytes */
317         //if ((size % 8) != 0)
318         //      NOT_IMPLEMENTED;
319
320         if (size > 8)
321                 nquads = 2;
322         else
323                 nquads = 1;
324
325         /*
326          * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
327          * The X87 and SSEUP stuff is left out since there are no such types in
328          * the CLR.
329          */
330         info = mono_marshal_load_type_info (klass);
331         g_assert (info);
332         if (info->native_size > 16) {
333                 ainfo->offset = *stack_size;
334                 *stack_size += ALIGN_TO (info->native_size, 8);
335                 ainfo->storage = ArgOnStack;
336
337                 return;
338         }
339
340         for (quad = 0; quad < nquads; ++quad) {
341                 int size, align;
342                 ArgumentClass class1;
343                 
344                 class1 = ARG_CLASS_NO_CLASS;
345                 for (i = 0; i < info->num_fields; ++i) {
346                         size = mono_marshal_type_size (info->fields [i].field->type, 
347                                                                                    info->fields [i].mspec, 
348                                                                                    &align, TRUE, klass->unicode);
349                         if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
350                                 /* Unaligned field */
351                                 NOT_IMPLEMENTED;
352                         }
353
354                         /* Skip fields in other quad */
355                         if ((quad == 0) && (info->fields [i].offset >= 8))
356                                 continue;
357                         if ((quad == 1) && (info->fields [i].offset < 8))
358                                 continue;
359
360                         class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
361                 }
362                 g_assert (class1 != ARG_CLASS_NO_CLASS);
363                 args [quad] = class1;
364         }
365
366         /* Post merger cleanup */
367         if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
368                 args [0] = args [1] = ARG_CLASS_MEMORY;
369
370         /* Allocate registers */
371         {
372                 int orig_gr = *gr;
373                 int orig_fr = *fr;
374
375                 ainfo->storage = ArgValuetypeInReg;
376                 ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
377                 for (quad = 0; quad < nquads; ++quad) {
378                         switch (args [quad]) {
379                         case ARG_CLASS_INTEGER:
380                                 if (*gr >= PARAM_REGS)
381                                         args [quad] = ARG_CLASS_MEMORY;
382                                 else {
383                                         ainfo->pair_storage [quad] = ArgInIReg;
384                                         if (is_return)
385                                                 ainfo->pair_regs [quad] = return_regs [*gr];
386                                         else
387                                                 ainfo->pair_regs [quad] = param_regs [*gr];
388                                         (*gr) ++;
389                                 }
390                                 break;
391                         case ARG_CLASS_SSE:
392                                 if (*fr >= FLOAT_PARAM_REGS)
393                                         args [quad] = ARG_CLASS_MEMORY;
394                                 else {
395                                         ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
396                                         ainfo->pair_regs [quad] = *fr;
397                                         (*fr) ++;
398                                 }
399                                 break;
400                         case ARG_CLASS_MEMORY:
401                                 break;
402                         default:
403                                 g_assert_not_reached ();
404                         }
405                 }
406
407                 if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
408                         /* Revert possible register assignments */
409                         *gr = orig_gr;
410                         *fr = orig_fr;
411
412                         ainfo->offset = *stack_size;
413                         *stack_size += ALIGN_TO (info->native_size, 8);
414                         ainfo->storage = ArgOnStack;
415                 }
416         }
417 }
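
/*
 * Example of the two-quad register case above: for a pinvoke argument of type
 * struct { gint64 i; double d; } (16 bytes), quad 0 classifies as INTEGER and
 * quad 1 as SSE, so the ArgInfo ends up as
 *
 *   storage          = ArgValuetypeInReg
 *   pair_storage [0] = ArgInIReg,         pair_regs [0] = next free param reg
 *   pair_storage [1] = ArgInDoubleSSEReg, pair_regs [1] = next free xmm reg
 *
 * If either quad fails to get a register, both fall back to MEMORY, the
 * tentatively allocated registers are given back and the whole struct is
 * passed with ArgOnStack.
 */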
418
419 /*
420  * get_call_info:
421  *
422  *  Obtain information about a call according to the calling convention.
423  * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement 
424  * Draft Version 0.23" document for more information.
425  */
426 static CallInfo*
427 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
428 {
429         guint32 i, gr, fr;
430         MonoType *ret_type;
431         int n = sig->hasthis + sig->param_count;
432         guint32 stack_size = 0;
433         CallInfo *cinfo;
434
435         cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
436
437         gr = 0;
438         fr = 0;
439
440         /* return value */
441         {
442                 ret_type = mono_type_get_underlying_type (sig->ret);
443                 switch (ret_type->type) {
444                 case MONO_TYPE_BOOLEAN:
445                 case MONO_TYPE_I1:
446                 case MONO_TYPE_U1:
447                 case MONO_TYPE_I2:
448                 case MONO_TYPE_U2:
449                 case MONO_TYPE_CHAR:
450                 case MONO_TYPE_I4:
451                 case MONO_TYPE_U4:
452                 case MONO_TYPE_I:
453                 case MONO_TYPE_U:
454                 case MONO_TYPE_PTR:
455                 case MONO_TYPE_CLASS:
456                 case MONO_TYPE_OBJECT:
457                 case MONO_TYPE_SZARRAY:
458                 case MONO_TYPE_ARRAY:
459                 case MONO_TYPE_STRING:
460                         cinfo->ret.storage = ArgInIReg;
461                         cinfo->ret.reg = AMD64_RAX;
462                         break;
463                 case MONO_TYPE_U8:
464                 case MONO_TYPE_I8:
465                         cinfo->ret.storage = ArgInIReg;
466                         cinfo->ret.reg = AMD64_RAX;
467                         break;
468                 case MONO_TYPE_R4:
469                         cinfo->ret.storage = ArgInFloatSSEReg;
470                         cinfo->ret.reg = AMD64_XMM0;
471                         break;
472                 case MONO_TYPE_R8:
473                         cinfo->ret.storage = ArgInDoubleSSEReg;
474                         cinfo->ret.reg = AMD64_XMM0;
475                         break;
476                 case MONO_TYPE_VALUETYPE: {
477                         guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
478
479                         add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
480                         if (cinfo->ret.storage == ArgOnStack)
481                                 /* The caller passes the address where the value is stored */
482                                 add_general (&gr, &stack_size, &cinfo->ret);
483                         break;
484                 }
485                 case MONO_TYPE_TYPEDBYREF:
486                         /* Same as a valuetype with size 24 */
487                         add_general (&gr, &stack_size, &cinfo->ret);
488                         ;
489                         break;
490                 case MONO_TYPE_VOID:
491                         break;
492                 default:
493                         g_error ("Can't handle as return value 0x%x", sig->ret->type);
494                 }
495         }
496
497         /* this */
498         if (sig->hasthis)
499                 add_general (&gr, &stack_size, cinfo->args + 0);
500
501         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
502                 gr = PARAM_REGS;
503                 fr = FLOAT_PARAM_REGS;
504                 
505                 /* Emit the signature cookie just before the implicit arguments */
506                 add_general (&gr, &stack_size, &cinfo->sig_cookie);
507         }
508
509         for (i = 0; i < sig->param_count; ++i) {
510                 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
511                 MonoType *ptype;
512
513                 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
514                         /* We always pass the sig cookie on the stack for simplicity */
515                         /* 
516                          * Prevent implicit arguments + the sig cookie from being passed 
517                          * in registers.
518                          */
519                         gr = PARAM_REGS;
520                         fr = FLOAT_PARAM_REGS;
521
522                         /* Emit the signature cookie just before the implicit arguments */
523                         add_general (&gr, &stack_size, &cinfo->sig_cookie);
524                 }
525
526                 if (sig->params [i]->byref) {
527                         add_general (&gr, &stack_size, ainfo);
528                         continue;
529                 }
530                 ptype = mono_type_get_underlying_type (sig->params [i]);
531                 switch (ptype->type) {
532                 case MONO_TYPE_BOOLEAN:
533                 case MONO_TYPE_I1:
534                 case MONO_TYPE_U1:
535                         add_general (&gr, &stack_size, ainfo);
536                         break;
537                 case MONO_TYPE_I2:
538                 case MONO_TYPE_U2:
539                 case MONO_TYPE_CHAR:
540                         add_general (&gr, &stack_size, ainfo);
541                         break;
542                 case MONO_TYPE_I4:
543                 case MONO_TYPE_U4:
544                         add_general (&gr, &stack_size, ainfo);
545                         break;
546                 case MONO_TYPE_I:
547                 case MONO_TYPE_U:
548                 case MONO_TYPE_PTR:
549                 case MONO_TYPE_CLASS:
550                 case MONO_TYPE_OBJECT:
551                 case MONO_TYPE_STRING:
552                 case MONO_TYPE_SZARRAY:
553                 case MONO_TYPE_ARRAY:
554                         add_general (&gr, &stack_size, ainfo);
555                         break;
556                 case MONO_TYPE_VALUETYPE:
557                         add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
558                         break;
559                 case MONO_TYPE_TYPEDBYREF:
560                         stack_size += sizeof (MonoTypedRef);
561                         ainfo->storage = ArgOnStack;
562                         break;
563                 case MONO_TYPE_U8:
564                 case MONO_TYPE_I8:
565                         add_general (&gr, &stack_size, ainfo);
566                         break;
567                 case MONO_TYPE_R4:
568                         add_float (&fr, &stack_size, ainfo, FALSE);
569                         break;
570                 case MONO_TYPE_R8:
571                         add_float (&fr, &stack_size, ainfo, TRUE);
572                         break;
573                 default:
574                         g_assert_not_reached ();
575                 }
576         }
577
578         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
579                 gr = PARAM_REGS;
580                 fr = FLOAT_PARAM_REGS;
581                 
582                 /* Emit the signature cookie just before the implicit arguments */
583                 add_general (&gr, &stack_size, &cinfo->sig_cookie);
584         }
585
586         if (stack_size & 0x8) {
587                 /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
588                 cinfo->need_stack_align = TRUE;
589                 stack_size += 8;
590         }
591
592         cinfo->stack_usage = stack_size;
593         cinfo->reg_usage = gr;
594         cinfo->freg_usage = fr;
595         return cinfo;
596 }
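
/*
 * Usage pattern, mirroring the callers in this file: the CallInfo is allocated
 * with g_malloc0 and must be freed by the caller once the argument/return
 * information has been consumed.
 *
 *   CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
 *   // ... inspect cinfo->ret, cinfo->args [i], cinfo->stack_usage ...
 *   g_free (cinfo);
 */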
597
598 /*
599  * mono_arch_get_argument_info:
600  * @csig:  a method signature
601  * @param_count: the number of parameters to consider
602  * @arg_info: an array to store the result infos
603  *
604  * Gathers information on parameters such as size, alignment and
605  * padding. arg_info should be large enough to hold param_count + 1 entries.
606  *
607  * Returns the size of the argument area on the stack.
608  */
609 int
610 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
611 {
612         int k;
613         CallInfo *cinfo = get_call_info (csig, FALSE);
614         guint32 args_size = cinfo->stack_usage;
615
616         /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
617         if (csig->hasthis) {
618                 arg_info [0].offset = 0;
619         }
620
621         for (k = 0; k < param_count; k++) {
622                 arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
623                 /* FIXME: */
624                 arg_info [k + 1].size = 0;
625         }
626
627         g_free (cinfo);
628
629         return args_size;
630 }
631
632 static int 
633 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
634 {
635         return 0;
636 }
637
638 /*
639  * Initialize the cpu to execute managed code.
640  */
641 void
642 mono_arch_cpu_init (void)
643 {
644         guint16 fpcw;
645
646         /* spec compliance requires running with double precision */
647         __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
648         fpcw &= ~X86_FPCW_PRECC_MASK;
649         fpcw |= X86_FPCW_PREC_DOUBLE;
650         __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
651         __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
652
653         mono_amd64_exceptions_init ();
654 }
655
656 /*
657  * This function returns the optimizations supported on this cpu.
658  */
659 guint32
660 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
661 {
662         int eax, ebx, ecx, edx;
663         guint32 opts = 0;
664
665         /* FIXME: AMD64 */
666
667         *exclude_mask = 0;
668         /* Feature Flags function, flags returned in EDX. */
669         if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
670                 if (edx & (1 << 15)) {
671                         opts |= MONO_OPT_CMOV;
672                         if (edx & 1)
673                                 opts |= MONO_OPT_FCMOV;
674                         else
675                                 *exclude_mask |= MONO_OPT_FCMOV;
676                 } else
677                         *exclude_mask |= MONO_OPT_CMOV;
678         }
679         return opts;
680 }
681
682 static gboolean
683 is_regsize_var (MonoType *t) {
684         if (t->byref)
685                 return TRUE;
686         t = mono_type_get_underlying_type (t);
687         switch (t->type) {
688         case MONO_TYPE_I4:
689         case MONO_TYPE_U4:
690         case MONO_TYPE_I:
691         case MONO_TYPE_U:
692         case MONO_TYPE_PTR:
693                 return TRUE;
694         case MONO_TYPE_OBJECT:
695         case MONO_TYPE_STRING:
696         case MONO_TYPE_CLASS:
697         case MONO_TYPE_SZARRAY:
698         case MONO_TYPE_ARRAY:
699                 return TRUE;
700         case MONO_TYPE_VALUETYPE:
701                 return FALSE;
702         }
703         return FALSE;
704 }
705
706 GList *
707 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
708 {
709         GList *vars = NULL;
710         int i;
711
712         for (i = 0; i < cfg->num_varinfo; i++) {
713                 MonoInst *ins = cfg->varinfo [i];
714                 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
715
716                 /* unused vars */
717                 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
718                         continue;
719
720                 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || 
721                     (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
722                         continue;
723
724                 /* we don't allocate I1 to registers because there is no simple way to sign extend
725                  * 8-bit quantities in caller saved registers on x86 */
726                 if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) || 
727                     (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2)||
728                     (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
729                         g_assert (MONO_VARINFO (cfg, i)->reg == -1);
730                         g_assert (i == vmv->idx);
731                         vars = g_list_prepend (vars, vmv);
732                 }
733         }
734
735         vars = mono_varlist_sort (cfg, vars, 0);
736
737         return vars;
738 }
739
740 GList *
741 mono_arch_get_global_int_regs (MonoCompile *cfg)
742 {
743         GList *regs = NULL;
744
745         /* We use the callee saved registers for global allocation */
746         regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
747         regs = g_list_prepend (regs, (gpointer)AMD64_R12);
748         regs = g_list_prepend (regs, (gpointer)AMD64_R13);
749         regs = g_list_prepend (regs, (gpointer)AMD64_R14);
750         regs = g_list_prepend (regs, (gpointer)AMD64_R15);
751
752         return regs;
753 }
754
755 /*
756  * mono_arch_regalloc_cost:
757  *
758  *  Return the cost, in number of memory references, of the action of 
759  * allocating the variable VMV into a register during global register
760  * allocation.
761  */
762 guint32
763 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
764 {
765         MonoInst *ins = cfg->varinfo [vmv->idx];
766
767         if (cfg->method->save_lmf)
768                 /* The register is already saved */
769                 /* subtract 1 for the invisible store in the prolog */
770                 return (ins->opcode == OP_ARG) ? 0 : 1;
771         else
772                 /* push+pop */
773                 return (ins->opcode == OP_ARG) ? 1 : 2;
774 }
775  
776 void
777 mono_arch_allocate_vars (MonoCompile *m)
778 {
779         MonoMethodSignature *sig;
780         MonoMethodHeader *header;
781         MonoInst *inst;
782         int i, offset, size, align, curinst;
783         CallInfo *cinfo;
784
785         header = ((MonoMethodNormal *)m->method)->header;
786
787         sig = m->method->signature;
788
789         cinfo = get_call_info (sig, FALSE);
790
791         /*
792          * We use the ABI calling conventions for managed code as well.
793          * Exception: valuetypes are never passed or returned in registers.
794          */
795
796         /* Locals are allocated backwards from %fp */
797         m->frame_reg = AMD64_RBP;
798         offset = 0;
799
800         /* Reserve space for callee saved registers */
801         for (i = 0; i < AMD64_NREG; ++i)
802                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
803                         offset += sizeof (gpointer);
804                 }
805
806         if (m->method->save_lmf) {
807                 /* Reserve stack space for saving LMF + argument regs */
808                 offset += sizeof (MonoLMF);
809                 if (lmf_tls_offset == -1)
810                         /* Need to save argument regs too */
811                         offset += (AMD64_NREG * 8) + (8 * 8);
812                 m->arch.lmf_offset = offset;
813         }
814
815         if (sig->ret->type != MONO_TYPE_VOID) {
816                 switch (cinfo->ret.storage) {
817                 case ArgInIReg:
818                 case ArgInFloatSSEReg:
819                 case ArgInDoubleSSEReg:
820                         if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
821                                 /* The register is volatile */
822                                 m->ret->opcode = OP_REGOFFSET;
823                                 m->ret->inst_basereg = AMD64_RBP;
824                                 offset += 8;
825                                 m->ret->inst_offset = - offset;
826                         }
827                         else {
828                                 m->ret->opcode = OP_REGVAR;
829                                 m->ret->inst_c0 = cinfo->ret.reg;
830                         }
831                         break;
832                 default:
833                         g_assert_not_reached ();
834                 }
835                 m->ret->dreg = m->ret->inst_c0;
836         }
837
838         curinst = m->locals_start;
839         for (i = curinst; i < m->num_varinfo; ++i) {
840                 inst = m->varinfo [i];
841
842                 if (inst->opcode == OP_REGVAR) {
843                         //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
844                         continue;
845                 }
846
847                 /* inst->unused indicates native sized value types; this is used by the
848                  * pinvoke wrappers when they call functions returning structures */
849                 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
850                         size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
851                 else
852                         size = mono_type_stack_size (inst->inst_vtype, &align);
853
854                 /*
855                  * variables are accessed as negative offsets from %fp, so increase
856                  * the offset before assigning it to a variable
857                  */
858                 offset += size;
859
860                 offset += align - 1;
861                 offset &= ~(align - 1);
862                 inst->opcode = OP_REGOFFSET;
863                 inst->inst_basereg = AMD64_RBP;
864                 inst->inst_offset = - offset;
865
866                 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
867         }
868
869         if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
870                 g_assert (cinfo->sig_cookie.storage == ArgOnStack);
871                 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
872         }
873
874         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
875                 inst = m->varinfo [i];
876                 if (inst->opcode != OP_REGVAR) {
877                         ArgInfo *ainfo = &cinfo->args [i];
878                         gboolean inreg = TRUE;
879                         MonoType *arg_type;
880
881                         if (sig->hasthis && (i == 0))
882                                 arg_type = &mono_defaults.object_class->byval_arg;
883                         else
884                                 arg_type = sig->params [i - sig->hasthis];
885
886                         /* FIXME: Allocate volatile arguments to registers */
887                         if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
888                                 inreg = FALSE;
889
890                         /* 
891                          * Under AMD64, all registers used to pass arguments to functions
892                          * are volatile across calls.
893                          * FIXME: Optimize this.
894                          */
895                         if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg))
896                                 inreg = FALSE;
897
898                         inst->opcode = OP_REGOFFSET;
899
900                         switch (ainfo->storage) {
901                         case ArgInIReg:
902                         case ArgInFloatSSEReg:
903                         case ArgInDoubleSSEReg:
904                                 inst->opcode = OP_REGVAR;
905                                 inst->dreg = ainfo->reg;
906                                 break;
907                         case ArgOnStack:
908                                 inst->opcode = OP_REGOFFSET;
909                                 inst->inst_basereg = AMD64_RBP;
910                                 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
911                                 break;
912                         default:
913                                 NOT_IMPLEMENTED;
914                         }
915
916                         if (!inreg && (ainfo->storage != ArgOnStack)) {
917                                 inst->opcode = OP_REGOFFSET;
918                                 inst->inst_basereg = AMD64_RBP;
919                                 /* These arguments are saved to the stack in the prolog */
920                                 offset += 8;
921                                 inst->inst_offset = - offset;
922                         }
923                 }
924         }
925
926         m->stack_offset = offset;
927
928         g_free (cinfo);
929 }
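
/*
 * Rough sketch of the frame layout produced above (offsets relative to %rbp,
 * the frame register; the exact ordering of the negative-offset areas depends
 * on the method):
 *
 *   rbp + 16 ...  incoming stack arguments (ARGS_OFFSET skips the return
 *                 address and the saved %rbp)
 *   rbp + 8       return address
 *   rbp + 0       saved %rbp
 *   rbp - ...     callee saved registers used for global register allocation
 *   rbp - ...     LMF area (+ argument regs) when method->save_lmf is set
 *   rbp - ...     locals and spilled arguments, at the negative offsets
 *                 assigned in this function
 */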
930
931 static void
932 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
933 {
934         switch (storage) {
935         case ArgInIReg:
936                 arg->opcode = OP_OUTARG_REG;
937                 arg->inst_left = tree;
938                 arg->inst_right = (MonoInst*)call;
939                 arg->unused = reg;
940                 call->used_iregs |= 1 << reg;
941                 break;
942         case ArgInFloatSSEReg:
943                 arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
944                 arg->inst_left = tree;
945                 arg->inst_right = (MonoInst*)call;
946                 arg->unused = reg;
947                 call->used_fregs |= 1 << reg;
948                 break;
949         case ArgInDoubleSSEReg:
950                 arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
951                 arg->inst_left = tree;
952                 arg->inst_right = (MonoInst*)call;
953                 arg->unused = reg;
954                 call->used_fregs |= 1 << reg;
955                 break;
956         default:
957                 g_assert_not_reached ();
958         }
959 }
960
961 /* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode;
962  * currently, alignment in mono_arch_call_opcode is computed without arch_get_argument_info
963  */
964
965 static int
966 arg_storage_to_ldind (ArgStorage storage)
967 {
968         switch (storage) {
969         case ArgInIReg:
970                 return CEE_LDIND_I;
971         case ArgInDoubleSSEReg:
972                 return CEE_LDIND_R8;
973         case ArgInFloatSSEReg:
974                 return CEE_LDIND_R4;
975         default:
976                 g_assert_not_reached ();
977         }
978
979         return -1;
980 }
981
982 /* 
983  * take the arguments and generate the arch-specific
984  * instructions to properly call the function in call.
985  * This includes pushing, moving arguments to the right register
986  * etc.
987  * Issue: who does the spilling if needed, and when?
988  */
989 MonoCallInst*
990 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
991         MonoInst *arg, *in;
992         MonoMethodSignature *sig;
993         int i, n, stack_size;
994         CallInfo *cinfo;
995         ArgInfo *ainfo;
996
997         stack_size = 0;
998
999         sig = call->signature;
1000         n = sig->param_count + sig->hasthis;
1001
1002         cinfo = get_call_info (sig, sig->pinvoke);
1003
1004         for (i = 0; i < n; ++i) {
1005                 ainfo = cinfo->args + i;
1006
1007                 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1008                         MonoMethodSignature *tmp_sig;
1009                         
1010                         /* Emit the signature cookie just before the implicit arguments */
1011                         MonoInst *sig_arg;
1012                         /* FIXME: Add support for signature tokens to AOT */
1013                         cfg->disable_aot = TRUE;
1014
1015                         g_assert (cinfo->sig_cookie.storage == ArgOnStack);
1016
1017                         /*
1018                          * mono_ArgIterator_Setup assumes the signature cookie is 
1019                          * passed first and all the arguments which were before it are
1020                          * passed on the stack after the signature. So compensate by 
1021                          * passing a different signature.
1022                          */
1023                         tmp_sig = mono_metadata_signature_dup (call->signature);
1024                         tmp_sig->param_count -= call->signature->sentinelpos;
1025                         tmp_sig->sentinelpos = 0;
1026                         memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1027
1028                         MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1029                         sig_arg->inst_p0 = tmp_sig;
1030
1031                         MONO_INST_NEW (cfg, arg, OP_OUTARG);
1032                         arg->inst_left = sig_arg;
1033                         arg->type = STACK_PTR;
1034
1035                         /* prepend, so they get reversed */
1036                         arg->next = call->out_args;
1037                         call->out_args = arg;
1038                 }
1039
1040                 if (is_virtual && i == 0) {
1041                         /* the argument will be attached to the call instruction */
1042                         in = call->args [i];
1043                 } else {
1044                         MONO_INST_NEW (cfg, arg, OP_OUTARG);
1045                         in = call->args [i];
1046                         arg->cil_code = in->cil_code;
1047                         arg->inst_left = in;
1048                         arg->type = in->type;
1049                         /* prepend, so they get reversed */
1050                         arg->next = call->out_args;
1051                         call->out_args = arg;
1052
1053                         if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1054                                 gint align;
1055                                 guint32 size;
1056
1057                                 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1058                                         size = sizeof (MonoTypedRef);
1059                                         align = sizeof (gpointer);
1060                                 }
1061                                 else
1062                                 if (sig->pinvoke)
1063                                         size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1064                                 else
1065                                         size = mono_type_stack_size (&in->klass->byval_arg, &align);
1066                                 if (ainfo->storage == ArgValuetypeInReg) {
1067                                         if (ainfo->pair_storage [1] == ArgNone) {
1068                                                 MonoInst *load;
1069
1070                                                 /* Simpler case */
1071
1072                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
1073                                                 load->inst_left = in;
1074
1075                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
1076                                         }
1077                                         else {
1078                                                 /* Trees can't be shared so make a copy */
1079                                                 MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1080                                                 MonoInst *load, *load2, *offset_ins;
1081
1082                                                 /* Reg1 */
1083                                                 MONO_INST_NEW (cfg, load, CEE_LDIND_I);
1084                                                 load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
1085
1086                                                 NEW_ICONST (cfg, offset_ins, 0);
1087                                                 MONO_INST_NEW (cfg, load2, CEE_ADD);
1088                                                 load2->inst_left = load;
1089                                                 load2->inst_right = offset_ins;
1090
1091                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
1092                                                 load->inst_left = load2;
1093
1094                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
1095
1096                                                 /* Reg2 */
1097                                                 MONO_INST_NEW (cfg, load, CEE_LDIND_I);
1098                                                 load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
1099
1100                                                 NEW_ICONST (cfg, offset_ins, 8);
1101                                                 MONO_INST_NEW (cfg, load2, CEE_ADD);
1102                                                 load2->inst_left = load;
1103                                                 load2->inst_right = offset_ins;
1104
1105                                                 MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
1106                                                 load->inst_left = load2;
1107
1108                                                 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1109                                                 arg->cil_code = in->cil_code;
1110                                                 arg->type = in->type;
1111                                                 /* prepend, so they get reversed */
1112                                                 arg->next = call->out_args;
1113                                                 call->out_args = arg;
1114
1115                                                 add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);
1116
1117                                                 /* Prepend a copy inst */
1118                                                 MONO_INST_NEW (cfg, arg, CEE_STIND_I);
1119                                                 arg->cil_code = in->cil_code;
1120                                                 arg->inst_left = vtaddr;
1121                                                 arg->inst_right = in;
1122                                                 arg->type = in->type;
1123
1124                                                 /* prepend, so they get reversed */
1125                                                 arg->next = call->out_args;
1126                                                 call->out_args = arg;
1127                                         }
1128                                 }
1129                                 else {
1130                                         arg->opcode = OP_OUTARG_VT;
1131                                         arg->klass = in->klass;
1132                                         arg->unused = sig->pinvoke;
1133                                         arg->inst_imm = size;
1134                                 }
1135                         }
1136                         else {
1137                                 switch (ainfo->storage) {
1138                                 case ArgInIReg:
1139                                         add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
1140                                         break;
1141                                 case ArgInFloatSSEReg:
1142                                 case ArgInDoubleSSEReg:
1143                                         add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
1144                                         break;
1145                                 case ArgOnStack:
1146                                         arg->opcode = OP_OUTARG;
1147                                         if (!sig->params [i - sig->hasthis]->byref) {
1148                                                 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
1149                                                         arg->opcode = OP_OUTARG_R4;
1150                                                 else
1151                                                         if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
1152                                                                 arg->opcode = OP_OUTARG_R8;
1153                                         }
1154                                         break;
1155                                 default:
1156                                         g_assert_not_reached ();
1157                                 }
1158                         }
1159                 }
1160         }
1161
1162         if (cinfo->need_stack_align) {
1163                 MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
1164                 /* prepend, so they get reversed */
1165                 arg->next = call->out_args;
1166                 call->out_args = arg;
1167         }
1168
1169         call->stack_usage = cinfo->stack_usage;
1170         cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1171         cfg->flags |= MONO_CFG_HAS_CALLS;
1172
1173         g_free (cinfo);
1174
1175         return call;
1176 }
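
/*
 * Illustrative outcome for a simple non-vararg call such as f (int a, double b):
 * the loop above emits an OP_OUTARG_REG node placing 'a' in %rdi and an
 * OP_AMD64_OUTARG_XMMREG_R8 node placing 'b' in xmm0, each prepended to
 * call->out_args (so the list ends up in reverse argument order), and
 * call->stack_usage stays 0 because nothing is passed on the stack.
 */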
1177
1178 #define EMIT_COND_BRANCH(ins,cond,sign) \
1179 if (ins->flags & MONO_INST_BRLABEL) { \
1180         if (ins->inst_i0->inst_c0) { \
1181                 x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
1182         } else { \
1183                 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1184                 if ((cfg->opt & MONO_OPT_BRANCH) && \
1185                     x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
1186                         x86_branch8 (code, cond, 0, sign); \
1187                 else \
1188                         x86_branch32 (code, cond, 0, sign); \
1189         } \
1190 } else { \
1191         if (ins->inst_true_bb->native_offset) { \
1192                 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1193         } else { \
1194                 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1195                 if ((cfg->opt & MONO_OPT_BRANCH) && \
1196                     x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1197                         x86_branch8 (code, cond, 0, sign); \
1198                 else \
1199                         x86_branch32 (code, cond, 0, sign); \
1200         } \
1201 }
1202
1203 /* emit an exception if the condition fails */
1204 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
1205         do {                                                        \
1206                 mono_add_patch_info (cfg, code - cfg->native_code,   \
1207                                     MONO_PATCH_INFO_EXC, exc_name);  \
1208                 x86_branch32 (code, cond, 0, signed);               \
1209         } while (0); 
1210
1211 #define EMIT_FPCOMPARE(code) do { \
1212         amd64_fcompp (code); \
1213         amd64_fnstsw (code); \
1214 } while (0); 
1215
1216 /*
1217  * Emitting a call and patching it later is expensive on amd64, so try to
1218  * determine the patch target immediately, and emit more efficient code if
1219  * possible.
1220  */
1221 static guint8*
1222 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1223 {
1224         /* FIXME: */
1225         mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1226         amd64_set_reg_template (code, GP_SCRATCH_REG);
1227         amd64_call_reg (code, GP_SCRATCH_REG);
1228
1229         return code;
1230 }
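
/*
 * Usage sketch (patch type and target are illustrative): callers record a
 * patch info entry at the call site and let the generic patching machinery
 * resolve it later through amd64_patch ().
 *
 *   code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, some_target_function);
 */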
1231
1232 #define EMIT_CALL() do { \
1233     amd64_set_reg_template (code, GP_SCRATCH_REG); \
1234     amd64_call_reg (code, GP_SCRATCH_REG); \
1235 } while (0);
1236
1237 /* FIXME: Add more instructions */
1238 #define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG))
1239
1240 static void
1241 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1242 {
1243         MonoInst *ins, *last_ins = NULL;
1244         ins = bb->code;
1245
1246         while (ins) {
1247
1248                 switch (ins->opcode) {
1249                 case OP_ICONST:
1250                 case OP_I8CONST:
1251                         /* reg = 0 -> XOR (reg, reg) */
1252                         /* XOR sets cflags on x86, so we can't always do it */
1253                         if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
1254                                 ins->opcode = CEE_XOR;
1255                                 ins->sreg1 = ins->dreg;
1256                                 ins->sreg2 = ins->dreg;
1257                         }
1258                         break;
1259                 case OP_MUL_IMM: 
1260                         /* remove unnecessary multiplication by 1 */
1261                         if (ins->inst_imm == 1) {
1262                                 if (ins->dreg != ins->sreg1) {
1263                                         ins->opcode = OP_MOVE;
1264                                 } else {
1265                                         last_ins->next = ins->next;
1266                                         ins = ins->next;
1267                                         continue;
1268                                 }
1269                         }
1270                         break;
1271                 case OP_COMPARE_IMM:
1272                         /* OP_COMPARE_IMM (reg, 0) 
1273                          * --> 
1274                          * OP_AMD64_TEST_NULL (reg) 
1275                          */
1276                         if (!ins->inst_imm)
1277                                 ins->opcode = OP_AMD64_TEST_NULL;
1278                         break;
1279                 case OP_ICOMPARE_IMM:
1280                         if (!ins->inst_imm)
1281                                 ins->opcode = OP_X86_TEST_NULL;
1282                         break;
1283                 case OP_X86_COMPARE_MEMBASE_IMM:
1284                         /* 
1285                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1286                          * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1287                          * -->
1288                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1289                          * OP_COMPARE_IMM reg, imm
1290                          *
1291                          * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1292                          */
1293                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1294                             ins->inst_basereg == last_ins->inst_destbasereg &&
1295                             ins->inst_offset == last_ins->inst_offset) {
1296                                         ins->opcode = OP_COMPARE_IMM;
1297                                         ins->sreg1 = last_ins->sreg1;
1298
1299                                         /* check if we can remove cmp reg,0 with test null */
1300                                         if (!ins->inst_imm)
1301                                                 ins->opcode = OP_X86_TEST_NULL;
1302                                 }
1303
1304                         break;
1305                 case OP_LOAD_MEMBASE:
1306                 case OP_LOADI4_MEMBASE:
1307                         /* 
1308                          * Note: if reg1 = reg2 the load op is removed
1309                          *
1310                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1311                          * OP_LOAD_MEMBASE offset(basereg), reg2
1312                          * -->
1313                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1314                          * OP_MOVE reg1, reg2
1315                          */
1316                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1317                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1318                             ins->inst_basereg == last_ins->inst_destbasereg &&
1319                             ins->inst_offset == last_ins->inst_offset) {
1320                                 if (ins->dreg == last_ins->sreg1) {
1321                                         last_ins->next = ins->next;                             
1322                                         ins = ins->next;                                
1323                                         continue;
1324                                 } else {
1325                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1326                                         ins->opcode = OP_MOVE;
1327                                         ins->sreg1 = last_ins->sreg1;
1328                                 }
1329
1330                         /* 
1331                          * Note: reg1 must be different from the basereg in the second load
1332                          * Note: if reg1 = reg2 then the second load is removed
1333                          *
1334                          * OP_LOAD_MEMBASE offset(basereg), reg1
1335                          * OP_LOAD_MEMBASE offset(basereg), reg2
1336                          * -->
1337                          * OP_LOAD_MEMBASE offset(basereg), reg1
1338                          * OP_MOVE reg1, reg2
1339                          */
1340                         } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1341                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1342                               ins->inst_basereg != last_ins->dreg &&
1343                               ins->inst_basereg == last_ins->inst_basereg &&
1344                               ins->inst_offset == last_ins->inst_offset) {
1345
1346                                 if (ins->dreg == last_ins->dreg) {
1347                                         last_ins->next = ins->next;                             
1348                                         ins = ins->next;                                
1349                                         continue;
1350                                 } else {
1351                                         ins->opcode = OP_MOVE;
1352                                         ins->sreg1 = last_ins->dreg;
1353                                 }
1354
1355                                 //g_assert_not_reached ();
1356
1357 #if 0
1358                         /* 
1359                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1360                          * OP_LOAD_MEMBASE offset(basereg), reg
1361                          * -->
1362                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1363                          * OP_ICONST reg, imm
1364                          */
1365                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1366                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1367                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1368                                    ins->inst_offset == last_ins->inst_offset) {
1369                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1370                                 ins->opcode = OP_ICONST;
1371                                 ins->inst_c0 = last_ins->inst_imm;
1372                                 g_assert_not_reached (); // check this rule
1373 #endif
1374                         }
1375                         break;
1376                 case OP_LOADU1_MEMBASE:
1377                 case OP_LOADI1_MEMBASE:
1378                         /* 
1379                          * Note: if reg1 = reg2 the load op is removed
1380                          *
1381                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1382                          * OP_LOAD_MEMBASE offset(basereg), reg2
1383                          * -->
1384                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1385                          * OP_MOVE reg1, reg2
1386                          */
1387                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1388                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1389                                         ins->inst_offset == last_ins->inst_offset) {
1390                                 if (ins->dreg == last_ins->sreg1) {
1391                                         last_ins->next = ins->next;                             
1392                                         ins = ins->next;                                
1393                                         continue;
1394                                 } else {
1395                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1396                                         ins->opcode = OP_MOVE;
1397                                         ins->sreg1 = last_ins->sreg1;
1398                                 }
1399                         }
1400                         break;
1401                 case OP_LOADU2_MEMBASE:
1402                 case OP_LOADI2_MEMBASE:
1403                         /* 
1404                          * Note: if reg1 = reg2 the load op is removed
1405                          *
1406                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1407                          * OP_LOAD_MEMBASE offset(basereg), reg2
1408                          * -->
1409                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1410                          * OP_MOVE reg1, reg2
1411                          */
1412                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1413                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1414                                         ins->inst_offset == last_ins->inst_offset) {
1415                                 if (ins->dreg == last_ins->sreg1) {
1416                                         last_ins->next = ins->next;                             
1417                                         ins = ins->next;                                
1418                                         continue;
1419                                 } else {
1420                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1421                                         ins->opcode = OP_MOVE;
1422                                         ins->sreg1 = last_ins->sreg1;
1423                                 }
1424                         }
1425                         break;
1426                 case CEE_CONV_I4:
1427                 case CEE_CONV_U4:
1428                 case OP_MOVE:
1429                         /*
1430                          * Removes:
1431                          *
1432                          * OP_MOVE reg, reg 
1433                          */
1434                         if (ins->dreg == ins->sreg1) {
1435                                 if (last_ins)
1436                                         last_ins->next = ins->next;                             
1437                                 ins = ins->next;
1438                                 continue;
1439                         }
1440                         /* 
1441                          * Removes:
1442                          *
1443                          * OP_MOVE sreg, dreg 
1444                          * OP_MOVE dreg, sreg
1445                          */
1446                         if (last_ins && last_ins->opcode == OP_MOVE &&
1447                             ins->sreg1 == last_ins->dreg &&
1448                             ins->dreg == last_ins->sreg1) {
1449                                 last_ins->next = ins->next;                             
1450                                 ins = ins->next;                                
1451                                 continue;
1452                         }
1453                         break;
1454                 }
1455                 last_ins = ins;
1456                 ins = ins->next;
1457         }
1458         bb->last_ins = last_ins;
1459 }
1460
1461 static const int 
1462 branch_cc_table [] = {
1463         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1464         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1465         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1466 };
1467
1468 static int
1469 opcode_to_x86_cond (int opcode)
1470 {
1471         switch (opcode) {
1472         case OP_IBEQ:
1473                 return X86_CC_EQ;
1474         case OP_IBNE_UN:
1475                 return X86_CC_NE;
1476         case OP_IBLT:
1477                 return X86_CC_LT;
1478         case OP_IBLT_UN:
1479                 return X86_CC_LT;
1480         case OP_IBGT:
1481                 return X86_CC_GT;
1482         case OP_IBGT_UN:
1483                 return X86_CC_GT;
1484         case OP_IBGE:
1485                 return X86_CC_GE;
1486         case OP_IBGE_UN:
1487                 return X86_CC_GE;
1488         case OP_IBLE:
1489                 return X86_CC_LE;
1490         case OP_IBLE_UN:
1491                 return X86_CC_LE;
1492         case OP_COND_EXC_IOV:
1493                 return X86_CC_O;
1494         case OP_COND_EXC_IC:
1495                 return X86_CC_C;
1496         default:
1497                 g_assert_not_reached ();
1498         }
1499
1500         return -1;
1501 }
1502
1503 /*
1504  * returns the offset used by spillvar. It allocates a new
1505  * spill variable if necessary. 
1506  */
1507 static int
1508 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1509 {
1510         MonoSpillInfo **si, *info;
1511         int i = 0;
1512
1513         si = &cfg->spill_info; 
1514         
1515         while (i <= spillvar) {
1516
1517                 if (!*si) {
1518                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1519                         info->next = NULL;
1520                         cfg->stack_offset += sizeof (gpointer);
1521                         info->offset = - cfg->stack_offset;
1522                 }
1523
1524                 if (i == spillvar)
1525                         return (*si)->offset;
1526
1527                 i++;
1528                 si = &(*si)->next;
1529         }
1530
1531         g_assert_not_reached ();
1532         return 0;
1533 }
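
/*
 * Example (illustrative): with an 8-byte gpointer, if cfg->stack_offset is S
 * when spill slot 0 is created, slot 0 is placed at offset -(S + 8) and
 * slot 1 at -(S + 16) relative to the frame pointer; asking for the same
 * slot again returns the cached offset without growing the frame.
 */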
1534
1535 /*
1536  * returns the offset used by spillvar. It allocates a new
1537  * spill float variable if necessary. 
1538  * (same as mono_spillvar_offset but for float)
1539  */
1540 static int
1541 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1542 {
1543         MonoSpillInfo **si, *info;
1544         int i = 0;
1545
1546         si = &cfg->spill_info_float; 
1547         
1548         while (i <= spillvar) {
1549
1550                 if (!*si) {
1551                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1552                         info->next = NULL;
1553                         cfg->stack_offset += sizeof (double);
1554                         info->offset = - cfg->stack_offset;
1555                 }
1556
1557                 if (i == spillvar)
1558                         return (*si)->offset;
1559
1560                 i++;
1561                 si = &(*si)->next;
1562         }
1563
1564         g_assert_not_reached ();
1565         return 0;
1566 }
1567
1568 /*
1569  * Creates a store for spilled floating point items
1570  */
1571 static MonoInst*
1572 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1573 {
1574         MonoInst *store;
1575         MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1576         store->sreg1 = reg;
1577         store->inst_destbasereg = AMD64_RBP;
1578         store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1579
1580         DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)store->inst_offset, reg));
1581         return store;
1582 }
1583
1584 /*
1585  * Creates a load for spilled floating point items 
1586  */
1587 static MonoInst*
1588 create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1589 {
1590         MonoInst *load;
1591         MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
1592         load->dreg = reg;
1593         load->inst_basereg = AMD64_RBP;
1594         load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1595
1596         DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)load->inst_offset, reg));
1597         return load;
1598 }
1599
1600 #define ireg_is_freeable(r) ((r) >= 0 && (r) <= 7 && AMD64_IS_CALLEE_REG ((r)))
1601 #define freg_is_freeable(r) ((r) >= 0 && (r) < AMD64_XMM_NREG)
1602
1603 #define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
1604 #define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
1605 #define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
1606 #define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
1607 #define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
1608 #define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
1609 #define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
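
/*
 * e.g. sreg1_is_fp (ins) is TRUE when the opcode's spec string marks the
 * first source operand as a float ('f'); the allocator below uses these
 * helpers to decide whether an operand is tracked in fassign or iassign.
 */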
1610
1611 typedef struct {
1612         int born_in;
1613         int killed_in;
1614         int last_use;
1615         int prev_use;
1616         int flags;              /* used to track fp spill/load */
1617 } RegTrack;
1618
1619 static const char*const * ins_spec = amd64_desc;
1620
1621 static void
1622 print_ins (int i, MonoInst *ins)
1623 {
1624         const char *spec = ins_spec [ins->opcode];
1625         g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1626         if (!spec)
1627                 g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
1628         if (spec [MONO_INST_DEST]) {
1629                 gboolean fp = (spec [MONO_INST_DEST] == 'f');
1630                 if (reg_is_soft (ins->dreg, fp))
1631                         g_print (" R%d <-", ins->dreg);
1632                 else
1633                         g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
1634         }
1635         if (spec [MONO_INST_SRC1]) {
1636                 gboolean fp = (spec [MONO_INST_SRC1] == 'f');
1637                 if (reg_is_soft (ins->sreg1, fp))
1638                         g_print (" R%d", ins->sreg1);
1639                 else
1640                         g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
1641         }
1642         if (spec [MONO_INST_SRC2]) {
1643                 gboolean fp = (spec [MONO_INST_SRC2] == 'f');
1644                 if (reg_is_soft (ins->sreg2, fp))
1645                         g_print (" R%d", ins->sreg2);
1646                 else
1647                         g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
1648         }
1649         if (spec [MONO_INST_CLOB])
1650                 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1651         g_print ("\n");
1652 }
1653
1654 static void
1655 print_regtrack (RegTrack *t, int num)
1656 {
1657         int i;
1658         char buf [32];
1659         const char *r;
1660         
1661         for (i = 0; i < num; ++i) {
1662                 if (!t [i].born_in)
1663                         continue;
1664                 if (i >= MONO_MAX_IREGS) {
1665                         g_snprintf (buf, sizeof(buf), "R%d", i);
1666                         r = buf;
1667                 } else
1668                         r = mono_arch_regname (i);
1669                 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1670         }
1671 }
1672
1673 typedef struct InstList InstList;
1674
1675 struct InstList {
1676         InstList *prev;
1677         InstList *next;
1678         MonoInst *data;
1679 };
1680
1681 static inline InstList*
1682 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1683 {
1684         InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1685         item->data = data;
1686         item->prev = NULL;
1687         item->next = list;
1688         if (list)
1689                 list->prev = item;
1690         return item;
1691 }
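
/*
 * Usage sketch: the liveness pass below builds the reversed instruction list
 * one node at a time with
 *
 *   reversed = inst_list_prepend (cfg->mempool, reversed, ins);
 *
 * so the head of 'reversed' ends up being the last instruction of the basic
 * block, which is the order the backward allocation pass wants to see.
 */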
1692
1693 /*
1694  * Force the spilling of the variable in the symbolic register 'reg'.
1695  */
1696 static int
1697 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
1698 {
1699         MonoInst *load;
1700         int i, sel, spill;
1701         int *assign, *symbolic;
1702
1703         if (fp) {
1704                 assign = cfg->rs->fassign;
1705                 symbolic = cfg->rs->fsymbolic;
1706         }
1707         else {
1708                 assign = cfg->rs->iassign;
1709                 symbolic = cfg->rs->isymbolic;
1710         }       
1711         
1712         sel = assign [reg];
1713         /*i = cfg->rs->isymbolic [sel];
1714         g_assert (i == reg);*/
1715         i = reg;
1716         spill = ++cfg->spill_count;
1717         assign [i] = -spill - 1;
1718         if (fp)
1719                 mono_regstate_free_float (cfg->rs, sel);
1720         else
1721                 mono_regstate_free_int (cfg->rs, sel);
1722         /* we need to create a spill var and insert a load to sel after the current instruction */
1723         if (fp)
1724                 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1725         else
1726                 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1727         load->dreg = sel;
1728         load->inst_basereg = AMD64_RBP;
1729         load->inst_offset = mono_spillvar_offset (cfg, spill);
1730         if (item->prev) {
1731                 while (ins->next != item->prev->data)
1732                         ins = ins->next;
1733         }
1734         load->next = ins->next;
1735         ins->next = load;
1736         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1737         if (fp)
1738                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1739         else
1740                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1741         g_assert (i == sel);
1742
1743         return sel;
1744 }
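
/*
 * Note on the encoding used above (and in get_register_spilling below): an
 * assignment value below -1 marks the variable as spilled; the spill index
 * is recovered later as -val - 1, so e.g. spill number 3 is stored as -4.
 */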
1745
1746 static int
1747 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
1748 {
1749         MonoInst *load;
1750         int i, sel, spill;
1751         int *assign, *symbolic;
1752
1753         if (fp) {
1754                 assign = cfg->rs->fassign;
1755                 symbolic = cfg->rs->fsymbolic;
1756         }
1757         else {
1758                 assign = cfg->rs->iassign;
1759                 symbolic = cfg->rs->isymbolic;
1760         }
1761
1762         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1763         /* exclude the registers in the current instruction */
1764         if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
1765                 if (reg_is_soft (ins->sreg1, fp))
1766                         regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
1767                 else
1768                         regmask &= ~ (1 << ins->sreg1);
1769                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
1770         }
1771         if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
1772                 if (reg_is_soft (ins->sreg2, fp))
1773                         regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
1774                 else
1775                         regmask &= ~ (1 << ins->sreg2);
1776                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
1777         }
1778         if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
1779                 regmask &= ~ (1 << ins->dreg);
1780                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
1781         }
1782
1783         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1784         g_assert (regmask); /* need at least a register we can free */
1785         sel = -1;
1786         /* we should track prev_use and spill the register whose next use is farthest away */
1787         if (fp) {
1788                 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1789                         if (regmask & (1 << i)) {
1790                                 sel = i;
1791                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
1792                                 break;
1793                         }
1794                 }
1795
1796                 i = cfg->rs->fsymbolic [sel];
1797                 spill = ++cfg->spill_count;
1798                 cfg->rs->fassign [i] = -spill - 1;
1799                 mono_regstate_free_float (cfg->rs, sel);
1800         }
1801         else {
1802                 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1803                         if (regmask & (1 << i)) {
1804                                 sel = i;
1805                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1806                                 break;
1807                         }
1808                 }
1809
1810                 i = cfg->rs->isymbolic [sel];
1811                 spill = ++cfg->spill_count;
1812                 cfg->rs->iassign [i] = -spill - 1;
1813                 mono_regstate_free_int (cfg->rs, sel);
1814         }
1815
1816         /* we need to create a spill var and insert a load to sel after the current instruction */
1817         MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
1818         load->dreg = sel;
1819         load->inst_basereg = AMD64_RBP;
1820         load->inst_offset = mono_spillvar_offset (cfg, spill);
1821         if (item->prev) {
1822                 while (ins->next != item->prev->data)
1823                         ins = ins->next;
1824         }
1825         load->next = ins->next;
1826         ins->next = load;
1827         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1828         if (fp)
1829                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1830         else
1831                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1832         g_assert (i == sel);
1833         
1834         return sel;
1835 }
1836
1837 static MonoInst*
1838 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
1839 {
1840         MonoInst *copy;
1841
1842         if (fp)
1843                 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1844         else
1845                 MONO_INST_NEW (cfg, copy, OP_MOVE);
1846
1847         copy->dreg = dest;
1848         copy->sreg1 = src;
1849         if (ins) {
1850                 copy->next = ins->next;
1851                 ins->next = copy;
1852         }
1853         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_amd64_regname (src, fp), mono_amd64_regname (dest, fp)));
1854         return copy;
1855 }
1856
1857 static MonoInst*
1858 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
1859 {
1860         MonoInst *store;
1861         MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
1862         store->sreg1 = reg;
1863         store->inst_destbasereg = AMD64_RBP;
1864         store->inst_offset = mono_spillvar_offset (cfg, spill);
1865         if (ins) {
1866                 store->next = ins->next;
1867                 ins->next = store;
1868         }
1869         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
1870         return store;
1871 }
1872
1873 static void
1874 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1875 {
1876         MonoInst *prev;
1877         if (item->next) {
1878                 prev = item->next->data;
1879
1880                 while (prev->next != ins)
1881                         prev = prev->next;
1882                 to_insert->next = ins;
1883                 prev->next = to_insert;
1884         } else {
1885                 to_insert->next = ins;
1886         }
1887         /* 
1888          * needed: otherwise an ins appended at the end while processing the
1889          * next instruction could end up past this one.
1890          */
1891         item->data = to_insert; 
1892 }
1893
1894 /* flags used in reginfo->flags */
1895 enum {
1896         MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
1897         MONO_X86_FP_NEEDS_SPILL                 = 1 << 1,
1898         MONO_X86_FP_NEEDS_LOAD                  = 1 << 2,
1899         MONO_X86_REG_NOT_ECX                    = 1 << 3,
1900         MONO_X86_REG_EAX                                = 1 << 4,
1901         MONO_X86_REG_EDX                                = 1 << 5,
1902         MONO_X86_REG_ECX                                = 1 << 6
1903 };
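
/*
 * The MONO_X86_REG_* names are kept from the x86 port; on amd64 they steer
 * allocation towards (or away from) %rax, %rdx and %rcx respectively, see
 * mono_amd64_alloc_int_reg () below.
 */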
1904
1905 static int
1906 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
1907 {
1908         int val;
1909         int test_mask = dest_mask;
1910
1911         if (flags & MONO_X86_REG_EAX)
1912                 test_mask &= (1 << AMD64_RAX);
1913         else if (flags & MONO_X86_REG_EDX)
1914                 test_mask &= (1 << AMD64_RDX);
1915         else if (flags & MONO_X86_REG_ECX)
1916                 test_mask &= (1 << AMD64_RCX);
1917         else if (flags & MONO_X86_REG_NOT_ECX)
1918                 test_mask &= ~ (1 << AMD64_RCX);
1919
1920         val = mono_regstate_alloc_int (cfg->rs, test_mask);
1921         if (val >= 0 && test_mask != dest_mask)
1922                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
1923
1924         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
1925                         DEBUG(g_print ("\tFailed to allocate from flag suggested mask (%u), retrying while excluding ECX\n", test_mask));
1926                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & ~(1 << AMD64_RCX)));
1927         }
1928
1929         if (val < 0) {
1930                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
1931                 if (val < 0)
1932                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
1933         }
1934
1935         return val;
1936 }
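
/*
 * Example (illustrative): with flags containing MONO_X86_REG_EAX and
 * dest_mask == AMD64_CALLEE_REGS, test_mask is narrowed to (1 << AMD64_RAX),
 * so %rax is tried first; only if that fails does the allocation fall back
 * to the full dest_mask and, as a last resort, to get_register_spilling ().
 */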
1937
1938 static int
1939 mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
1940 {
1941         int val;
1942
1943         val = mono_regstate_alloc_float (cfg->rs, dest_mask);
1944
1945         if (val < 0) {
1946                 val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
1947         }
1948
1949         return val;
1950 }
1951
1952
1953 /*#include "cprop.c"*/
1954
1955 /*
1956  * Local register allocation.
1957  * We first scan the list of instructions and we save the liveness info of
1958  * each register (when the register is first used, when its value is set, etc.).
1959  * We also reverse the list of instructions (in the InstList list) because assigning
1960  * registers backwards allows for more tricks to be used.
1961  */
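/*
 * Sketch of the two passes (illustrative): for a block like
 *
 *   inst 1:  R16 <- OP_ICONST 1
 *   inst 2:  R17 <- OP_IADD R16 R16
 *
 * the forward pass records that R16 is born in inst 1 and last used in
 * inst 2; the backward pass then walks the reversed list, replaces the
 * symbolic R16/R17 with hard registers and inserts spill loads/stores
 * whenever the free mask runs out.
 */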
1962 void
1963 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1964 {
1965         MonoInst *ins;
1966         MonoRegState *rs = cfg->rs;
1967         int i, val, fpcount;
1968         RegTrack *reginfo, *reginfof;
1969         RegTrack *reginfo1, *reginfo2, *reginfod;
1970         InstList *tmp, *reversed = NULL;
1971         const char *spec;
1972         guint32 src1_mask, src2_mask, dest_mask;
1973         GList *fspill_list = NULL;
1974         int fspill = 0;
1975
1976         if (!bb->code)
1977                 return;
1978         rs->next_vireg = bb->max_ireg;
1979         rs->next_vfreg = bb->max_freg;
1980         mono_regstate_assign (rs);
1981         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
1982         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
1983         rs->ifree_mask = AMD64_CALLEE_REGS;
1984         rs->ffree_mask = AMD64_CALLEE_FREGS;
1985
1986         if (!use_sse2)
1987                 /* The fp stack is 6 entries deep */
1988                 rs->ffree_mask = 0x3f;
1989
1990         ins = bb->code;
1991
1992         /*if (cfg->opt & MONO_OPT_COPYPROP)
1993                 local_copy_prop (cfg, ins);*/
1994
1995         i = 1;
1996         fpcount = 0;
1997         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
1998         /* forward pass on the instructions to collect register liveness info */
1999         while (ins) {
2000                 spec = ins_spec [ins->opcode];
2001                 
2002                 DEBUG (print_ins (i, ins));
2003
2004                 if (spec [MONO_INST_SRC1]) {
2005                         if (spec [MONO_INST_SRC1] == 'f') {
2006                                 reginfo1 = reginfof;
2007
2008                                 if (!use_sse2) {
2009                                         GList *spill;
2010
2011                                         spill = g_list_first (fspill_list);
2012                                         if (spill && fpcount < FPSTACK_SIZE) {
2013                                                 reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
2014                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2015                                         } else
2016                                                 fpcount--;
2017                                 }
2018                         }
2019                         else
2020                                 reginfo1 = reginfo;
2021                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2022                         reginfo1 [ins->sreg1].last_use = i;
2023                         if (spec [MONO_INST_SRC1] == 'L') {
2024                                 /* The virtual register is allocated sequentially */
2025                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
2026                                 reginfo1 [ins->sreg1 + 1].last_use = i;
2027                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
2028                                         reginfo1 [ins->sreg1 + 1].born_in = i;
2029
2030                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
2031                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
2032                         }
2033                 } else {
2034                         ins->sreg1 = -1;
2035                 }
2036                 if (spec [MONO_INST_SRC2]) {
2037                         if (spec [MONO_INST_SRC2] == 'f') {
2038                                 reginfo2 = reginfof;
2039
2040                                 if (!use_sse2) {
2041                                         GList *spill;
2042
2043                                         spill = g_list_first (fspill_list);
2044                                         if (spill) {
2045                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
2046                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2047                                                 if (fpcount >= FPSTACK_SIZE) {
2048                                                         fspill++;
2049                                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2050                                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
2051                                                 }
2052                                         } else
2053                                                 fpcount--;
2054                                 }
2055                         }
2056                         else
2057                                 reginfo2 = reginfo;
2058                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2059                         reginfo2 [ins->sreg2].last_use = i;
2060                         if (spec [MONO_INST_SRC2] == 'L') {
2061                                 /* The virtual register is allocated sequentially */
2062                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
2063                                 reginfo2 [ins->sreg2 + 1].last_use = i;
2064                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
2065                                         reginfo2 [ins->sreg2 + 1].born_in = i;
2066                         }
2067                         if (spec [MONO_INST_CLOB] == 's') {
2068                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
2069                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
2070                         }
2071                 } else {
2072                         ins->sreg2 = -1;
2073                 }
2074                 if (spec [MONO_INST_DEST]) {
2075                         if (spec [MONO_INST_DEST] == 'f') {
2076                                 reginfod = reginfof;
2077                                 if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
2078                                         if (fpcount >= FPSTACK_SIZE) {
2079                                                 reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
2080                                                 fspill++;
2081                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2082                                                 fpcount--;
2083                                         }
2084                                         fpcount++;
2085                                 }
2086                         }
2087                         else
2088                                 reginfod = reginfo;
2089                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2090                                 reginfod [ins->dreg].killed_in = i;
2091                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2092                         reginfod [ins->dreg].last_use = i;
2093                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2094                                 reginfod [ins->dreg].born_in = i;
2095                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
2096                                 /* The virtual register is allocated sequentially */
2097                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2098                                 reginfod [ins->dreg + 1].last_use = i;
2099                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2100                                         reginfod [ins->dreg + 1].born_in = i;
2101
2102                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
2103                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
2104                         }
2105                 } else {
2106                         ins->dreg = -1;
2107                 }
2108
2109                 if (spec [MONO_INST_CLOB] == 'c') {
2110                         /* A call instruction implicitly uses all registers in call->out_ireg_args */
2111
2112                         MonoCallInst *call = (MonoCallInst*)ins;
2113                         GSList *list;
2114
2115                         list = call->out_ireg_args;
2116                         if (list) {
2117                                 while (list) {
2118                                         guint64 regpair;
2119                                         int reg, hreg;
2120
2121                                         regpair = (guint64) (list->data);
2122                                         hreg = regpair >> 32;
2123                                         reg = regpair & 0xffffffff;
2124
2125                                         reginfo [reg].prev_use = reginfo [reg].last_use;
2126                                         reginfo [reg].last_use = i;
2127
2128                                         list = g_slist_next (list);
2129                                 }
2130                         }
2131
2132                         list = call->out_freg_args;
2133                         if (use_sse2 && list) {
2134                                 while (list) {
2135                                         guint64 regpair;
2136                                         int reg, hreg;
2137
2138                                         regpair = (guint64) (list->data);
2139                                         hreg = regpair >> 32;
2140                                         reg = regpair & 0xffffffff;
2141
2142                                         reginfof [reg].prev_use = reginfof [reg].last_use;
2143                                         reginfof [reg].last_use = i;
2144
2145                                         list = g_slist_next (list);
2146                                 }
2147                         }
2148                 }
2149
2150                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2151                 ++i;
2152                 ins = ins->next;
2153         }
2154
2155         // todo: check if we have anything left on fp stack, in verify mode?
2156         fspill = 0;
2157
2158         DEBUG (print_regtrack (reginfo, rs->next_vireg));
2159         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2160         tmp = reversed;
2161         while (tmp) {
2162                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
2163                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
2164                 --i;
2165                 ins = tmp->data;
2166                 spec = ins_spec [ins->opcode];
2167                 prev_dreg = -1;
2168                 clob_dreg = -1;
2169                 DEBUG (g_print ("processing:"));
2170                 DEBUG (print_ins (i, ins));
2171                 if (spec [MONO_INST_CLOB] == 's') {
2172                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
2173                                 DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
2174                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2175                                         /* Argument already in hard reg, need to copy */
2176                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2177                                         insert_before_ins (ins, tmp, copy);
2178                                 }
2179                                 rs->iassign [ins->sreg2] = AMD64_RCX;
2180                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
2181                                 ins->sreg2 = AMD64_RCX;
2182                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
2183                         } else {
2184                                 int need_ecx_spill = TRUE;
2185                                 /* 
2186                                  * we first check if src1/dreg is already assigned a register
2187                                  * and then we force a spill of the var assigned to ECX.
2188                                  */
2189                                 /* the destination register can't be ECX */
2190                                 dest_mask &= ~ (1 << AMD64_RCX);
2191                                 src1_mask &= ~ (1 << AMD64_RCX);
2192                                 val = rs->iassign [ins->dreg];
2193                                 /* 
2194                                  * the destination register is already assigned to ECX:
2195                                  * we need to allocate another register for it and then
2196                                  * copy from this to ECX.
2197                                  */
2198                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
2199                                         int new_dest;
2200                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2201                                         g_assert (new_dest >= 0);
2202                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
2203
2204                                         rs->isymbolic [new_dest] = ins->dreg;
2205                                         rs->iassign [ins->dreg] = new_dest;
2206                                         clob_dreg = ins->dreg;
2207                                         ins->dreg = new_dest;
2208                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
2209                                         need_ecx_spill = FALSE;
2210                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
2211                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
2212                                         rs->iassign [ins->dreg] = val;
2213                                         rs->isymbolic [val] = prev_dreg;
2214                                         ins->dreg = val;*/
2215                                 }
2216                                 val = rs->iassign [ins->sreg2];
2217                                 if (val >= 0 && val != AMD64_RCX) {
2218                                         MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
2219                                         DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
2220                                         move->next = ins;
2221                                         g_assert_not_reached ();
2222                                         /* FIXME: where is move connected to the instruction list? */
2223                                         //tmp->prev->data->next = move;
2224                                 }
2225                                 else 
2226                                         if (val == AMD64_RCX) {
2227                                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2228                                                         /* sreg2 is already assigned to a hard reg, need to copy */
2229                                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2230                                                         insert_before_ins (ins, tmp, copy);
2231                                                 }
2232                                                 need_ecx_spill = FALSE;
2233                                         }
2234                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
2235                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
2236                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
2237                                         mono_regstate_free_int (rs, AMD64_RCX);
2238                                 }
2239                                 /* force-set sreg2 */
2240                                 rs->iassign [ins->sreg2] = AMD64_RCX;
2241                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
2242                                 ins->sreg2 = AMD64_RCX;
2243                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
2244                         }
2245                 } else if (spec [MONO_INST_CLOB] == 'd') { /* division */
2246                         int dest_reg = AMD64_RAX;
2247                         int clob_reg = AMD64_RDX;
2248                         if (spec [MONO_INST_DEST] == 'd') {
2249                                 dest_reg = AMD64_RDX; /* remainder */
2250                                 clob_reg = AMD64_RAX;
2251                         }
2252                         val = rs->iassign [ins->dreg];
2253                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
2254                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2255                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2256                                 mono_regstate_free_int (rs, dest_reg);
2257                         }
2258                         if (val < 0) {
2259                                 if (val < -1) {
2260                                         /* the register gets spilled after this inst */
2261                                         int spill = -val -1;
2262                                         dest_mask = 1 << clob_reg;
2263                                         prev_dreg = ins->dreg;
2264                                         val = mono_regstate_alloc_int (rs, dest_mask);
2265                                         if (val < 0)
2266                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
2267                                         rs->iassign [ins->dreg] = val;
2268                                         if (spill)
2269                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2270                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2271                                         rs->isymbolic [val] = prev_dreg;
2272                                         ins->dreg = val;
2273                                         if (val != dest_reg) { /* force a copy */
2274                                                 create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2275                                         }
2276                                 } else {
2277                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
2278                                         prev_dreg = ins->dreg;
2279                                         rs->iassign [ins->dreg] = dest_reg;
2280                                         rs->isymbolic [dest_reg] = ins->dreg;
2281                                         ins->dreg = dest_reg;
2282                                         rs->ifree_mask &= ~ (1 << dest_reg);
2283                                 }
2284                         } else {
2285                                 //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
2286                                 if (val != dest_reg) { /* force a copy */
2287                                         create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2288                                         if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
2289                                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2290                                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2291                                                 mono_regstate_free_int (rs, dest_reg);
2292                                         }
2293                                 }
2294                         }
2295                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= 8)) {
2296                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
2297                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
2298                                 mono_regstate_free_int (rs, clob_reg);
2299                         }
2300                         src1_mask = 1 << AMD64_RAX;
2301                         src2_mask = 1 << AMD64_RCX;
2302                 }
2303                 if (spec [MONO_INST_DEST] == 'l') {
2304                         int hreg;
2305                         val = rs->iassign [ins->dreg];
2306                         /* check the special case where dreg has been moved away from ecx (clob shift) */
2307                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2308                                 hreg = clob_dreg + 1;
2309                         else
2310                                 hreg = ins->dreg + 1;
2311
2312                         /* base prev_dreg on fixed hreg, handle clob case */
2313                         val = hreg - 1;
2314
2315                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
2316                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2317                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2318                                 mono_regstate_free_int (rs, AMD64_RAX);
2319                         }
2320                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
2321                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
2322                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
2323                                 mono_regstate_free_int (rs, AMD64_RDX);
2324                         }
2325                 }
2326
2327                 /*
2328                  * TRACK DREG
2329                  */
2330                 if (spec [MONO_INST_DEST] == 'f') {
2331                         if (use_sse2) {
2332                                 /* Allocate an XMM reg the same way as an int reg */
2333                                 if (reg_is_soft (ins->dreg, TRUE)) {
2334                                         val = rs->fassign [ins->dreg];
2335                                         prev_dreg = ins->dreg;
2336                                         
2337                                         if (val < 0) {
2338                                                 int spill = 0;
2339                                                 if (val < -1) {
2340                                                         /* the register gets spilled after this inst */
2341                                                         spill = -val -1;
2342                                                 }
2343                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
2344                                                 rs->fassign [ins->dreg] = val;
2345                                                 if (spill)
2346                                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
2347                                         }
2348                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
2349                                         rs->fsymbolic [val] = prev_dreg;
2350                                         ins->dreg = val;
2351                                 }
2352                         }
2353                         else if (spec [MONO_INST_CLOB] != 'm') {
2354                                 if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
2355                                         GList *spill_node;
2356                                         MonoInst *store;
2357                                         spill_node = g_list_first (fspill_list);
2358                                         g_assert (spill_node);
2359
2360                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
2361                                         insert_before_ins (ins, tmp, store);
2362                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2363                                         fspill--;
2364                                 }
2365                         }
2366                 } else if (spec [MONO_INST_DEST] == 'L') {
2367                         int hreg;
2368                         val = rs->iassign [ins->dreg];
2369                         /* check the special case where dreg has been moved away from ecx (clob shift) */
2370                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2371                                 hreg = clob_dreg + 1;
2372                         else
2373                                 hreg = ins->dreg + 1;
2374
2375                         /* base prev_dreg on fixed hreg, handle clob case */
2376                         prev_dreg = hreg - 1;
2377
2378                         if (val < 0) {
2379                                 int spill = 0;
2380                                 if (val < -1) {
2381                                         /* the register gets spilled after this inst */
2382                                         spill = -val -1;
2383                                 }
2384                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2385                                 rs->iassign [ins->dreg] = val;
2386                                 if (spill)
2387                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2388                         }
2389
2390                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
2391  
2392                         rs->isymbolic [val] = hreg - 1;
2393                         ins->dreg = val;
2394                         
2395                         val = rs->iassign [hreg];
2396                         if (val < 0) {
2397                                 int spill = 0;
2398                                 if (val < -1) {
2399                                         /* the register gets spilled after this inst */
2400                                         spill = -val -1;
2401                                 }
2402                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2403                                 rs->iassign [hreg] = val;
2404                                 if (spill)
2405                                         create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2406                         }
2407
2408                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
2409                         rs->isymbolic [val] = hreg;
2410                         /* save the allocated reg into ins->unused */
2411                         ins->unused = val;
2412
2413                         /* check if we can free our long reg */
2414                         if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2415                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
2416                                 mono_regstate_free_int (rs, val);
2417                         }
2418                 }
2419                 else if (ins->dreg >= MONO_MAX_IREGS) {
2420                         int hreg;
2421                         val = rs->iassign [ins->dreg];
2422                         if (spec [MONO_INST_DEST] == 'l') {
2423                                 /* check the special case where dreg has been moved away from ecx (clob shift) */
2424                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2425                                         hreg = clob_dreg + 1;
2426                                 else
2427                                         hreg = ins->dreg + 1;
2428
2429                                 /* base prev_dreg on fixed hreg, handle clob case */
2430                                 prev_dreg = hreg - 1;
2431                         } else
2432                                 prev_dreg = ins->dreg;
2433
2434                         if (val < 0) {
2435                                 int spill = 0;
2436                                 if (val < -1) {
2437                                         /* the register gets spilled after this inst */
2438                                         spill = -val -1;
2439                                 }
2440                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2441                                 rs->iassign [ins->dreg] = val;
2442                                 if (spill)
2443                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2444                         }
2445                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2446                         rs->isymbolic [val] = prev_dreg;
2447                         ins->dreg = val;
2448                         /* handle cases where the long reg needs to be rax:rdx */
2449                         if (spec [MONO_INST_DEST] == 'l') {
2450                                 /* check the special case where dreg has been moved out of rcx (shift clobber) */
2451                                 int hreg = prev_dreg + 1;
2452                                 val = rs->iassign [hreg];
2453                                 if (val < 0) {
2454                                         int spill = 0;
2455                                         if (val < -1) {
2456                                                 /* the register gets spilled after this inst */
2457                                                 spill = -val -1;
2458                                         }
2459                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2460                                         rs->iassign [hreg] = val;
2461                                         if (spill)
2462                                                 create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2463                                 }
2464                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2465                                 rs->isymbolic [val] = hreg;
2466                                 if (ins->dreg == AMD64_RAX) {
2467                                         if (val != AMD64_RDX)
2468                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2469                                 } else if (ins->dreg == AMD64_RDX) {
2470                                         if (val == AMD64_RAX) {
2471                                                 /* swap */
2472                                                 g_assert_not_reached ();
2473                                         } else {
2474                                                 /* two forced copies */
2475                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2476                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2477                                         }
2478                                 } else {
2479                                         if (val == AMD64_RDX) {
2480                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2481                                         } else {
2482                                                 /* two forced copies */
2483                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2484                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2485                                         }
2486                                 }
2487                                 if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2488                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2489                                         mono_regstate_free_int (rs, val);
2490                                 }
2491                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2492                                 /* this instruction only outputs to RAX, so we need to copy */
2493                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2494                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2495                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
2496                         }
2497                 }
2498
2499                 if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
2500                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
2501                         mono_regstate_free_float (rs, ins->dreg);
2502                 }
2503                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2504                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2505                         mono_regstate_free_int (rs, ins->dreg);
2506                 }
2507
2508                 /* put src1 in RAX if it needs to be */
2509                 if (spec [MONO_INST_SRC1] == 'a') {
2510                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2511                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2512                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2513                                 mono_regstate_free_int (rs, AMD64_RAX);
2514                         }
2515                         if (ins->sreg1 < MONO_MAX_IREGS) {
2516                                 /* The argument is already in a hard reg, need to copy */
2517                                 MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
2518                                 insert_before_ins (ins, tmp, copy);
2519                         }
2520                         /* force-set sreg1 */
2521                         rs->iassign [ins->sreg1] = AMD64_RAX;
2522                         rs->isymbolic [AMD64_RAX] = ins->sreg1;
2523                         ins->sreg1 = AMD64_RAX;
2524                         rs->ifree_mask &= ~ (1 << AMD64_RAX);
2525                 }
2526
2527                 /*
2528                  * TRACK SREG1
2529                  */
2530                 if (spec [MONO_INST_SRC1] == 'f') {
2531                         if (use_sse2) {
2532                                 if (reg_is_soft (ins->sreg1, TRUE)) {
2533                                         val = rs->fassign [ins->sreg1];
2534                                         prev_sreg1 = ins->sreg1;
2535                                         if (val < 0) {
2536                                                 int spill = 0;
2537                                                 if (val < -1) {
2538                                                         /* the register gets spilled after this inst */
2539                                                         spill = -val -1;
2540                                                 }
2541                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
2542                                                 rs->fassign [ins->sreg1] = val;
2543                                                 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
2544                                                 if (spill) {
2545                                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
2546                                                         insert_before_ins (ins, tmp, store);
2547                                                 }
2548                                         }
2549                                         rs->fsymbolic [val] = prev_sreg1;
2550                                         ins->sreg1 = val;
2551                                 } else {
2552                                         prev_sreg1 = -1;
2553                                 }
2554                         }
2555                         else
2556                         if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2557                                 MonoInst *load;
2558                                 MonoInst *store = NULL;
2559
2560                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2561                                         GList *spill_node;
2562                                         spill_node = g_list_first (fspill_list);
2563                                         g_assert (spill_node);
2564
2565                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2566                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2567                                 }
2568
2569                                 fspill++;
2570                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2571                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2572                                 insert_before_ins (ins, tmp, load);
2573                                 if (store) 
2574                                         insert_before_ins (load, tmp, store);
2575                         }
2576                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2577                         /* force source to be same as dest */
2578                         rs->iassign [ins->sreg1] = ins->dreg;
2579                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2580
2581                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2582                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2583
2584                         ins->sreg1 = ins->dreg;
2585                         /* 
2586                          * No need to save the reg, since we know that src1 == dest in these cases:
2587                          * ins->inst_c0 = ins->unused;
2588                          */
2589
2590                         /* make sure that we remove them from free mask */
2591                         rs->ifree_mask &= ~ (1 << ins->dreg);
2592                         rs->ifree_mask &= ~ (1 << ins->unused);
2593                 }
2594                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2595                         val = rs->iassign [ins->sreg1];
2596                         prev_sreg1 = ins->sreg1;
2597                         if (val < 0) {
2598                                 int spill = 0;
2599                                 if (val < -1) {
2600                                         /* the register gets spilled after this inst */
2601                                         spill = -val -1;
2602                                 }
2603                                 if (0 && (ins->opcode == OP_MOVE)) {
2604                                         /* 
2605                                          * small optimization: the dest register is already allocated
2606                                          * but the src one is not: we can simply assign the same register
2607                                          * here and peephole will get rid of the instruction later.
2608                                          * This optimization may interfere with the clobbering handling:
2609                                          * it removes a mov operation that will be added again to handle clobbering.
2610                                          * There are also some other issues that show up with make testjit.
2611                                          */
2612                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2613                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2614                                         //g_assert (val >= 0);
2615                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2616                                 } else {
2617                                         //g_assert (val == -1); /* source cannot be spilled */
2618                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2619                                         rs->iassign [ins->sreg1] = val;
2620                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2621                                 }
2622                                 if (spill) {
2623                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
2624                                         insert_before_ins (ins, tmp, store);
2625                                 }
2626                         }
2627                         rs->isymbolic [val] = prev_sreg1;
2628                         ins->sreg1 = val;
2629                 } else {
2630                         prev_sreg1 = -1;
2631                 }
2632
2633                 /* handle clobbering of sreg1 */
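                     /*
                      * These opcodes are two-address at the machine level (dreg doubles as the
                      * first operand), so when dreg != sreg1 we copy sreg1 into dreg first. If
                      * dreg currently holds sreg2, sreg2 is moved to a fresh register so the
                      * copy does not clobber it.
                      */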
2634                 if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
2635                         MonoInst *sreg2_copy = NULL;
2636
2637                         gboolean fp = (spec [MONO_INST_SRC1] == 'f');
2638
2639                         if (ins->dreg == ins->sreg2) {
2640                                 /* 
2641                                  * copying sreg1 to dreg could clobber sreg2, so allocate a new
2642                                  * register for it.
2643                                  */
2644                                 int reg2 = 0;
2645
2646                                 if (fp)
2647                                         reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2648                                 else
2649                                         reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
2650
2651                                 DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
2652                                 sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
2653                                 prev_sreg2 = ins->sreg2 = reg2;
2654                         }
2655
2656                         MonoInst *copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
2657                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
2658                         insert_before_ins (ins, tmp, copy);
2659
2660                         if (sreg2_copy)
2661                                 insert_before_ins (copy, tmp, sreg2_copy);
2662
2663                         /* we set sreg1 to dest as well */
2664                         prev_sreg1 = ins->sreg1 = ins->dreg;
2665                         src2_mask &= ~ (1 << ins->dreg);
2666                 }
2667
2668                 /*
2669                  * TRACK SREG2
2670                  */
2671                 if (spec [MONO_INST_SRC2] == 'f') {
2672                         if (use_sse2) {
2673                                 if (reg_is_soft (ins->sreg2, TRUE)) {
2674                                         val = rs->fassign [ins->sreg2];
2675                                         prev_sreg2 = ins->sreg2;
2676                                         if (val < 0) {
2677                                                 int spill = 0;
2678                                                 if (val < -1) {
2679                                                         /* the register gets spilled after this inst */
2680                                                         spill = -val -1;
2681                                                 }
2682                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2683                                                 rs->fassign [ins->sreg2] = val;
2684                                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
2685                                                 if (spill)
2686                                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
2687                                         }
2688                                         rs->fsymbolic [val] = prev_sreg2;
2689                                         ins->sreg2 = val;
2690                                 } else {
2691                                         prev_sreg2 = -1;
2692                                 }
2693                         }
2694                         else
2695                         if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2696                                 MonoInst *load;
2697                                 MonoInst *store = NULL;
2698
2699                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2700                                         GList *spill_node;
2701
2702                                         spill_node = g_list_first (fspill_list);
2703                                         g_assert (spill_node);
2704                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2705                                                 spill_node = g_list_next (spill_node);
2706         
2707                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2708                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2709                                 } 
2710                                 
2711                                 fspill++;
2712                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2713                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2714                                 insert_before_ins (ins, tmp, load);
2715                                 if (store) 
2716                                         insert_before_ins (load, tmp, store);
2717                         }
2718                 } 
2719                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2720                         val = rs->iassign [ins->sreg2];
2721                         prev_sreg2 = ins->sreg2;
2722                         if (val < 0) {
2723                                 int spill = 0;
2724                                 if (val < -1) {
2725                                         /* the register gets spilled after this inst */
2726                                         spill = -val -1;
2727                                 }
2728                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2729                                 rs->iassign [ins->sreg2] = val;
2730                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2731                                 if (spill)
2732                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
2733                         }
2734                         rs->isymbolic [val] = prev_sreg2;
2735                         ins->sreg2 = val;
2736                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2737                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but RCX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2738                         }
2739                 } else {
2740                         prev_sreg2 = -1;
2741                 }
2742
2743                 if (spec [MONO_INST_CLOB] == 'c') {
2744                         int j, s;
2745                         MonoCallInst *call = (MonoCallInst*)ins;
2746                         GSList *list;
2747                         guint32 clob_mask = AMD64_CALLEE_REGS;
2748
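                             /*
                              * The call clobbers every register in clob_mask, so force-spill any
                              * of them that still holds a live value (sreg1 is skipped since it
                              * may hold the call address) and mark it free again.
                              */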
2749                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2750                                 s = 1 << j;
2751                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2752                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
2753                                         mono_regstate_free_int (rs, j);
2754                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2755                                 }
2756                         }
2757
2758                         if (use_sse2) {
2759                                 clob_mask = AMD64_CALLEE_FREGS;
2760
2761                                 for (j = 0; j < MONO_MAX_FREGS; ++j) {
2762                                         s = 1 << j;
2763                                         if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
2764                                                 get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
2765                                                 mono_regstate_free_float (rs, j);
2766                                                 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2767                                         }
2768                                 }
2769                         }
2770
2771                         /* 
2772                          * Assign all the registers in call->out_ireg_args and
2773                          * call->out_freg_args to the proper argument registers.
2774                          */
2775
2776                         list = call->out_ireg_args;
2777                         if (list) {
2778                                 while (list) {
2779                                         guint64 regpair;
2780                                         int reg, hreg;
2781
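                                             /* each list entry packs the hard (argument) register in
                                                the upper 32 bits and the vreg in the lower 32 bits */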
2782                                         regpair = (guint64) (list->data);
2783                                         hreg = regpair >> 32;
2784                                         reg = regpair & 0xffffffff;
2785
2786                                         rs->iassign [reg] = hreg;
2787                                         rs->isymbolic [hreg] = reg;
2788                                         rs->ifree_mask &= ~ (1 << hreg);
2789
2790                                         list = g_slist_next (list);
2791                                 }
2792                                 g_slist_free (call->out_ireg_args);
2793                         }
2794
2795                         list = call->out_freg_args;
2796                         if (list && use_sse2) {
2797                                 while (list) {
2798                                         guint64 regpair;
2799                                         int reg, hreg;
2800
2801                                         regpair = (guint64) (list->data);
2802                                         hreg = regpair >> 32;
2803                                         reg = regpair & 0xffffffff;
2804
2805                                         rs->fassign [reg] = hreg;
2806                                         rs->fsymbolic [hreg] = reg;
2807                                         rs->ffree_mask &= ~ (1 << hreg);
2808
2809                                         list = g_slist_next (list);
2810                                 }
2811                         }
2812                         if (call->out_freg_args)
2813                                 g_slist_free (call->out_freg_args);
2814                 }
2815
2816                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2817                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2818                         mono_regstate_free_int (rs, ins->sreg1);
2819                 }
2820                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2821                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2822                         mono_regstate_free_int (rs, ins->sreg2);
2823                 }*/
2824         
2825                 DEBUG (print_ins (i, ins));
2826                 /* this may result from an insert_before call */
2827                 if (!tmp->next)
2828                         bb->code = tmp->data;
2829                 tmp = tmp->next;
2830         }
2831
2832         g_free (reginfo);
2833         g_free (reginfof);
2834         g_list_free (fspill_list);
2835 }
2836
2837 static unsigned char*
2838 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2839 {
2840         if (use_sse2) {
2841                 amd64_sse_cvtsd2si_reg_reg (code, dreg, sreg);
2842         }
2843         else {
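                     /*
                      * x87 path: fnstcw saves the FPU control word, OR 0xc00 sets the rounding
                      * control bits to truncate (round toward zero) so fistp matches conv.i
                      * semantics, fldcw installs the modified word, and the original control
                      * word is restored once the value has been stored and popped.
                      */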
2844                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2845                 x86_fnstcw_membase(code, AMD64_RSP, 0);
2846                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2847                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2848                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2849                 amd64_fldcw_membase (code, AMD64_RSP, 2);
2850                 amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
2851                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2852                 amd64_pop_reg (code, dreg);
2853                 amd64_fldcw_membase (code, AMD64_RSP, 0);
2854                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2855         }
2856
2857         if (size == 1)
2858                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2859         else if (size == 2)
2860                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2861         return code;
2862 }
2863
2864 static unsigned char*
2865 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2866 {
2867         int sreg = tree->sreg1;
2868 #ifdef PLATFORM_WIN32
2869         guint8* br[5];
2870
2871         NOT_IMPLEMENTED;
2872
2873         /*
2874          * Under Windows:
2875          * If the requested stack size is larger than one page,
2876          * perform a stack-touch operation.
2877          */
2878         /*
2879          * Generate stack probe code.
2880          * Under Windows, it is necessary to allocate one page at a time,
2881          * "touching" stack after each successful sub-allocation. This is
2882          * because of the way stack growth is implemented - there is a
2883          * guard page before the lowest stack page that is currently committed.
2884          * Stack normally grows sequentially, so the OS traps access to the
2885          * guard page and commits more pages when needed.
2886          */
2887         amd64_test_reg_imm (code, sreg, ~0xFFF);
2888         br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2889
2890         br[2] = code; /* loop */
2891         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
2892         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
2893         amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
2894         amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
2895         br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
2896         amd64_patch (br[3], br[2]);
2897         amd64_test_reg_reg (code, sreg, sreg);
2898         br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2899         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2900
2901         br[1] = code; x86_jump8 (code, 0);
2902
2903         amd64_patch (br[0], code);
2904         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2905         amd64_patch (br[1], code);
2906         amd64_patch (br[4], code);
2907 #else /* PLATFORM_WIN32 */
2908         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
2909 #endif
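             /*
              * MONO_INST_INIT means the newly allocated block must be zeroed: save
              * rax/rcx/rdi if they are live here, clear the area with rep stos
              * (rdi = start address, rcx = iteration count, rax = 0), then restore
              * the saved registers.
              */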
2910         if (tree->flags & MONO_INST_INIT) {
2911                 int offset = 0;
2912                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
2913                         amd64_push_reg (code, AMD64_RAX);
2914                         offset += 8;
2915                 }
2916                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
2917                         amd64_push_reg (code, AMD64_RCX);
2918                         offset += 8;
2919                 }
2920                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
2921                         amd64_push_reg (code, AMD64_RDI);
2922                         offset += 8;
2923                 }
2924                 
2925                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
2926                 if (sreg != AMD64_RCX)
2927                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
2928                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2929                                 
2930                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
2931                 amd64_cld (code);
2932                 amd64_prefix (code, X86_REP_PREFIX);
2933                 amd64_stosl (code);
2934                 
2935                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
2936                         amd64_pop_reg (code, AMD64_RDI);
2937                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
2938                         amd64_pop_reg (code, AMD64_RCX);
2939                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
2940                         amd64_pop_reg (code, AMD64_RAX);
2941         }
2942         return code;
2943 }
2944
2945 static guint8*
2946 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
2947 {
2948         CallInfo *cinfo;
2949         guint32 offset, quad;
2950
2951         /* Move return value to the target register */
2952         /* FIXME: do this in the local reg allocator */
2953         switch (ins->opcode) {
2954         case CEE_CALL:
2955         case OP_CALL_REG:
2956         case OP_CALL_MEMBASE:
2957         case OP_LCALL:
2958         case OP_LCALL_REG:
2959         case OP_LCALL_MEMBASE:
2960                 if (ins->dreg != AMD64_RAX)
2961                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
2962                 break;
2963         case OP_FCALL:
2964         case OP_FCALL_REG:
2965         case OP_FCALL_MEMBASE:
2966                 /* FIXME: optimize this */
2967                 offset = mono_spillvar_offset_float (cfg, 0);
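                     /*
                      * Floats are returned in %xmm0. With SSE2 enabled we convert/copy the
                      * value straight into dreg; otherwise it is stored to a spill slot and
                      * reloaded onto the x87 stack, which the non-SSE2 code paths expect.
                      */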
2968                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2969                         if (use_sse2)
2970                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
2971                         else {
2972                                 amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
2973                                 amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
2974                         }
2975                 }
2976                 else {
2977                         if (use_sse2) {
2978                                 if (ins->dreg != AMD64_XMM0)
2979                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
2980                         }
2981                         else {
2982                                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
2983                                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
2984                         }
2985                 }
2986                 break;
2987         case OP_VCALL:
2988         case OP_VCALL_REG:
2989         case OP_VCALL_MEMBASE:
2990                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
2991                 if (cinfo->ret.storage == ArgValuetypeInReg) {
2992                         /* Pop the destination address from the stack */
2993                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2994                         amd64_pop_reg (code, AMD64_RCX);
2995                         
2996                         for (quad = 0; quad < 2; quad ++) {
2997                                 switch (cinfo->ret.pair_storage [quad]) {
2998                                 case ArgInIReg:
2999                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
3000                                         break;
3001                                 case ArgInFloatSSEReg:
3002                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3003                                         break;
3004                                 case ArgInDoubleSSEReg:
3005                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3006                                         break;
3007                                 case ArgNone:
3008                                         break;
3009                                 default:
3010                                         NOT_IMPLEMENTED;
3011                                 }
3012                         }
3013                 }
3014                 break;
3015         }
3016
3017         return code;
3018 }
3019
3020 /*
3021  * emit_load_volatile_arguments:
3022  *
3023  *  Load volatile arguments from the stack to the original input registers.
3024  * Required before a tail call.
3025  */
3026 static guint8*
3027 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3028 {
3029         MonoMethod *method = cfg->method;
3030         MonoMethodSignature *sig;
3031         MonoInst *inst;
3032         CallInfo *cinfo;
3033         guint32 i;
3034
3035         /* FIXME: Generate intermediate code instead */
3036
3037         sig = method->signature;
3038
3039         cinfo = get_call_info (sig, FALSE);
3040         
3041         /* This is the opposite of the code in emit_prolog */
3042
3043         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3044                 ArgInfo *ainfo = cinfo->args + i;
3045                 MonoType *arg_type;
3046                 inst = cfg->varinfo [i];
3047
3048                 if (sig->hasthis && (i == 0))
3049                         arg_type = &mono_defaults.object_class->byval_arg;
3050                 else
3051                         arg_type = sig->params [i - sig->hasthis];
3052
3053                 if (inst->opcode != OP_REGVAR) {
3054                         switch (ainfo->storage) {
3055                         case ArgInIReg: {
3056                                 guint32 size = 8;
3057
3058                                 /* FIXME: I1 etc */
3059                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
3060                                 break;
3061                         }
3062                         case ArgInFloatSSEReg:
3063                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3064                                 break;
3065                         case ArgInDoubleSSEReg:
3066                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3067                                 break;
3068                         default:
3069                                 break;
3070                         }
3071                 }
3072         }
3073
3074         g_free (cinfo);
3075
3076         return code;
3077 }
3078
3079 #define REAL_PRINT_REG(text,reg) \
3080 mono_assert (reg >= 0); \
3081 amd64_push_reg (code, AMD64_RAX); \
3082 amd64_push_reg (code, AMD64_RDX); \
3083 amd64_push_reg (code, AMD64_RCX); \
3084 amd64_push_reg (code, reg); \
3085 amd64_push_imm (code, reg); \
3086 amd64_push_imm (code, text " %d %p\n"); \
3087 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
3088 amd64_call_reg (code, AMD64_RAX); \
3089 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*4); \
3090 amd64_pop_reg (code, AMD64_RCX); \
3091 amd64_pop_reg (code, AMD64_RDX); \
3092 amd64_pop_reg (code, AMD64_RAX);
3093
3094 /* benchmark and set based on cpu */
3095 #define LOOP_ALIGNMENT 8
3096 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
3097
3098 void
3099 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3100 {
3101         MonoInst *ins;
3102         MonoCallInst *call;
3103         guint offset;
3104         guint8 *code = cfg->native_code + cfg->code_len;
3105         MonoInst *last_ins = NULL;
3106         guint last_offset = 0;
3107         int max_len, cpos;
3108
3109         if (cfg->opt & MONO_OPT_PEEPHOLE)
3110                 peephole_pass (cfg, bb);
3111
3112         if (cfg->opt & MONO_OPT_LOOP) {
3113                 int pad, align = LOOP_ALIGNMENT;
3114                 /* set alignment depending on cpu */
3115                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
3116                         pad = align - pad;
3117                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3118                         amd64_padding (code, pad);
3119                         cfg->code_len += pad;
3120                         bb->native_offset = cfg->code_len;
3121                 }
3122         }
3123
3124         if (cfg->verbose_level > 2)
3125                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3126
3127         cpos = bb->max_offset;
3128
3129         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3130                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
3131                 g_assert (!mono_compile_aot);
3132                 cpos += 6;
3133
3134                 cov->data [bb->dfn].cil_code = bb->cil_code;
3135                 /* this is not thread safe, but good enough */
3136                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
3137         }
3138
3139         offset = code - cfg->native_code;
3140
3141         ins = bb->code;
3142         while (ins) {
3143                 offset = code - cfg->native_code;
3144
3145                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3146
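                     /*
                      * Make sure the buffer still has room for the worst-case encoding of
                      * this opcode (max_len) plus some slack; otherwise double the native
                      * code buffer and continue at the same offset.
                      */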
3147                 if (offset > (cfg->code_size - max_len - 16)) {
3148                         cfg->code_size *= 2;
3149                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3150                         code = cfg->native_code + offset;
3151                         mono_jit_stats.code_reallocs++;
3152                 }
3153
3154                 mono_debug_record_line_number (cfg, ins, offset);
3155
3156                 switch (ins->opcode) {
3157                 case OP_BIGMUL:
3158                         amd64_mul_reg (code, ins->sreg2, TRUE);
3159                         break;
3160                 case OP_BIGMUL_UN:
3161                         amd64_mul_reg (code, ins->sreg2, FALSE);
3162                         break;
3163                 case OP_X86_SETEQ_MEMBASE:
3164                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
3165                         break;
3166                 case OP_STOREI1_MEMBASE_IMM:
3167                         g_assert (amd64_is_imm32 (ins->inst_imm));
3168                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
3169                         break;
3170                 case OP_STOREI2_MEMBASE_IMM:
3171                         g_assert (amd64_is_imm32 (ins->inst_imm));
3172                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
3173                         break;
3174                 case OP_STOREI4_MEMBASE_IMM:
3175                         g_assert (amd64_is_imm32 (ins->inst_imm));
3176                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
3177                         break;
3178                 case OP_STOREI1_MEMBASE_REG:
3179                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
3180                         break;
3181                 case OP_STOREI2_MEMBASE_REG:
3182                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
3183                         break;
3184                 case OP_STORE_MEMBASE_REG:
3185                 case OP_STOREI8_MEMBASE_REG:
3186                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
3187                         break;
3188                 case OP_STOREI4_MEMBASE_REG:
3189                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
3190                         break;
3191                 case OP_STORE_MEMBASE_IMM:
3192                 case OP_STOREI8_MEMBASE_IMM:
3193                         if (amd64_is_imm32 (ins->inst_imm))
3194                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
3195                         else {
3196                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
3197                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
3198                         }
3199                         break;
3200                 case CEE_LDIND_I:
3201                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
3202                         break;
3203                 case CEE_LDIND_I4:
3204                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3205                         break;
3206                 case CEE_LDIND_U4:
3207                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3208                         break;
3209                 case OP_LOADU4_MEM:
3210                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
3211                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
3212                         break;
3213                 case OP_LOAD_MEMBASE:
3214                 case OP_LOADI8_MEMBASE:
3215                         if (amd64_is_imm32 (ins->inst_offset)) {
3216                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
3217                         }
3218                         else {
3219                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
3220                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
3221                         }
3222                         break;
3223                 case OP_LOADI4_MEMBASE:
3224                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3225                         break;
3226                 case OP_LOADU4_MEMBASE:
3227                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
3228                         break;
3229                 case OP_LOADU1_MEMBASE:
3230                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
3231                         break;
3232                 case OP_LOADI1_MEMBASE:
3233                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
3234                         break;
3235                 case OP_LOADU2_MEMBASE:
3236                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
3237                         break;
3238                 case OP_LOADI2_MEMBASE:
3239                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
3240                         break;
3241                 case CEE_CONV_I1:
3242                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3243                         break;
3244                 case CEE_CONV_I2:
3245                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3246                         break;
3247                 case CEE_CONV_U1:
3248                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
3249                         break;
3250                 case CEE_CONV_U2:
3251                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
3252                         break;
3253                 case CEE_CONV_U8:
3254                 case CEE_CONV_U:
3255                         /* Clear the upper 32 bits (the 4 byte mov zero-extends) */
3256                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3257                         break;
3258                 case CEE_CONV_I8:
3259                 case CEE_CONV_I:
3260                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
3261                         break;                  
3262                 case OP_COMPARE:
3263                 case OP_LCOMPARE:
3264                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3265                         break;
3266                 case OP_COMPARE_IMM:
3267                         if (!amd64_is_imm32 (ins->inst_imm)) {
3268                                 amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
3269                                 amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
3270                         } else {
3271                                 amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
3272                         }
3273                         break;
3274                 case OP_X86_COMPARE_MEMBASE_REG:
3275                         amd64_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
3276                         break;
3277                 case OP_X86_COMPARE_MEMBASE_IMM:
3278                         g_assert (amd64_is_imm32 (ins->inst_imm));
3279                         amd64_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
3280                         break;
3281                 case OP_X86_COMPARE_REG_MEMBASE:
3282                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
3283                         break;
3284                 case OP_X86_TEST_NULL:
3285                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
3286                         break;
3287                 case OP_AMD64_TEST_NULL:
3288                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3289                         break;
3290                 case OP_X86_ADD_MEMBASE_IMM:
3291                         /* FIXME: Make a 64 version too */
3292                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3293                         break;
3294                 case OP_X86_ADD_MEMBASE:
3295                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3296                         break;
3297                 case OP_X86_SUB_MEMBASE_IMM:
3298                         g_assert (amd64_is_imm32 (ins->inst_imm));
3299                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3300                         break;
3301                 case OP_X86_SUB_MEMBASE:
3302                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3303                         break;
3304                 case OP_X86_INC_MEMBASE:
3305                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3306                         break;
3307                 case OP_X86_INC_REG:
3308                         amd64_inc_reg_size (code, ins->dreg, 4);
3309                         break;
3310                 case OP_X86_DEC_MEMBASE:
3311                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3312                         break;
3313                 case OP_X86_DEC_REG:
3314                         amd64_dec_reg_size (code, ins->dreg, 4);
3315                         break;
3316                 case OP_X86_MUL_MEMBASE:
3317                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3318                         break;
3319                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
3320                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
3321                         break;
3322                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
3323                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3324                         break;
3325                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
3326                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3327                         break;
3328                 case CEE_BREAK:
3329                         amd64_breakpoint (code);
3330                         break;
3331
3332                 case OP_ADDCC:
3333                 case CEE_ADD:
3334                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
3335                         break;
3336                 case OP_ADC:
3337                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
3338                         break;
3339                 case OP_ADD_IMM:
3340                         g_assert (amd64_is_imm32 (ins->inst_imm));
3341                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
3342                         break;
3343                 case OP_ADC_IMM:
3344                         g_assert (amd64_is_imm32 (ins->inst_imm));
3345                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
3346                         break;
3347                 case OP_SUBCC:
3348                 case CEE_SUB:
3349                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
3350                         break;
3351                 case OP_SBB:
3352                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
3353                         break;
3354                 case OP_SUB_IMM:
3355                         g_assert (amd64_is_imm32 (ins->inst_imm));
3356                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
3357                         break;
3358                 case OP_SBB_IMM:
3359                         g_assert (amd64_is_imm32 (ins->inst_imm));
3360                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
3361                         break;
3362                 case CEE_AND:
3363                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
3364                         break;
3365                 case OP_AND_IMM:
3366                         g_assert (amd64_is_imm32 (ins->inst_imm));
3367                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
3368                         break;
3369                 case CEE_MUL:
3370                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3371                         break;
3372                 case OP_MUL_IMM:
3373                         amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
3374                         break;
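                 /*
                  * The divide instructions take the dividend in rdx:rax: cdq sign-extends
                  * rax into rdx for the signed cases, while the unsigned cases zero rdx
                  * with an xor first. The quotient ends up in rax and the remainder in rdx.
                  */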
3375                 case CEE_DIV:
3376                         amd64_cdq (code);
3377                         amd64_div_reg (code, ins->sreg2, TRUE);
3378                         break;
3379                 case CEE_DIV_UN:
3380                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3381                         amd64_div_reg (code, ins->sreg2, FALSE);
3382                         break;
3383                 case OP_DIV_IMM:
3384                         g_assert (amd64_is_imm32 (ins->inst_imm));
3385                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3386                         amd64_cdq (code);
3387                         amd64_div_reg (code, ins->sreg2, TRUE);
3388                         break;
3389                 case CEE_REM:
3390                         amd64_cdq (code);
3391                         amd64_div_reg (code, ins->sreg2, TRUE);
3392                         break;
3393                 case CEE_REM_UN:
3394                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3395                         amd64_div_reg (code, ins->sreg2, FALSE);
3396                         break;
3397                 case OP_REM_IMM:
3398                         g_assert (amd64_is_imm32 (ins->inst_imm));
3399                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3400                         amd64_cdq (code);
3401                         amd64_div_reg (code, ins->sreg2, TRUE);
3402                         break;
3403                 case CEE_OR:
3404                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
3405                         break;
3406                 case OP_OR_IMM:
3407                         g_assert (amd64_is_imm32 (ins->inst_imm));
3408                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
3409                         break;
3410                 case CEE_XOR:
3411                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
3412                         break;
3413                 case OP_XOR_IMM:
3414                         g_assert (amd64_is_imm32 (ins->inst_imm));
3415                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
3416                         break;
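                 /*
                  * Variable shift counts must be in %rcx (%cl); the register allocator is
                  * expected to have placed sreg2 there, which the asserts below check.
                  */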
3417                 case CEE_SHL:
3418                 case OP_LSHL:
3419                         g_assert (ins->sreg2 == AMD64_RCX);
3420                         amd64_shift_reg (code, X86_SHL, ins->dreg);
3421                         break;
3422                 case CEE_SHR:
3423                 case OP_LSHR:
3424                         g_assert (ins->sreg2 == AMD64_RCX);
3425                         amd64_shift_reg (code, X86_SAR, ins->dreg);
3426                         break;
3427                 case OP_SHR_IMM:
3428                         g_assert (amd64_is_imm32 (ins->inst_imm));
3429                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3430                         break;
3431                 case OP_LSHR_IMM:
3432                         g_assert (amd64_is_imm32 (ins->inst_imm));
3433                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
3434                         break;
3435                 case OP_SHR_UN_IMM:
3436                         g_assert (amd64_is_imm32 (ins->inst_imm));
3437                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3438                         break;
3439                 case OP_LSHR_UN_IMM:
3440                         g_assert (amd64_is_imm32 (ins->inst_imm));
3441                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
3442                         break;
3443                 case CEE_SHR_UN:
3444                         g_assert (ins->sreg2 == AMD64_RCX);
3445                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3446                         break;
3447                 case OP_LSHR_UN:
3448                         g_assert (ins->sreg2 == AMD64_RCX);
3449                         amd64_shift_reg (code, X86_SHR, ins->dreg);
3450                         break;
3451                 case OP_SHL_IMM:
3452                         g_assert (amd64_is_imm32 (ins->inst_imm));
3453                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3454                         break;
3455                 case OP_LSHL_IMM:
3456                         g_assert (amd64_is_imm32 (ins->inst_imm));
3457                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
3458                         break;
3459
3460                 case OP_IADDCC:
3461                 case OP_IADD:
3462                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
3463                         break;
3464                 case OP_IADC:
3465                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
3466                         break;
3467                 case OP_IADD_IMM:
3468                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
3469                         break;
3470                 case OP_IADC_IMM:
3471                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
3472                         break;
3473                 case OP_ISUBCC:
3474                 case OP_ISUB:
3475                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
3476                         break;
3477                 case OP_ISBB:
3478                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
3479                         break;
3480                 case OP_ISUB_IMM:
3481                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
3482                         break;
3483                 case OP_ISBB_IMM:
3484                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
3485                         break;
3486                 case OP_IAND:
3487                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
3488                         break;
3489                 case OP_IAND_IMM:
3490                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
3491                         break;
3492                 case OP_IOR:
3493                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
3494                         break;
3495                 case OP_IOR_IMM:
3496                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
3497                         break;
3498                 case OP_IXOR:
3499                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
3500                         break;
3501                 case OP_IXOR_IMM:
3502                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
3503                         break;
3504                 case OP_INEG:
3505                         amd64_neg_reg_size (code, ins->sreg1, 4);
3506                         break;
3507                 case OP_INOT:
3508                         amd64_not_reg_size (code, ins->sreg1, 4);
3509                         break;
3510                 case OP_ISHL:
3511                         g_assert (ins->sreg2 == AMD64_RCX);
3512                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
3513                         break;
3514                 case OP_ISHR:
3515                         g_assert (ins->sreg2 == AMD64_RCX);
3516                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
3517                         break;
3518                 case OP_ISHR_IMM:
3519                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3520                         break;
3521                 case OP_ISHR_UN_IMM:
3522                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3523                         break;
3524                 case OP_ISHR_UN:
3525                         g_assert (ins->sreg2 == AMD64_RCX);
3526                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3527                         break;
3528                 case OP_ISHL_IMM:
3529                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3530                         break;
3531                 case OP_IMUL:
3532                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3533                         break;
3534                 case OP_IMUL_IMM:
3535                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
3536                         break;
3537                 case OP_IMUL_OVF:
3538                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3539                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3540                         break;
3541                 case OP_IMUL_OVF_UN: {
3542                         /* the mul operation and the exception check should most likely be split */
3543                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
3544                         /*g_assert (ins->sreg2 == X86_EAX);
3545                         g_assert (ins->dreg == X86_EAX);*/
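                             /* unsigned mul implicitly uses %eax and writes the high half of the result to
                              * %edx, so one operand is shuffled into %eax and %eax/%edx are saved and
                              * restored below when they need to be preserved */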
3546                         if (ins->sreg2 == X86_EAX) {
3547                                 non_eax_reg = ins->sreg1;
3548                         } else if (ins->sreg1 == X86_EAX) {
3549                                 non_eax_reg = ins->sreg2;
3550                         } else {
3551                                 /* no need to save since we're going to store to it anyway */
3552                                 if (ins->dreg != X86_EAX) {
3553                                         saved_eax = TRUE;
3554                                         amd64_push_reg (code, X86_EAX);
3555                                 }
3556                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
3557                                 non_eax_reg = ins->sreg2;
3558                         }
3559                         if (ins->dreg == X86_EDX) {
3560                                 if (!saved_eax) {
3561                                         saved_eax = TRUE;
3562                                         amd64_push_reg (code, X86_EAX);
3563                                 }
3564                         } else if (ins->dreg != X86_EAX) {
3565                                 saved_edx = TRUE;
3566                                 amd64_push_reg (code, X86_EDX);
3567                         }
3568                         amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
3569                         /* save before the check since pop and mov don't change the flags */
3570                         if (ins->dreg != X86_EAX)
3571                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
3572                         if (saved_edx)
3573                                 amd64_pop_reg (code, X86_EDX);
3574                         if (saved_eax)
3575                                 amd64_pop_reg (code, X86_EAX);
3576                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3577                         break;
3578                 }
3579                 case OP_IDIV:
3580                         amd64_cdq_size (code, 4);
3581                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3582                         break;
3583                 case OP_IDIV_UN:
3584                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3585                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3586                         break;
3587                 case OP_IDIV_IMM:
3588                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3589                         amd64_cdq_size (code, 4);
3590                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3591                         break;
3592                 case OP_IREM:
3593                         amd64_cdq_size (code, 4);
3594                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3595                         break;
3596                 case OP_IREM_UN:
3597                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3598                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3599                         break;
3600                 case OP_IREM_IMM:
3601                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3602                         amd64_cdq_size (code, 4);
3603                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3604                         break;
3605
3606                 case OP_ICOMPARE:
3607                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
3608                         break;
3609                 case OP_ICOMPARE_IMM:
3610                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
3611                         break;
3612
3613                 case OP_IBEQ:
3614                 case OP_IBLT:
3615                 case OP_IBGT:
3616                 case OP_IBGE:
3617                 case OP_IBLE:
3618                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
3619                         break;
3620                 case OP_IBNE_UN:
3621                 case OP_IBLT_UN:
3622                 case OP_IBGT_UN:
3623                 case OP_IBGE_UN:
3624                 case OP_IBLE_UN:
3625                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
3626                         break;
3627                 case OP_COND_EXC_IOV:
3628                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3629                                                                                 TRUE, ins->inst_p1);
3630                         break;
3631                 case OP_COND_EXC_IC:
3632                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3633                                                                                 FALSE, ins->inst_p1);
3634                         break;
3635                 case CEE_NOT:
3636                         amd64_not_reg (code, ins->sreg1);
3637                         break;
3638                 case CEE_NEG:
3639                         amd64_neg_reg (code, ins->sreg1);
3640                         break;
3641                 case OP_SEXT_I1:
3642                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3643                         break;
3644                 case OP_SEXT_I2:
3645                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3646                         break;
3647                 case OP_ICONST:
3648                 case OP_I8CONST:
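                             /* mov reg, imm32 zero extends to 64 bits, so the short form can be used
                              * whenever the upper 32 bits of the constant are zero */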
3649                         if ((((guint64)ins->inst_c0) >> 32) == 0)
3650                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
3651                         else
3652                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
3653                         break;
3654                 case OP_AOTCONST:
3655                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3656                         amd64_set_reg_template (code, ins->dreg);
3657                         break;
3658                 case CEE_CONV_I4:
3659                 case CEE_CONV_U4:
3660                 case OP_MOVE:
3661                 case OP_SETREG:
3662                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
3663                         break;
3664                 case OP_AMD64_SET_XMMREG_R4: {
3665                         if (use_sse2) {
3666                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
3667                         }
3668                         else {
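                                     /* there is no direct x87 -> xmm move, so spill the value to the
                                      * 8 bytes just below %rsp (the red zone) and reload it from there */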
3669                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
3670                                 /* ins->dreg is set to -1 by the reg allocator */
3671                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
3672                         }
3673                         break;
3674                 }
3675                 case OP_AMD64_SET_XMMREG_R8: {
3676                         if (use_sse2) {
3677                                 if (ins->dreg != ins->sreg1)
3678                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3679                         }
3680                         else {
3681                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
3682                                 /* ins->dreg is set to -1 by the reg allocator */
3683                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
3684                         }
3685                         break;
3686                 }
3687                 case CEE_JMP: {
3688                         /*
3689                          * Note: this 'frame destruction' logic is useful for tail calls, too.
3690                          * Keep in sync with the code in emit_epilog.
3691                          */
3692                         int pos = 0, i;
3693
3694                         /* FIXME: no tracing support... */
3695                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3696                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
3697
3698                         g_assert (!cfg->method->save_lmf);
3699
3700                         code = emit_load_volatile_arguments (cfg, code);
3701
3702                         for (i = 0; i < AMD64_NREG; ++i)
3703                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
3704                                         pos -= sizeof (gpointer);
3705                         
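                             /* point %rsp at the callee saved register area below %rbp so the
                              * pops below restore the registers saved in the prolog */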
3706                         if (pos)
3707                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
3708
3709                         /* Pop registers in reverse order */
3710                         for (i = AMD64_NREG - 1; i > 0; --i)
3711                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3712                                         amd64_pop_reg (code, i);
3713                                 }
3714
3715                         amd64_leave (code);
3716                         offset = code - cfg->native_code;
3717                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3718                         amd64_set_reg_template (code, AMD64_R11);
3719                         amd64_jump_reg (code, AMD64_R11);
3720                         break;
3721                 }
3722                 case OP_CHECK_THIS:
3723                         /* ensure ins->sreg1 is not NULL */
3724                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
3725                         break;
3726                 case OP_ARGLIST: {
3727                         amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
3728                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
3729                         break;
3730                 }
3731                 case OP_FCALL:
3732                 case OP_LCALL:
3733                 case OP_VCALL:
3734                 case OP_VOIDCALL:
3735                 case CEE_CALL:
3736                         call = (MonoCallInst*)ins;
3737                         /*
3738                          * The AMD64 ABI forces callers to know about varargs.
3739                          */
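                        /* for a varargs pinvoke the ABI requires %al to hold an upper bound on the
                         * number of SSE registers used for the variable arguments; clearing %rax
                         * (and thus %al) signals that none are used */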
3740                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
3741                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3742
3743                         if (ins->flags & MONO_INST_HAS_METHOD)
3744                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3745                         else
3746                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3747                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3748                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3749                         code = emit_move_return_value (cfg, ins, code);
3750                         break;
3751                 case OP_FCALL_REG:
3752                 case OP_LCALL_REG:
3753                 case OP_VCALL_REG:
3754                 case OP_VOIDCALL_REG:
3755                 case OP_CALL_REG:
3756                         call = (MonoCallInst*)ins;
3757
3758                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3759                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3760                                 ins->sreg1 = AMD64_R11;
3761                         }
3762
3763                         /*
3764                          * The AMD64 ABI forces callers to know about varargs.
3765                          */
3766                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
3767                                 if (ins->sreg1 == AMD64_RAX) {
3768                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
3769                                         ins->sreg1 = AMD64_R11;
3770                                 }
3771                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3772                         }
3773                         amd64_call_reg (code, ins->sreg1);
3774                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3775                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3776                         code = emit_move_return_value (cfg, ins, code);
3777                         break;
3778                 case OP_FCALL_MEMBASE:
3779                 case OP_LCALL_MEMBASE:
3780                 case OP_VCALL_MEMBASE:
3781                 case OP_VOIDCALL_MEMBASE:
3782                 case OP_CALL_MEMBASE:
3783                         call = (MonoCallInst*)ins;
3784
3785                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3786                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3787                                 ins->sreg1 = AMD64_R11;
3788                         }
3789
3790                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3791                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3792                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3793                         code = emit_move_return_value (cfg, ins, code);
3794                         break;
3795                 case OP_OUTARG:
3796                 case OP_X86_PUSH:
3797                         amd64_push_reg (code, ins->sreg1);
3798                         break;
3799                 case OP_X86_PUSH_IMM:
3800                         g_assert (amd64_is_imm32 (ins->inst_imm));
3801                         amd64_push_imm (code, ins->inst_imm);
3802                         break;
3803                 case OP_X86_PUSH_MEMBASE:
3804                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3805                         break;
3806                 case OP_X86_PUSH_OBJ: 
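                             /* push a valuetype: reserve inst_imm bytes on the stack, then copy the object
                              * there 8 bytes at a time with rep movs, preserving %rdi/%rsi/%rcx around the copy */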
3807                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3808                         amd64_push_reg (code, AMD64_RDI);
3809                         amd64_push_reg (code, AMD64_RSI);
3810                         amd64_push_reg (code, AMD64_RCX);
3811                         if (ins->inst_offset)
3812                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3813                         else
3814                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3815                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
3816                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
3817                         amd64_cld (code);
3818                         amd64_prefix (code, X86_REP_PREFIX);
3819                         amd64_movsd (code);
3820                         amd64_pop_reg (code, AMD64_RCX);
3821                         amd64_pop_reg (code, AMD64_RSI);
3822                         amd64_pop_reg (code, AMD64_RDI);
3823                         break;
3824                 case OP_X86_LEA:
3825                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3826                         break;
3827                 case OP_X86_LEA_MEMBASE:
3828                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3829                         break;
3830                 case OP_X86_XCHG:
3831                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3832                         break;
3833                 case OP_LOCALLOC:
3834                         /* keep alignment */
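                             /* round the requested size up to a multiple of MONO_ARCH_FRAME_ALIGNMENT:
                              * size = (size + (align - 1)) & ~(align - 1) */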
3835                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3836                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3837                         code = mono_emit_stack_alloc (code, ins);
3838                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3839                         break;
3840                 case CEE_RET:
3841                         amd64_ret (code);
3842                         break;
3843                 case CEE_THROW: {
3844                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3845                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3846                                              (gpointer)"mono_arch_throw_exception");
3847                         break;
3848                 }
3849                 case OP_CALL_HANDLER: 
3850                         /* Align stack */
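                             /* the call below pushes an 8 byte return address, so together with this
                              * adjustment the handler runs on a 16 byte aligned stack */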
3851                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
3852                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3853                         amd64_call_imm (code, 0);
3854                         /* Restore stack alignment */
3855                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3856                         break;
3857                 case OP_LABEL:
3858                         ins->inst_c0 = code - cfg->native_code;
3859                         break;
3860                 case CEE_BR:
3861                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3862                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3863                         //break;
3864                         if (ins->flags & MONO_INST_BRLABEL) {
3865                                 if (ins->inst_i0->inst_c0) {
3866                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3867                                 } else {
3868                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3869                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3870                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3871                                                 x86_jump8 (code, 0);
3872                                         else 
3873                                                 x86_jump32 (code, 0);
3874                                 }
3875                         } else {
3876                                 if (ins->inst_target_bb->native_offset) {
3877                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
3878                                 } else {
3879                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3880                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3881                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3882                                                 x86_jump8 (code, 0);
3883                                         else 
3884                                                 x86_jump32 (code, 0);
3885                                 } 
3886                         }
3887                         break;
3888                 case OP_BR_REG:
3889                         amd64_jump_reg (code, ins->sreg1);
3890                         break;
3891                 case OP_CEQ:
3892                 case OP_ICEQ:
3893                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3894                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3895                         break;
3896                 case OP_CLT:
3897                 case OP_ICLT:
3898                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
3899                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3900                         break;
3901                 case OP_CLT_UN:
3902                 case OP_ICLT_UN:
3903                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3904                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3905                         break;
3906                 case OP_CGT:
3907                 case OP_ICGT:
3908                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
3909                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3910                         break;
3911                 case OP_CGT_UN:
3912                 case OP_ICGT_UN:
3913                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3914                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3915                         break;
3916                 case OP_COND_EXC_EQ:
3917                 case OP_COND_EXC_NE_UN:
3918                 case OP_COND_EXC_LT:
3919                 case OP_COND_EXC_LT_UN:
3920                 case OP_COND_EXC_GT:
3921                 case OP_COND_EXC_GT_UN:
3922                 case OP_COND_EXC_GE:
3923                 case OP_COND_EXC_GE_UN:
3924                 case OP_COND_EXC_LE:
3925                 case OP_COND_EXC_LE_UN:
3926                 case OP_COND_EXC_OV:
3927                 case OP_COND_EXC_NO:
3928                 case OP_COND_EXC_C:
3929                 case OP_COND_EXC_NC:
3930                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
3931                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
3932                         break;
3933                 case CEE_BEQ:
3934                 case CEE_BNE_UN:
3935                 case CEE_BLT:
3936                 case CEE_BLT_UN:
3937                 case CEE_BGT:
3938                 case CEE_BGT_UN:
3939                 case CEE_BGE:
3940                 case CEE_BGE_UN:
3941                 case CEE_BLE:
3942                 case CEE_BLE_UN:
3943                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
3944                         break;
3945
3946                 /* floating point opcodes */
3947                 case OP_R8CONST: {
3948                         double d = *(double *)ins->inst_p0;
3949
3950                         if (use_sse2) {
3951                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
3952                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3953                                 }
3954                                 else {
3955                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3956                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3957                                 }
3958                         }
3959                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
3960                                 amd64_fldz (code);
3961                         } else if (d == 1.0) {
3962                                 x86_fld1 (code);
3963                         } else {
3964                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3965                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
3966                         }
3967                         break;
3968                 }
3969                 case OP_R4CONST: {
3970                         float f = *(float *)ins->inst_p0;
3971
3972                         if (use_sse2) {
3973                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
3974                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3975                                 }
3976                                 else {
3977                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3978                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3979                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
3980                                 }
3981                         }
3982                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
3983                                 amd64_fldz (code);
3984                         } else if (f == 1.0) {
3985                                 x86_fld1 (code);
3986                         } else {
3987                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3988                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
3989                         }
3990                         break;
3991                 }
3992                 case OP_STORER8_MEMBASE_REG:
3993                         if (use_sse2)
3994                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
3995                         else
3996                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
3997                         break;
3998                 case OP_LOADR8_SPILL_MEMBASE:
3999                         if (use_sse2)
4000                                 g_assert_not_reached ();
4001                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4002                         amd64_fxch (code, 1);
4003                         break;
4004                 case OP_LOADR8_MEMBASE:
4005                         if (use_sse2)
4006                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4007                         else
4008                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4009                         break;
4010                 case OP_STORER4_MEMBASE_REG:
4011                         if (use_sse2) {
4012                                 /* This requires a double->single conversion */
4013                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
4014                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
4015                         }
4016                         else
4017                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
4018                         break;
4019                 case OP_LOADR4_MEMBASE:
4020                         if (use_sse2) {
4021                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4022                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4023                         }
4024                         else
4025                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4026                         break;
4027                 case CEE_CONV_R4: /* FIXME: change precision */
4028                 case CEE_CONV_R8:
4029                         if (use_sse2)
4030                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4031                         else {
4032                                 amd64_push_reg (code, ins->sreg1);
4033                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
4034                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4035                         }
4036                         break;
4037                 case CEE_CONV_R_UN:
4038                         /* Emulated */
4039                         g_assert_not_reached ();
4040                         break;
4041                 case OP_LCONV_TO_R4: /* FIXME: change precision */
4042                 case OP_LCONV_TO_R8:
4043                         if (use_sse2)
4044                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4045                         else {
4046                                 amd64_push_reg (code, ins->sreg1);
4047                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4048                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4049                         }
4050                         break;
4051                 case OP_X86_FP_LOAD_I8:
4052                         if (use_sse2)
4053                                 g_assert_not_reached ();
4054                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4055                         break;
4056                 case OP_X86_FP_LOAD_I4:
4057                         if (use_sse2)
4058                                 g_assert_not_reached ();
4059                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4060                         break;
4061                 case OP_FCONV_TO_I1:
4062                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4063                         break;
4064                 case OP_FCONV_TO_U1:
4065                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4066                         break;
4067                 case OP_FCONV_TO_I2:
4068                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4069                         break;
4070                 case OP_FCONV_TO_U2:
4071                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4072                         break;
4073                 case OP_FCONV_TO_I4:
4074                 case OP_FCONV_TO_I:
4075                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4076                         break;
4077                 case OP_FCONV_TO_I8:
4078                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4079                         break;
4080                 case OP_LCONV_TO_R_UN: { 
4081                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
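                             /* mn is 2^64 as an 80 bit extended value (mantissa 0x8000000000000000,
                              * biased exponent 0x403f); it is added below when the value is negative
                              * to recover the unsigned magnitude */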
4082                         guint8 *br;
4083
4084                         if (use_sse2)
4085                                 g_assert_not_reached ();
4086
4087                         /* load 64bit integer to FP stack */
4088                         amd64_push_imm (code, 0);
4089                         amd64_push_reg (code, ins->sreg2);
4090                         amd64_push_reg (code, ins->sreg1);
4091                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4092                         /* store as 80bit FP value */
4093                         x86_fst80_membase (code, AMD64_RSP, 0);
4094                         
4095                         /* test if lreg is negative */
4096                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4097                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
4098         
4099                         /* add correction constant mn */
4100                         x86_fld80_mem (code, mn);
4101                         x86_fld80_membase (code, AMD64_RSP, 0);
4102                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4103                         x86_fst80_membase (code, AMD64_RSP, 0);
4104
4105                         amd64_patch (br, code);
4106
4107                         x86_fld80_membase (code, AMD64_RSP, 0);
4108                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 24);
4109
4110                         break;
4111                 }
4112                 case OP_LCONV_TO_OVF_I: {
4113                         guint8 *br [3], *label [1];
4114
4115                         if (use_sse2)
4116                                 g_assert_not_reached ();
4117
4118                         /* 
4119                          * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
4120                          */
4121                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
4122
4123                         /* If the low word top bit is set, see if we are negative */
4124                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
4125                         /* We are not negative (no top bit set), check that our top word is zero */
4126                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4127                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
4128                         label [0] = code;
4129
4130                         /* throw exception */
4131                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
4132                         x86_jump32 (code, 0);
4133         
4134                         amd64_patch (br [0], code);
4135                         /* our top bit is set, check that the top word is 0xffffffff */
4136                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
4137                 
4138                         amd64_patch (br [1], code);
4139                         /* nope, emit exception */
4140                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
4141                         amd64_patch (br [2], label [0]);
4142
4143                         if (ins->dreg != ins->sreg1)
4144                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
4145                         break;
4146                 }
4147                 case CEE_CONV_OVF_U4:
4148                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
4149                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
4150                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4151                         break;
4152                 case CEE_CONV_OVF_I4_UN:
4153                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
4154                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
4155                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4156                         break;
4157                 case OP_FMOVE:
4158                         if (use_sse2 && (ins->dreg != ins->sreg1))
4159                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
4160                         break;
4161                 case OP_FADD:
4162                         if (use_sse2)
4163                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
4164                         else
4165                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4166                         break;
4167                 case OP_FSUB:
4168                         if (use_sse2)
4169                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
4170                         else
4171                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
4172                         break;          
4173                 case OP_FMUL:
4174                         if (use_sse2)
4175                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
4176                         else
4177                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
4178                         break;          
4179                 case OP_FDIV:
4180                         if (use_sse2)
4181                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
4182                         else
4183                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
4184                         break;          
4185                 case OP_FNEG:
4186                         if (use_sse2) {
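                                     /* flip the sign: build a 0x8000000000000000 mask on the stack and
                                      * xorpd it into the destination register */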
4187                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
4188                                 amd64_push_reg (code, AMD64_R11);
4189                                 amd64_push_reg (code, AMD64_R11);
4190                                 amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
                                     /* free the stack space used for the mask */
                                     amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
4191                         }
4192                         else
4193                                 amd64_fchs (code);
4194                         break;          
4195                 case OP_SIN:
4196                         if (use_sse2)
4197                                 g_assert_not_reached ();
4198                         amd64_fsin (code);
4199                         amd64_fldz (code);
4200                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4201                         break;          
4202                 case OP_COS:
4203                         if (use_sse2)
4204                                 g_assert_not_reached ();
4205                         amd64_fcos (code);
4206                         amd64_fldz (code);
4207                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4208                         break;          
4209                 case OP_ABS:
4210                         if (use_sse2)
4211                                 g_assert_not_reached ();
4212                         amd64_fabs (code);
4213                         break;          
4214                 case OP_TAN: {
4215                         /* 
4216                          * it really doesn't make sense to inline all this code,
4217                          * it's here just to show that things may not be as simple 
4218                          * as they appear.
4219                          */
4220                         guchar *check_pos, *end_tan, *pop_jump;
4221                         if (use_sse2)
4222                                 g_assert_not_reached ();
4223                         amd64_push_reg (code, AMD64_RAX);
4224                         amd64_fptan (code);
4225                         amd64_fnstsw (code);
4226                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
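                             /* C2 set means the operand was out of range for fptan: reduce it modulo 2*pi
                              * (pi is loaded and doubled below) with fprem1, then retry */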
4227                         check_pos = code;
4228                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4229                         amd64_fstp (code, 0); /* pop the 1.0 */
4230                         end_tan = code;
4231                         x86_jump8 (code, 0);
4232                         amd64_fldpi (code);
4233                         amd64_fp_op (code, X86_FADD, 0);
4234                         amd64_fxch (code, 1);
4235                         x86_fprem1 (code);
4236                         amd64_fstsw (code);
4237                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4238                         pop_jump = code;
4239                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4240                         amd64_fstp (code, 1);
4241                         amd64_fptan (code);
4242                         amd64_patch (pop_jump, code);
4243                         amd64_fstp (code, 0); /* pop the 1.0 */
4244                         amd64_patch (check_pos, code);
4245                         amd64_patch (end_tan, code);
4246                         amd64_fldz (code);
4247                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4248                         amd64_pop_reg (code, AMD64_RAX);
4249                         break;
4250                 }
4251                 case OP_ATAN:
4252                         if (use_sse2)
4253                                 g_assert_not_reached ();
4254                         x86_fld1 (code);
4255                         amd64_fpatan (code);
4256                         amd64_fldz (code);
4257                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4258                         break;          
4259                 case OP_SQRT:
4260                         if (use_sse2)
4261                                 g_assert_not_reached ();
4262                         amd64_fsqrt (code);
4263                         break;          
4264                 case OP_X86_FPOP:
4265                         if (!use_sse2)
4266                                 amd64_fstp (code, 0);
4267                         break;          
4268                 case OP_FREM: {
4269                         guint8 *l1, *l2;
4270
4271                         if (use_sse2)
4272                                 g_assert_not_reached ();
4273                         amd64_push_reg (code, AMD64_RAX);
4274                         /* we need to exchange ST(0) with ST(1) */
4275                         amd64_fxch (code, 1);
4276
4277                         /* this requires a loop, because fprem sometimes 
4278                          * returns a partial remainder */
4279                         l1 = code;
4280                         /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
4281                         /* x86_fprem1 (code); */
4282                         amd64_fprem (code);
4283                         amd64_fnstsw (code);
4284                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
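                             /* C2 set means fprem produced only a partial remainder, so loop back and
                              * run it again */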
4285                         l2 = code + 2;
4286                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
4287
4288                         /* pop result */
4289                         amd64_fstp (code, 1);
4290
4291                         amd64_pop_reg (code, AMD64_RAX);
4292                         break;
4293                 }
4294                 case OP_FCOMPARE:
4295                         if (use_sse2) {
4296                                 amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4297                                 break;
4298                         }
4299                         if (cfg->opt & MONO_OPT_FCMOV) {
4300                                 amd64_fcomip (code, 1);
4301                                 amd64_fstp (code, 0);
4302                                 break;
4303                         }
4304                         /* this overwrites EAX */
4305                         EMIT_FPCOMPARE(code);
4306                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4307                         break;
4308                 case OP_FCEQ:
4309                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4310                                 /* zeroing the register at the start results in 
4311                                  * shorter and faster code (we can also remove the widening op)
4312                                  */
4313                                 guchar *unordered_check;
4314                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4315                                 
4316                                 if (use_sse2)
4317                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4318                                 else {
4319                                         amd64_fcomip (code, 1);
4320                                         amd64_fstp (code, 0);
4321                                 }
4322                                 unordered_check = code;
4323                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4324                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
4325                                 amd64_patch (unordered_check, code);
4326                                 break;
4327                         }
4328                         if (ins->dreg != AMD64_RAX) 
4329                                 amd64_push_reg (code, AMD64_RAX);
4330
4331                         EMIT_FPCOMPARE(code);
4332                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
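                             /* after masking, 0x4000 (C3 alone) is the 'operands equal' result of the
                              * x87 compare */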
4333                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4334                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4335                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4336
4337                         if (ins->dreg != AMD64_RAX) 
4338                                 amd64_pop_reg (code, AMD64_RAX);
4339                         break;
4340                 case OP_FCLT:
4341                 case OP_FCLT_UN:
4342                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4343                                 /* zeroing the register at the start results in 
4344                                  * shorter and faster code (we can also remove the widening op)
4345                                  */
4346                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4347                                 if (use_sse2)
4348                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4349                                 else {
4350                                         amd64_fcomip (code, 1);
4351                                         amd64_fstp (code, 0);
4352                                 }
4353                                 if (ins->opcode == OP_FCLT_UN) {
4354                                         guchar *unordered_check = code;
4355                                         guchar *jump_to_end;
4356                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4357                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4358                                         jump_to_end = code;
4359                                         x86_jump8 (code, 0);
4360                                         amd64_patch (unordered_check, code);
4361                                         amd64_inc_reg (code, ins->dreg);
4362                                         amd64_patch (jump_to_end, code);
4363                                 } else {
4364                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4365                                 }
4366                                 break;
4367                         }
4368                         if (ins->dreg != AMD64_RAX) 
4369                                 amd64_push_reg (code, AMD64_RAX);
4370
4371                         EMIT_FPCOMPARE(code);
4372                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4373                         if (ins->opcode == OP_FCLT_UN) {
4374                                 guchar *is_not_zero_check, *end_jump;
4375                                 is_not_zero_check = code;
4376                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4377                                 end_jump = code;
4378                                 x86_jump8 (code, 0);
4379                                 amd64_patch (is_not_zero_check, code);
4380                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4381
4382                                 amd64_patch (end_jump, code);
4383                         }
4384                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4385                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4386
4387                         if (ins->dreg != AMD64_RAX) 
4388                                 amd64_pop_reg (code, AMD64_RAX);
4389                         break;
4390                 case OP_FCGT:
4391                 case OP_FCGT_UN:
4392                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4393                                 /* zeroing the register at the start results in 
4394                                  * shorter and faster code (we can also remove the widening op)
4395                                  */
4396                                 guchar *unordered_check;
4397                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4398                                 if (use_sse2)
4399                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4400                                 else {
4401                                         amd64_fcomip (code, 1);
4402                                         amd64_fstp (code, 0);
4403                                 }
4404                                 if (ins->opcode == OP_FCGT) {
4405                                         unordered_check = code;
4406                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4407                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4408                                         amd64_patch (unordered_check, code);
4409                                 } else {
4410                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4411                                 }
4412                                 break;
4413                         }
4414                         if (ins->dreg != AMD64_RAX) 
4415                                 amd64_push_reg (code, AMD64_RAX);
4416
4417                         EMIT_FPCOMPARE(code);
4418                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4419                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4420                         if (ins->opcode == OP_FCGT_UN) {
4421                                 guchar *is_not_zero_check, *end_jump;
4422                                 is_not_zero_check = code;
4423                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4424                                 end_jump = code;
4425                                 x86_jump8 (code, 0);
4426                                 amd64_patch (is_not_zero_check, code);
4427                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4428
4429                                 amd64_patch (end_jump, code);
4430                         }
4431                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4432                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4433
4434                         if (ins->dreg != AMD64_RAX) 
4435                                 amd64_pop_reg (code, AMD64_RAX);
4436                         break;
4437                 case OP_FBEQ:
4438                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4439                                 guchar *jump = code;
4440                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
4441                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4442                                 amd64_patch (jump, code);
4443                                 break;
4444                         }
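			/* after the x87 compare, a masked status word of exactly C3 (0x4000) means the operands compared equal */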
4445                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4446                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
4447                         break;
4448                 case OP_FBNE_UN:
4449                         /* Branch if C013 != 100 */
4450                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4451                                 /* branch if !ZF or (PF|CF) */
4452                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4453                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4454                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
4455                                 break;
4456                         }
4457                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4458                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4459                         break;
4460                 case OP_FBLT:
4461                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4462                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4463                                 break;
4464                         }
4465                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4466                         break;
4467                 case OP_FBLT_UN:
4468                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4469                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4470                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4471                                 break;
4472                         }
4473                         if (ins->opcode == OP_FBLT_UN) {
4474                                 guchar *is_not_zero_check, *end_jump;
4475                                 is_not_zero_check = code;
4476                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4477                                 end_jump = code;
4478                                 x86_jump8 (code, 0);
4479                                 amd64_patch (is_not_zero_check, code);
4480                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4481
4482                                 amd64_patch (end_jump, code);
4483                         }
4484                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4485                         break;
4486                 case OP_FBGT:
4487                 case OP_FBGT_UN:
4488                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4489                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
4490                                 break;
4491                         }
4492                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4493                         if (ins->opcode == OP_FBGT_UN) {
4494                                 guchar *is_not_zero_check, *end_jump;
4495                                 is_not_zero_check = code;
4496                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4497                                 end_jump = code;
4498                                 x86_jump8 (code, 0);
4499                                 amd64_patch (is_not_zero_check, code);
4500                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4501
4502                                 amd64_patch (end_jump, code);
4503                         }
4504                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4505                         break;
4506                 case OP_FBGE:
4507                         /* Branch if C013 == 100 or 001 */
4508                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4509                                 guchar *br1;
4510
4511                                 /* skip branch if C1=1 */
4512                                 br1 = code;
4513                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4514                                 /* branch if (C0 | C3) = 1 */
4515                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
4516                                 amd64_patch (br1, code);
4517                                 break;
4518                         }
4519                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4520                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4521                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4522                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4523                         break;
4524                 case OP_FBGE_UN:
4525                         /* Branch if C013 == 000 */
4526                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4527                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
4528                                 break;
4529                         }
4530                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4531                         break;
4532                 case OP_FBLE:
4533                         /* Branch if C013=000 or 100 */
4534                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4535                                 guchar *br1;
4536
4537                                 /* skip branch if C1=1 */
4538                                 br1 = code;
4539                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4540                                 /* branch if C0=0 */
4541                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
4542                                 amd64_patch (br1, code);
4543                                 break;
4544                         }
4545                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
4546                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4547                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4548                         break;
4549                 case OP_FBLE_UN:
4550                         /* Branch if C013 != 001 */
4551                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4552                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4553                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
4554                                 break;
4555                         }
4556                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4557                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4558                         break;
4559                 case CEE_CKFINITE: {
4560                         if (use_sse2) {
4561                                 /* Transfer value to the fp stack */
4562                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4563                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
4564                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
4565                         }
4566                         amd64_push_reg (code, AMD64_RAX);
4567                         amd64_fxam (code);
4568                         amd64_fnstsw (code);
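			/* fxam classification: C0 set with C3 clear means NaN or infinity, i.e. the value is not finite */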
4569                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
4570                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4571                         amd64_pop_reg (code, AMD64_RAX);
4572                         if (use_sse2) {
4573                                 amd64_fstp (code, 0);
4574                         }                               
4575                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
4576                         break;
4577                 }
4578                 case OP_X86_TLS_GET: {
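			/* Read a slot out of thread local storage: mov %fs:<inst_offset>, dreg */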
4579                         x86_prefix (code, X86_FS_PREFIX);
4580                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
4581                         break;
4582                 }
4583                 default:
4584                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4585                         g_assert_not_reached ();
4586                 }
4587
4588                 if ((code - cfg->native_code - offset) > max_len) {
4589                         g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4590                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4591                         g_assert_not_reached ();
4592                 }
4593                
4594                 cpos += max_len;
4595
4596                 last_ins = ins;
4597                 last_offset = offset;
4598                 
4599                 ins = ins->next;
4600         }
4601
4602         cfg->code_len = code - cfg->native_code;
4603 }
4604
4605 void
4606 mono_arch_register_lowlevel_calls (void)
4607 {
4608 }
4609
4610 void
4611 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4612 {
4613         MonoJumpInfo *patch_info;
4614
4615         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4616                 unsigned char *ip = patch_info->ip.i + code;
4617                 const unsigned char *target;
4618
4619                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4620
4621                 switch (patch_info->type) {
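		/*
		 * Most of the cases below patch the 64 bit immediate of a "mov reg, imm"
		 * emitted earlier: the REX prefix and the opcode take two bytes, so the
		 * immediate starts at ip + 2.
		 */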
4622                 case MONO_PATCH_INFO_METHOD_REL:
4623                 case MONO_PATCH_INFO_METHOD_JUMP:
4624                         *((gconstpointer *)(ip + 2)) = target;
4625                         continue;
4626                 case MONO_PATCH_INFO_SWITCH: {
4627                         *((gconstpointer *)(ip + 2)) = target;
4628                         continue;
4629                 }
4630                 case MONO_PATCH_INFO_IID:
4631                         *((guint32 *)(ip + 2)) = (guint32)(guint64)target;
4632                         continue;                       
4633                 case MONO_PATCH_INFO_CLASS_INIT: {
4634                         /* FIXME: Might already have been changed to a nop */
4635                         *((gconstpointer *)(ip + 2)) = target;
4636                         continue;
4637                 }
4638                 case MONO_PATCH_INFO_R8:
4639                 case MONO_PATCH_INFO_R4:
4640                         g_assert_not_reached ();
4641                         continue;
4642                 case MONO_PATCH_INFO_METHODCONST:
4643                 case MONO_PATCH_INFO_CLASS:
4644                 case MONO_PATCH_INFO_IMAGE:
4645                 case MONO_PATCH_INFO_FIELD:
4646                 case MONO_PATCH_INFO_VTABLE:
4647                 case MONO_PATCH_INFO_SFLDA:
4648                 case MONO_PATCH_INFO_EXC_NAME:
4649                 case MONO_PATCH_INFO_LDSTR:
4650                 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4651                 case MONO_PATCH_INFO_LDTOKEN:
4652                 case MONO_PATCH_INFO_IP:
4653                         *((gconstpointer *)(ip + 2)) = target;
4654                         continue;
4655                 case MONO_PATCH_INFO_METHOD:
4656                         *((gconstpointer *)(ip + 2)) = target;
4657                         continue;
4658                 case MONO_PATCH_INFO_ABS:
4659                 case MONO_PATCH_INFO_INTERNAL_METHOD:
4660                         break;
4661                 default:
4662                         break;
4663                 }
4664                 amd64_patch (ip, (gpointer)target);
4665         }
4666 }
4667
4668 guint8 *
4669 mono_arch_emit_prolog (MonoCompile *cfg)
4670 {
4671         MonoMethod *method = cfg->method;
4672         MonoBasicBlock *bb;
4673         MonoMethodSignature *sig;
4674         MonoInst *inst;
4675         int alloc_size, pos, max_offset, i;
4676         guint8 *code;
4677         CallInfo *cinfo;
4678
4679         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
4680         code = cfg->native_code = g_malloc (cfg->code_size);
4681
4682         amd64_push_reg (code, AMD64_RBP);
4683         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
4684
4685         /* Stack alignment check */
4686 #if 0
4687         {
4688                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4689                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4690                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4691                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4692                 amd64_breakpoint (code);
4693         }
4694 #endif
4695
4696         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
4697         pos = 0;
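	/* pos counts the bytes already reserved below the frame pointer (the LMF area or the callee saved register pushes) */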
4698
4699         if (method->save_lmf) {
4700
4701                 pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
4702
4703                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
4704
4705                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4706
4707                 /* Save ip */
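		/* lea 0(%rip), %r11 materializes the address of the next instruction */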
4708                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
4709                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
4710                 /* Save fp */
4711                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
4712                 /* Save method */
4713                 /* FIXME: add a relocation for this */
4714                 if (IS_IMM32 (cfg->method))
4715                         amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
4716                 else {
4717                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
4718                         amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
4719                 }
4720                 /* Save callee saved regs */
4721                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
4722                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
4723                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
4724                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
4725                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
4726         } else {
4727
4728                 for (i = 0; i < AMD64_NREG; ++i)
4729                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4730                                 amd64_push_reg (code, i);
4731                                 pos += sizeof (gpointer);
4732                         }
4733         }
4734
4735         alloc_size -= pos;
4736
4737         if (alloc_size) {
4738                 /* See mono_emit_stack_alloc */
4739 #ifdef PLATFORM_WIN32
4740                 guint32 remaining_size = alloc_size;
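		/* Allocate the frame one page at a time and touch each page, so the guard page mechanism used by Windows can grow the stack */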
4741                 while (remaining_size >= 0x1000) {
4742                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
4743                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
4744                         remaining_size -= 0x1000;
4745                 }
4746                 if (remaining_size)
4747                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
4748 #else
4749                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
4750 #endif
4751         }
4752
4753         /* compute max_offset in order to use short forward jumps */
4754         max_offset = 0;
4755         if (cfg->opt & MONO_OPT_BRANCH) {
4756                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4757                         MonoInst *ins = bb->code;
4758                         bb->max_offset = max_offset;
4759
4760                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4761                                 max_offset += 6;
4762                         /* max alignment for loops */
4763                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4764                                 max_offset += LOOP_ALIGNMENT;
4765
4766                         while (ins) {
4767                                 if (ins->opcode == OP_LABEL)
4768                                         ins->inst_c1 = max_offset;
4769                                 
4770                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
4771                                 ins = ins->next;
4772                         }
4773                 }
4774         }
4775
4776         sig = method->signature;
4777         pos = 0;
4778
4779         cinfo = get_call_info (sig, FALSE);
4780
4781         if (sig->ret->type != MONO_TYPE_VOID) {
4782                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
4783                         /* Save volatile arguments to the stack */
4784                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
4785                 }
4786         }
4787
4788         /* Keep this in sync with emit_load_volatile_arguments */
4789         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4790                 ArgInfo *ainfo = cinfo->args + i;
4791                 gint32 stack_offset;
4792                 MonoType *arg_type;
4793                 inst = cfg->varinfo [i];
4794
4795                 if (sig->hasthis && (i == 0))
4796                         arg_type = &mono_defaults.object_class->byval_arg;
4797                 else
4798                         arg_type = sig->params [i - sig->hasthis];
4799
4800                 stack_offset = ainfo->offset + ARGS_OFFSET;
4801
4802                 /* Save volatile arguments to the stack */
4803                 if (inst->opcode != OP_REGVAR) {
4804                         switch (ainfo->storage) {
4805                         case ArgInIReg: {
4806                                 guint32 size = 8;
4807
4808                                 /* FIXME: I1 etc */
4809                                 /*
4810                                 if (stack_offset & 0x1)
4811                                         size = 1;
4812                                 else if (stack_offset & 0x2)
4813                                         size = 2;
4814                                 else if (stack_offset & 0x4)
4815                                         size = 4;
4816                                 else
4817                                         size = 8;
4818                                 */
4819                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
4820                                 break;
4821                         }
4822                         case ArgInFloatSSEReg:
4823                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4824                                 break;
4825                         case ArgInDoubleSSEReg:
4826                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4827                                 break;
4828                         default:
4829                                 break;
4830                         }
4831                 }
4832
4833                 if (inst->opcode == OP_REGVAR) {
4834                         /* Argument allocated to (non-volatile) register */
4835                         switch (ainfo->storage) {
4836                         case ArgInIReg:
4837                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
4838                                 break;
4839                         case ArgOnStack:
4840                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
4841                                 break;
4842                         default:
4843                                 g_assert_not_reached ();
4844                         }
4845                 }
4846         }
4847
4848         if (method->save_lmf) {
4849                 if (lmf_tls_offset != -1) {
4850                         /* Load lmf quickly using the FS register */
4851                         x86_prefix (code, X86_FS_PREFIX);
4852                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
4853                 }
4854                 else {
4855                         /* 
4856                          * The call might clobber argument registers, but they are already
4857                          * saved to the stack/global regs.
4858                          */
4859
4860                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
4861                                                                  (gpointer)"mono_get_lmf_addr");                
4862                 }
4863
4864                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4865
4866                 /* Save lmf_addr */
4867                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
4868                 /* Save previous_lmf */
4869                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
4870                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
4871                 /* Set new lmf */
4872                 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
4873                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
4874         }
4875
4876
4877         g_free (cinfo);
4878
4879         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4880                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4881
4882         cfg->code_len = code - cfg->native_code;
4883
4884         g_assert (cfg->code_len < cfg->code_size);
4885
4886         return code;
4887 }
4888
4889 void
4890 mono_arch_emit_epilog (MonoCompile *cfg)
4891 {
4892         MonoJumpInfo *patch_info;
4893         MonoMethod *method = cfg->method;
4894         int pos, i;
4895         guint8 *code;
4896
4897         code = cfg->native_code + cfg->code_len;
4898
4899         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4900                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4901
4902         /* the code restoring the registers must be kept in sync with CEE_JMP */
4903         pos = 0;
4904         
4905         if (method->save_lmf) {
4906                 gint32 lmf_offset = - cfg->arch.lmf_offset;
4907
4908                 /* Restore previous lmf */
4909                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
4910                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
4911                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
4912
4913                 /* Restore callee saved regs */
4914                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
4915                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
4916                 }
4917                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
4918                         amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
4919                 }
4920                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
4921                         amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
4922                 }
4923                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
4924                         amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
4925                 }
4926                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
4927                         amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
4928                 }
4929         } else {
4930
4931                 for (i = 0; i < AMD64_NREG; ++i)
4932                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
4933                                 pos -= sizeof (gpointer);
4934
4935                 if (pos) {
4936                         if (pos == - sizeof (gpointer)) {
4937                                 /* Only one register, so avoid lea */
4938                                 for (i = AMD64_NREG - 1; i > 0; --i)
4939                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4940                                                 amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
4941                                         }
4942                         }
4943                         else {
4944                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
4945
4946                                 /* Pop registers in reverse order */
4947                                 for (i = AMD64_NREG - 1; i > 0; --i)
4948                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4949                                                 amd64_pop_reg (code, i);
4950                                         }
4951                         }
4952                 }
4953         }
4954
4955         amd64_leave (code);
4956         amd64_ret (code);
4957
4958         /* add code to raise exceptions */
4959         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4960                 switch (patch_info->type) {
4961                 case MONO_PATCH_INFO_EXC: {
4962                         guint64 offset;
4963
4964                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
4965                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
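			/* %rdi receives the exception class name patched in above; %rsi is made to point at the branch that raised the exception */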
4966                         amd64_set_reg_template (code, AMD64_RDI);
4967                         /* 7 is the length of the lea */
4968                         offset = (((guint64)code + 7) - (guint64)cfg->native_code) - (guint64)patch_info->ip.i;
4969                         amd64_lea_membase (code, AMD64_RSI, AMD64_RIP, - offset);
4970                         patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4971                         patch_info->data.name = "mono_arch_throw_exception_by_name";
4972                         patch_info->ip.i = code - cfg->native_code;
4973                         EMIT_CALL ();
4974                         break;
4975                 }
4976                 default:
4977                         /* do nothing */
4978                         break;
4979                 }
4980         }
4981
4982         /* Handle relocations with RIP relative addressing */
4983         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4984                 gboolean remove = FALSE;
4985
4986                 switch (patch_info->type) {
4987                 case MONO_PATCH_INFO_R8: {
4988                         code = (guint8*)ALIGN_TO (code, 8);
4989
4990                         guint8* pos = cfg->native_code + patch_info->ip.i;
4991
4992                         *(double*)code = *(double*)patch_info->data.target;
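			/*
			 * Patch the 32 bit rip-relative displacement of the fp load emitted
			 * earlier so it addresses the constant stored above. The displacement
			 * is relative to the end of that load (8 bytes for the sse2 form,
			 * 7 bytes for the x87 form).
			 */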
4993
4994                         if (use_sse2)
4995                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
4996                         else
4997                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
4998                         code += 8;
4999
5000                         remove = TRUE;
5001                         break;
5002                 }
5003                 case MONO_PATCH_INFO_R4: {
5004                         code = (guint8*)ALIGN_TO (code, 8);
5005
5006                         guint8* pos = cfg->native_code + patch_info->ip.i;
5007
5008                         *(float*)code = *(float*)patch_info->data.target;
5009
5010                         if (use_sse2)
5011                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5012                         else
5013                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5014                         code += 4;
5015
5016                         remove = TRUE;
5017                         break;
5018                 }
5019                 default:
5020                         break;
5021                 }
5022
5023                 if (remove) {
5024                         if (patch_info == cfg->patch_info)
5025                                 cfg->patch_info = patch_info->next;
5026                         else {
5027                                 MonoJumpInfo *tmp;
5028
5029                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
5030                                         ;
5031                                 tmp->next = patch_info->next;
5032                         }
5033                 }
5034         }
5035
5036         cfg->code_len = code - cfg->native_code;
5037
5038         g_assert (cfg->code_len < cfg->code_size);
5039
5040 }
5041
5042 /*
5043  * Allow tracing to work with this interface (with an optional argument)
5044  */
5045
5046 /*
5047  * This may be needed on some archs or for debugging support.
5048  */
5049 void
5050 mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
5051 {
5052         /* no stack room needed now (may be needed for FASTCALL-trace support) */
5053         *stack = 0;
5054         /* split prolog-epilog requirements? */
5055         *code = 50; /* max bytes needed: check this number */
5056 }
5057
5058 void*
5059 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5060 {
5061         guchar *code = p;
5062         CallInfo *cinfo;
5063         MonoMethodSignature *sig;
5064         MonoInst *inst;
5065         int i, n, stack_area = 0;
5066
5067         /* Keep this in sync with mono_arch_get_argument_info */
5068
5069         if (enable_arguments) {
5070                 /* Allocate a new area on the stack and save arguments there */
5071                 sig = cfg->method->signature;
5072
5073                 cinfo = get_call_info (sig, FALSE);
5074
5075                 n = sig->param_count + sig->hasthis;
5076
5077                 stack_area = ALIGN_TO (n * 8, 16);
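		/* keep %rsp 16 byte aligned across the call to the trace function */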
5078
5079                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
5080
5081                 for (i = 0; i < n; ++i) {
5082                         inst = cfg->varinfo [i];
5083
5084                         if (inst->opcode == OP_REGVAR)
5085                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
5086                         else {
5087                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
5088                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
5089                         }
5090                 }
5091         }
5092
5093         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
5094         amd64_set_reg_template (code, AMD64_RDI);
5095         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
5096         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5097
5098         if (enable_arguments) {
5099                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
5100
5101                 g_free (cinfo);
5102         }
5103
5104         return code;
5105 }
5106
5107 enum {
5108         SAVE_NONE,
5109         SAVE_STRUCT,
5110         SAVE_EAX,
5111         SAVE_EAX_EDX,
5112         SAVE_XMM
5113 };
5114
5115 void*
5116 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5117 {
5118         guchar *code = p;
5119         int save_mode = SAVE_NONE;
5120         MonoMethod *method = cfg->method;
5121         int rtype = mono_type_get_underlying_type (method->signature->ret)->type;
5122         
5123         switch (rtype) {
5124         case MONO_TYPE_VOID:
5125                 /* special case string .ctor icall: the icall returns the new string even though the managed signature is void */
5126                 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
5127                         save_mode = SAVE_EAX;
5128                 else
5129                         save_mode = SAVE_NONE;
5130                 break;
5131         case MONO_TYPE_I8:
5132         case MONO_TYPE_U8:
5133                 save_mode = SAVE_EAX;
5134                 break;
5135         case MONO_TYPE_R4:
5136         case MONO_TYPE_R8:
5137                 save_mode = SAVE_XMM;
5138                 break;
5139         case MONO_TYPE_VALUETYPE:
5140                 save_mode = SAVE_STRUCT;
5141                 break;
5142         default:
5143                 save_mode = SAVE_EAX;
5144                 break;
5145         }
5146
5147         /* Save the result and copy it into the proper argument register */
5148         switch (save_mode) {
5149         case SAVE_EAX:
5150                 amd64_push_reg (code, AMD64_RAX);
5151                 /* Align stack */
5152                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5153                 if (enable_arguments)
5154                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
5155                 break;
5156         case SAVE_STRUCT:
5157                 /* FIXME: */
5158                 if (enable_arguments)
5159                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
5160                 break;
5161         case SAVE_XMM:
5162                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5163                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
5164                 /* Align stack */
5165                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5166                 /* 
5167                  * The result is already in the proper argument register so no copying
5168                  * needed.
5169                  */
5170                 break;
5171         case SAVE_NONE:
5172                 break;
5173         default:
5174                 g_assert_not_reached ();
5175         }
5176
5177         /* Set %al since this is a varargs call */
5178         if (save_mode == SAVE_XMM)
5179                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
5180         else
5181                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
5182
5183         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
5184         amd64_set_reg_template (code, AMD64_RDI);
5185         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5186
5187         /* Restore result */
5188         switch (save_mode) {
5189         case SAVE_EAX:
5190                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5191                 amd64_pop_reg (code, AMD64_RAX);
5192                 break;
5193         case SAVE_STRUCT:
5194                 /* FIXME: */
5195                 break;
5196         case SAVE_XMM:
5197                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5198                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
5199                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5200                 break;
5201         case SAVE_NONE:
5202                 break;
5203         default:
5204                 g_assert_not_reached ();
5205         }
5206
5207         return code;
5208 }
5209
5210 int
5211 mono_arch_max_epilog_size (MonoCompile *cfg)
5212 {
5213         int max_epilog_size = 16;
5214         MonoJumpInfo *patch_info;
5215         
5216         if (cfg->method->save_lmf)
5217                 max_epilog_size += 256;
5218         
5219         if (mono_jit_trace_calls != NULL)
5220                 max_epilog_size += 50;
5221
5222         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5223                 max_epilog_size += 50;
5224
5225         max_epilog_size += (AMD64_NREG * 2);
5226
5227         /* 
5228          * make sure we have enough space for exceptions
5229          */
5230         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5231                 if (patch_info->type == MONO_PATCH_INFO_EXC)
5232                         max_epilog_size += 40;
5233                 if (patch_info->type == MONO_PATCH_INFO_R8)
5234                         max_epilog_size += 8 + 7; /* sizeof (double) + alignment */
5235                 if (patch_info->type == MONO_PATCH_INFO_R4)
5236                         max_epilog_size += 4 + 7; /* sizeof (float) + alignment */
5237         }
5238
5239         return max_epilog_size;
5240 }
5241
5242 void
5243 mono_arch_flush_icache (guint8 *code, gint size)
5244 {
5245         /* not needed */
5246 }
5247
5248 void
5249 mono_arch_flush_register_windows (void)
5250 {
5251 }
5252
5253 gboolean 
5254 mono_arch_is_inst_imm (gint64 imm)
5255 {
5256         return amd64_is_imm32 (imm);
5257 }
5258
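/* REX prefixes occupy the 0x40 - 0x4f opcode range in 64 bit mode */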
5259 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
5260
5261 static int reg_to_ucontext_reg [] = {
5262         REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
5263         REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
5264         REG_RIP
5265 };
5266
5267 /*
5268  * Determine whether the trap described by SIGCTX is caused by
5269  * integer overflow.
5270  */
5271 gboolean
5272 mono_arch_is_int_overflow (void *sigctx)
5273 {
5274         ucontext_t *ctx = (ucontext_t*)sigctx;
5275         guint8* rip;
5276         int reg;
5277
5278         rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
5279
5280         if (IS_REX (rip [0])) {
5281                 reg = amd64_rex_r (rip [0]);
5282                 rip ++;
5283         }
5284         else
5285                 reg = 0;
5286
5287         if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
5288                 /* idiv REG */
5289                 reg += x86_modrm_rm (rip [1]);
5290
5291                 if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
5292                         return TRUE;
5293         }
5294
5295         return FALSE;
5296 }
5297
5298 gpointer*
5299 mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
5300 {
5301         guint32 reg;
5302         guint32 disp;
5303         guint8 rex = 0;
5304
5305         /* go to the start of the call instruction
5306          *
5307          * address_byte = (m << 6) | (o << 3) | reg
5308          * call opcode: 0xff address_byte displacement
5309          * 0xff m=1,o=2 imm8
5310          * 0xff m=2,o=2 imm32
5311          */
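	/*
	 * Illustrative example: "call *0x30(%rax)" encodes as ff 90 30 00 00 00,
	 * i.e. o=2 selects an indirect call, m=2 selects a 32 bit displacement and
	 * reg=0 is %rax.
	 */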
5312         code -= 6;
5313
5314         if (IS_REX (code [3]) && (code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x3)) {
5315                 /* call *%reg */
5316                 return NULL;
5317         }
5318         else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2)) {
5319                 /* call *[reg+disp32] */
5320                 reg = amd64_modrm_rm (code [1]);
5321                 disp = *(guint32*)(code + 2);
5322                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5323         }
5324         else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1)) {
5325                 /* call *[reg+disp8] */
5326                 reg = amd64_modrm_rm (code [4]);
5327                 disp = *(guint8*)(code + 5);
5328                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5329         }
5330         else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x0)) {
5331                         /*
5332                          * This is an interface call: we should check that the cases above can't catch it earlier
5333                          * 8b 40 30   mov    0x30(%eax),%eax
5334                          * ff 10      call   *(%eax)
5335                          */
5336                 reg = amd64_modrm_rm (code [5]);
5337                 disp = 0;
5338         }
5339         else
5340                 g_assert_not_reached ();
5341
5342         reg += amd64_rex_b (rex);
5343
5344         /* FIXME: */
5345         return (gpointer)((regs [reg]) + disp);
5346 }
5347
5348 /*
5349  * Support for fast access to the thread-local lmf structure using the FS
5350  * segment register on NPTL + kernel 2.6.x.
5351  */
5352
5353 static gboolean tls_offset_inited = FALSE;
5354
5355 /* code should be simply return <tls var>; */
5356 static int 
5357 read_tls_offset_from_method (void* method)
5358 {
5359         guint8 *code = (guint8*)method;
5360
5361         /* 
5362          * Determine the offset of mono_lmf_addr inside the TLS structures
5363          * by disassembling the function above.
5364          */
5365         /* This is generated by gcc 3.3.2 */
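	/* i.e. push %rbp; mov %rsp,%rbp; mov %fs:0x0,%rax; mov <offset32>(%rax),%rax */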
5366         if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5367                 (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5368                 (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5369                 (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
5370                 (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
5371                 (code [15] == 0x80)) {
5372                 return *(gint32*)&(code [16]);
5373         } else if
5374                 /* This is generated by gcc-3.3.2 with -O2 */
5375                 /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
5376                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5377                  (code [3] == 0x04) && (code [4] == 0x25) &&
5378                  (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
5379                  (code [16] == 0xc3)) {
5380                         return *(gint32*)&(code [12]);
5381         } else if 
5382                 /* This is generated by gcc-3.4.1 */
5383                 ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5384                  (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5385                  (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5386                  (code [13] == 0xc9) && (code [14] == 0xc3)) {
5387                         return *(gint32*)&(code [9]);
5388         } else if
5389                 /* This is generated by gcc-3.4.1 with -O2 */
5390                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5391                  (code [3] == 0x04) && (code [4] == 0x25)) {
5392                 return *(gint32*)&(code [5]);
5393         }
5394
5395         return -1;
5396 }
5397
5398 void
5399 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5400 {
5401 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5402         pthread_t self = pthread_self();
5403         pthread_attr_t attr;
5404         void *staddr = NULL;
5405         size_t stsize = 0;
5406         struct sigaltstack sa;
5407 #endif
5408
5409         if (!tls_offset_inited) {
5410                 tls_offset_inited = TRUE;
5411
5412                 lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
5413                 appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
5414                 //thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
5415         }               
5416
5417 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5418
5419         /* Determine stack boundaries */
5420         if (!mono_running_on_valgrind ()) {
5421 #ifdef HAVE_PTHREAD_GETATTR_NP
5422                 pthread_getattr_np( self, &attr );
5423 #else
5424 #ifdef HAVE_PTHREAD_ATTR_GET_NP
5425                 pthread_attr_get_np( self, &attr );
5426 #elif defined(sun)
5427                 pthread_attr_init( &attr );
5428                 pthread_attr_getstacksize( &attr, &stsize );
5429 #else
5430 #error "Not implemented"
5431 #endif
5432 #endif
5433 #ifndef sun
5434                 pthread_attr_getstack( &attr, &staddr, &stsize );
5435 #endif
5436         }
5437
5438         /* 
5439          * staddr seems to be wrong for the main thread, so we keep the value in
5440          * tls->end_of_stack
5441          */
5442         tls->stack_size = stsize;
5443
5444         /* Setup an alternate signal stack */
5445         tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
5446         tls->signal_stack_size = SIGNAL_STACK_SIZE;
5447
5448         sa.ss_sp = tls->signal_stack;
5449         sa.ss_size = SIGNAL_STACK_SIZE;
5450         sa.ss_flags = SS_ONSTACK;
5451         sigaltstack (&sa, NULL);
5452 #endif
5453 }
5454
5455 void
5456 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5457 {
5458 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5459         struct sigaltstack sa;
5460
5461         sa.ss_sp = tls->signal_stack;
5462         sa.ss_size = SIGNAL_STACK_SIZE;
5463         sa.ss_flags = SS_DISABLE;
5464         sigaltstack  (&sa, NULL);
5465
5466         if (tls->signal_stack)
5467                 g_free (tls->signal_stack);
5468 #endif
5469 }
5470
5471 void
5472 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
5473 {
5474         int out_reg = param_regs [0];
5475
5476         /* FIXME: RDI and RSI might get clobbered */
5477
5478         if (vt_reg != -1) {
5479                 CallInfo * cinfo = get_call_info (inst->signature, FALSE);
5480                 MonoInst *vtarg;
5481
5482                 if (cinfo->ret.storage == ArgValuetypeInReg) {
5483                         /*
5484                          * The valuetype is in RAX:RDX after the call, need to be copied to
5485                          * The valuetype is in RAX:RDX after the call and needs to be copied to
5486                          * access it.
5487                          */
5488                         MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
5489                         vtarg->sreg1 = vt_reg;
5490                         mono_bblock_add_inst (cfg->cbb, vtarg);
5491
5492                         /* Align stack */
5493                         MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
5494                 }
5495                 else {
5496                         MONO_INST_NEW (cfg, vtarg, OP_SETREG);
5497                         vtarg->sreg1 = vt_reg;
5498                         vtarg->dreg = out_reg;
5499                         out_reg = param_regs [1];
5500                         mono_bblock_add_inst (cfg->cbb, vtarg);
5501                 }
5502
5503                 g_free (cinfo);
5504         }
5505
5506         /* add the this argument */
5507         if (this_reg != -1) {
5508                 MonoInst *this;
5509                 MONO_INST_NEW (cfg, this, OP_SETREG);
5510                 this->type = this_type;
5511                 this->sreg1 = this_reg;
5512                 this->dreg = out_reg;
5513                 mono_bblock_add_inst (cfg->cbb, this);
5514         }
5515 }
5516
5517 gint
5518 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5519 {
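	/* The fp intrinsics below are implemented with x87 instructions, which are not used in sse2 mode */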
5520         if (use_sse2)
5521                 return -1;
5522
5523         if (cmethod->klass == mono_defaults.math_class) {
5524                 if (strcmp (cmethod->name, "Sin") == 0)
5525                         return OP_SIN;
5526                 else if (strcmp (cmethod->name, "Cos") == 0)
5527                         return OP_COS;
5528                 else if (strcmp (cmethod->name, "Tan") == 0)
5529                         return OP_TAN;
5530                 else if (strcmp (cmethod->name, "Atan") == 0)
5531                         return OP_ATAN;
5532                 else if (strcmp (cmethod->name, "Sqrt") == 0)
5533                         return OP_SQRT;
5534                 else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8)
5535                         return OP_ABS;
5536 #if 0
5537                 /* OP_FREM is not IEEE compatible */
5538                 else if (strcmp (cmethod->name, "IEEERemainder") == 0)
5539                         return OP_FREM;
5540 #endif
5541                 else
5542                         return -1;
5543         } else {
5544                 return -1;
5545         }
5546         return -1;
5547 }
5548
5549
5550 gboolean
5551 mono_arch_print_tree (MonoInst *tree, int arity)
5552 {
5553         return 0;
5554 }
5555
5556 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5557 {
5558         MonoInst* ins;
5559         
5560         if (appdomain_tls_offset == -1)
5561                 return NULL;
5562         
5563         MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
5564         ins->inst_offset = appdomain_tls_offset;
5565         return ins;
5566 }
5567
5568 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
5569 {
5570         MonoInst* ins;
5571         
5572         if (thread_tls_offset == -1)
5573                 return NULL;
5574         
5575         MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
5576         ins->inst_offset = thread_tls_offset;
5577         return ins;
5578 }