/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Based on mini-x86.c.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Patrik Torstensson
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-amd64.h"
#include "inssel.h"
#include "cpu-amd64.h"

static gint lmf_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;

/* Use SSE2 instructions for fp arithmetic */
static gboolean use_sse2 = TRUE;

/* xmm15 is reserved for use by some opcodes */
#define AMD64_CALLEE_FREGS 0xef

#define FPSTACK_SIZE 6

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
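
/*
 * Worked examples (illustration only):
 *   ALIGN_TO (9, 8)   == 16   rounds up to the next multiple of the alignment
 *   ALIGN_TO (16, 8)  == 16   already aligned values are left unchanged
 *   IS_IMM32 (0xffffffffULL)  is TRUE since the upper 32 bits are zero, while
 *   IS_IMM32 (1ULL << 32)     is FALSE and needs a full 64 bit encoding
 */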

#ifdef PLATFORM_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif

#define SIGNAL_STACK_SIZE (64 * 1024)

#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */

/*
 * FIXME:
 * - Use xmm registers instead of the x87 stack
 * - Allocate arguments to global registers
 * - implement emulated opcodes
 * - (all archs) do not store trampoline addresses in method->info since they
 *   are domain specific.
 */

/*
 * Floating point comparison results:
 *                  ZF PF CF
 * A > B            0  0  0
 * A < B            0  0  1
 * A = B            1  0  0
 * UNORDERED        1  1  1
 */
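
/*
 * For example, once the result is in EFLAGS (via fnstsw + sahf or fcomi),
 * "ja" (CF = 0 and ZF = 0) is taken only for A > B, while "jp" (PF = 1)
 * singles out the UNORDERED (NaN) case.
 */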

#define NOT_IMPLEMENTED g_assert_not_reached ()

const char*
mono_arch_regname (int reg) {
        switch (reg) {
        case AMD64_RAX: return "%rax";
        case AMD64_RBX: return "%rbx";
        case AMD64_RCX: return "%rcx";
        case AMD64_RDX: return "%rdx";
        case AMD64_RSP: return "%rsp";
        case AMD64_RBP: return "%rbp";
        case AMD64_RDI: return "%rdi";
        case AMD64_RSI: return "%rsi";
        case AMD64_R8: return "%r8";
        case AMD64_R9: return "%r9";
        case AMD64_R10: return "%r10";
        case AMD64_R11: return "%r11";
        case AMD64_R12: return "%r12";
        case AMD64_R13: return "%r13";
        case AMD64_R14: return "%r14";
        case AMD64_R15: return "%r15";
        }
        return "unknown";
}

static const char * xmmregs [] = {
        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
        "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};

static const char*
mono_arch_fregname (int reg)
{
        if (reg < AMD64_XMM_NREG)
                return xmmregs [reg];
        else
                return "unknown";
}

static const char*
mono_amd64_regname (int reg, gboolean fp)
{
        if (fp)
                return mono_arch_fregname (reg);
        else
                return mono_arch_regname (reg);
}

static inline void
amd64_patch (unsigned char* code, gpointer target)
{
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f))
                code += 1;

        if ((code [0] & 0xf8) == 0xb8) {
                /* amd64_set_reg_template */
                *(guint64*)(code + 1) = (guint64)target;
        }
        else if (code [0] == 0x8b) {
                /* mov 0(%rip), %dreg */
                *(guint32*)(code + 2) = (guint32)(guint64)target - 7;
        }
        else if ((code [0] == 0xff) && (code [1] == 0x15)) {
                /* call *<OFFSET>(%rip) */
                *(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
        }
        else
                x86_patch (code, (unsigned char*)target);
}
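
/*
 * Typical use (sketch): a call site emitted as
 *
 *     amd64_set_reg_template (code, GP_SCRATCH_REG);
 *     amd64_call_reg (code, GP_SCRATCH_REG);
 *
 * records its start address in the patch info; once the target is known,
 * amd64_patch (start, target) skips the REX prefix, recognizes the
 * mov-immediate template and rewrites its 64 bit immediate in place.
 */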

typedef enum {
        ArgInIReg,
        ArgInFloatSSEReg,
        ArgInDoubleSSEReg,
        ArgOnStack,
        ArgValuetypeInReg,
        ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
        gint16 offset;
        gint8  reg;
        ArgStorage storage;

        /* Only if storage == ArgValuetypeInReg */
        ArgStorage pair_storage [2];
        gint8 pair_regs [2];
} ArgInfo;

typedef struct {
        int nargs;
        guint32 stack_usage;
        guint32 reg_usage;
        guint32 freg_usage;
        gboolean need_stack_align;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define NEW_ICONST(cfg,dest,val) do {   \
                (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
                (dest)->opcode = OP_ICONST;     \
                (dest)->inst_c0 = (val);        \
                (dest)->type = STACK_I4;        \
        } while (0)

#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };

static inline void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
        ainfo->offset = *stack_size;

        if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                ainfo->storage = ArgInIReg;
                ainfo->reg = param_regs [*gr];
                (*gr) ++;
        }
}

#define FLOAT_PARAM_REGS 8

static inline void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
        ainfo->offset = *stack_size;

        if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                /* A double register */
                if (is_double)
                        ainfo->storage = ArgInDoubleSSEReg;
                else
                        ainfo->storage = ArgInFloatSSEReg;
                ainfo->reg = *gr;
                (*gr) += 1;
        }
}

typedef enum ArgumentClass {
        ARG_CLASS_NO_CLASS,
        ARG_CLASS_MEMORY,
        ARG_CLASS_INTEGER,
        ARG_CLASS_SSE
} ArgumentClass;

static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
        ArgumentClass class2;
        MonoType *ptype;

        ptype = mono_type_get_underlying_type (type);
        switch (ptype->type) {
        case MONO_TYPE_BOOLEAN:
        case MONO_TYPE_CHAR:
        case MONO_TYPE_I1:
        case MONO_TYPE_U1:
        case MONO_TYPE_I2:
        case MONO_TYPE_U2:
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_STRING:
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_PTR:
        case MONO_TYPE_FNPTR:
        case MONO_TYPE_ARRAY:
        case MONO_TYPE_I8:
        case MONO_TYPE_U8:
                class2 = ARG_CLASS_INTEGER;
                break;
        case MONO_TYPE_R4:
        case MONO_TYPE_R8:
                class2 = ARG_CLASS_SSE;
                break;

        case MONO_TYPE_TYPEDBYREF:
                g_assert_not_reached ();

        case MONO_TYPE_VALUETYPE: {
                MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
                int i;

                for (i = 0; i < info->num_fields; ++i) {
                        class2 = class1;
                        class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
                }
                break;
        }
        default:
                g_assert_not_reached ();
        }

        /* Merge */
        if (class1 == class2)
                ;
        else if (class1 == ARG_CLASS_NO_CLASS)
                class1 = class2;
        else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
                class1 = ARG_CLASS_MEMORY;
        else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
                class1 = ARG_CLASS_INTEGER;
        else
                class1 = ARG_CLASS_SSE;

        return class1;
}

static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
               gboolean is_return,
               guint32 *gr, guint32 *fr, guint32 *stack_size)
{
        guint32 size, quad, nquads, i;
        ArgumentClass args [2];
        MonoMarshalType *info;
        MonoClass *klass;

        klass = mono_class_from_mono_type (type);
        if (sig->pinvoke)
                size = mono_type_native_stack_size (&klass->byval_arg, NULL);
        else
                size = mono_type_stack_size (&klass->byval_arg, NULL);

        if (!sig->pinvoke || (size == 0) || (size > 16)) {
                /* Always pass in memory */
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        /* FIXME: Handle structs smaller than 8 bytes */
        //if ((size % 8) != 0)
        //      NOT_IMPLEMENTED;

        if (size > 8)
                nquads = 2;
        else
                nquads = 1;

        /*
         * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
         * The X87 and SSEUP stuff is left out since there are no such types in
         * the CLR.
         */
        info = mono_marshal_load_type_info (klass);
        g_assert (info);
        if (info->native_size > 16) {
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (info->native_size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        for (quad = 0; quad < nquads; ++quad) {
                int size, align;
                ArgumentClass class1;

                class1 = ARG_CLASS_NO_CLASS;
                for (i = 0; i < info->num_fields; ++i) {
                        size = mono_marshal_type_size (info->fields [i].field->type,
                                                       info->fields [i].mspec,
                                                       &align, TRUE, klass->unicode);
                        if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
                                /* Unaligned field */
                                NOT_IMPLEMENTED;
                        }

                        /* Skip fields in other quad */
                        if ((quad == 0) && (info->fields [i].offset >= 8))
                                continue;
                        if ((quad == 1) && (info->fields [i].offset < 8))
                                continue;

                        class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
                }
                g_assert (class1 != ARG_CLASS_NO_CLASS);
                args [quad] = class1;
        }

        /* Post merger cleanup */
        if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
                args [0] = args [1] = ARG_CLASS_MEMORY;

        /* Allocate registers */
        {
                int orig_gr = *gr;
                int orig_fr = *fr;

                ainfo->storage = ArgValuetypeInReg;
                ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
                for (quad = 0; quad < nquads; ++quad) {
                        switch (args [quad]) {
                        case ARG_CLASS_INTEGER:
                                if (*gr >= PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInIReg;
                                        if (is_return)
                                                ainfo->pair_regs [quad] = return_regs [*gr];
                                        else
                                                ainfo->pair_regs [quad] = param_regs [*gr];
                                        (*gr) ++;
                                }
                                break;
                        case ARG_CLASS_SSE:
                                if (*fr >= FLOAT_PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
                                        ainfo->pair_regs [quad] = *fr;
                                        (*fr) ++;
                                }
                                break;
                        case ARG_CLASS_MEMORY:
                                break;
                        default:
                                g_assert_not_reached ();
                        }
                }

                if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
                        /* Revert possible register assignments */
                        *gr = orig_gr;
                        *fr = orig_fr;

                        ainfo->offset = *stack_size;
                        *stack_size += ALIGN_TO (info->native_size, 8);
                        ainfo->storage = ArgOnStack;
                }
        }
}
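
/*
 * Worked example (System V amd64 ABI, section 3.2.3): for
 *
 *     struct { double d; gint64 i; }
 *
 * the first eightbyte classifies as SSE and the second as INTEGER, so with
 * registers available the result is pair_storage = { ArgInDoubleSSEReg,
 * ArgInIReg }, e.g. %xmm0 plus %rdi. If either quad ends up as
 * ARG_CLASS_MEMORY, the register assignments are reverted and the whole
 * struct is passed on the stack.
 */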

/*
 * get_call_info:
 *
 *  Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
        guint32 i, gr, fr;
        MonoType *ret_type;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;

        cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

        gr = 0;
        fr = 0;

        /* return value */
        {
                ret_type = mono_type_get_underlying_type (sig->ret);
                switch (ret_type->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                case MONO_TYPE_STRING:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_R4:
                        cinfo->ret.storage = ArgInFloatSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_R8:
                        cinfo->ret.storage = ArgInDoubleSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_VALUETYPE: {
                        guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

                        add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
                        if (cinfo->ret.storage == ArgOnStack)
                                /* The caller passes the address where the value is stored */
                                add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                }
                case MONO_TYPE_TYPEDBYREF:
                        /* Same as a valuetype with size 24 */
                        add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                case MONO_TYPE_VOID:
                        break;
                default:
                        g_error ("Can't handle as return value 0x%x", sig->ret->type);
                }
        }

        /* this */
        if (sig->hasthis)
                add_general (&gr, &stack_size, cinfo->args + 0);

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        for (i = 0; i < sig->param_count; ++i) {
                ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
                MonoType *ptype;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        /* We always pass the sig cookie on the stack for simplicity */
                        /*
                         * Prevent implicit arguments + the sig cookie from being passed
                         * in registers.
                         */
                        gr = PARAM_REGS;
                        fr = FLOAT_PARAM_REGS;

                        /* Emit the signature cookie just before the implicit arguments */
                        add_general (&gr, &stack_size, &cinfo->sig_cookie);
                }

                if (sig->params [i]->byref) {
                        add_general (&gr, &stack_size, ainfo);
                        continue;
                }
                ptype = mono_type_get_underlying_type (sig->params [i]);
                switch (ptype->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_STRING:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_VALUETYPE:
                        add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        stack_size += sizeof (MonoTypedRef);
                        ainfo->storage = ArgOnStack;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_R4:
                        add_float (&fr, &stack_size, ainfo, FALSE);
                        break;
                case MONO_TYPE_R8:
                        add_float (&fr, &stack_size, ainfo, TRUE);
                        break;
                default:
                        g_assert_not_reached ();
                }
        }

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        if (stack_size & 0x8) {
                /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
                cinfo->need_stack_align = TRUE;
                stack_size += 8;
        }

        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
        cinfo->freg_usage = fr;
        return cinfo;
}
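
/*
 * Usage sketch: callers query the computed layout and free it afterwards,
 * e.g.:
 *
 *     CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
 *     if (cinfo->args [0].storage == ArgInIReg)
 *             ... the first argument arrives in cinfo->args [0].reg ...
 *     g_free (cinfo);
 */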

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
        int k;
        CallInfo *cinfo = get_call_info (csig, FALSE);
        guint32 args_size = cinfo->stack_usage;

        /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
        if (csig->hasthis) {
                arg_info [0].offset = 0;
        }

        for (k = 0; k < param_count; k++) {
                arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
                /* FIXME: */
                arg_info [k + 1].size = 0;
        }

        g_free (cinfo);

        return args_size;
}

static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
        return 0;
}
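
/*
 * The above is still a stub, so mono_arch_cpu_optimizazions () below detects
 * no features. A possible implementation (untested sketch): CPUID always
 * exists on amd64, so the presence check used on x86 can be dropped, e.g.
 *
 *     __asm__ __volatile__ ("cpuid"
 *             : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
 *             : "a" (id));
 *     return 1;
 */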

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
        guint16 fpcw;

        /* spec compliance requires running with double precision */
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
        fpcw &= ~X86_FPCW_PRECC_MASK;
        fpcw |= X86_FPCW_PREC_DOUBLE;
        __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));

        mono_amd64_exceptions_init ();
        mono_amd64_tramp_init ();
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
        int eax, ebx, ecx, edx;
        guint32 opts = 0;

        /* FIXME: AMD64 */

        *exclude_mask = 0;
        /* Feature Flags function, flags returned in EDX. */
        if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
                if (edx & (1 << 15)) {
                        opts |= MONO_OPT_CMOV;
                        if (edx & 1)
                                opts |= MONO_OPT_FCMOV;
                        else
                                *exclude_mask |= MONO_OPT_FCMOV;
                } else
                        *exclude_mask |= MONO_OPT_CMOV;
        }
        return opts;
}

gboolean
mono_amd64_is_sse2 (void)
{
        return use_sse2;
}

static gboolean
is_regsize_var (MonoType *t) {
        if (t->byref)
                return TRUE;
        t = mono_type_get_underlying_type (t);
        switch (t->type) {
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
                return TRUE;
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_STRING:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_ARRAY:
                return TRUE;
        case MONO_TYPE_VALUETYPE:
                return FALSE;
        }
        return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
        GList *vars = NULL;
        int i;

        for (i = 0; i < cfg->num_varinfo; i++) {
                MonoInst *ins = cfg->varinfo [i];
                MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

                /* unused vars */
                if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
                        continue;

                if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
                    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
                        continue;

                /* we don't allocate I1 to registers because there is no simple way to sign extend
                 * 8bit quantities in caller saved registers on x86 */
                if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
                    (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2) ||
                    (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
                        g_assert (MONO_VARINFO (cfg, i)->reg == -1);
                        g_assert (i == vmv->idx);
                        vars = g_list_prepend (vars, vmv);
                }
        }

        vars = mono_varlist_sort (cfg, vars, 0);

        return vars;
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
        GList *regs = NULL;

        /* We use the callee saved registers for global allocation */
        regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
        regs = g_list_prepend (regs, (gpointer)AMD64_R12);
        regs = g_list_prepend (regs, (gpointer)AMD64_R13);
        regs = g_list_prepend (regs, (gpointer)AMD64_R14);
        regs = g_list_prepend (regs, (gpointer)AMD64_R15);

        return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
        MonoInst *ins = cfg->varinfo [vmv->idx];

        if (cfg->method->save_lmf)
                /* The register is already saved */
                /* subtract 1 for the invisible store in the prolog */
                return (ins->opcode == OP_ARG) ? 0 : 1;
        else
                /* push+pop */
                return (ins->opcode == OP_ARG) ? 1 : 2;
}

void
mono_arch_allocate_vars (MonoCompile *m)
{
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        MonoInst *inst;
        int i, offset;
        guint32 locals_stack_size, locals_stack_align;
        gint32 *offsets;
        CallInfo *cinfo;

        header = mono_method_get_header (m->method);

        sig = mono_method_signature (m->method);

        cinfo = get_call_info (sig, FALSE);

        /*
         * We use the ABI calling conventions for managed code as well.
         * Exception: valuetypes are never passed or returned in registers.
         */

        /* Locals are allocated backwards from %fp */
        m->frame_reg = AMD64_RBP;
        offset = 0;

        /* Reserve space for callee saved registers */
        for (i = 0; i < AMD64_NREG; ++i)
                if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
                        offset += sizeof (gpointer);
                }

        if (m->method->save_lmf) {
                /* Reserve stack space for saving LMF + argument regs */
                offset += sizeof (MonoLMF);
                if (lmf_tls_offset == -1)
                        /* Need to save argument regs too */
                        offset += (AMD64_NREG * 8) + (8 * 8);
                m->arch.lmf_offset = offset;
        }

        if (sig->ret->type != MONO_TYPE_VOID) {
                switch (cinfo->ret.storage) {
                case ArgInIReg:
                case ArgInFloatSSEReg:
                case ArgInDoubleSSEReg:
                        if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
                                /* The register is volatile */
                                m->ret->opcode = OP_REGOFFSET;
                                m->ret->inst_basereg = AMD64_RBP;
                                offset += 8;
                                m->ret->inst_offset = - offset;
                        }
                        else {
                                m->ret->opcode = OP_REGVAR;
                                m->ret->inst_c0 = cinfo->ret.reg;
                        }
                        break;
                case ArgValuetypeInReg:
                        /* Allocate a local to hold the result, the epilog will copy it to the correct place */
                        offset += 16;
                        m->ret->opcode = OP_REGOFFSET;
                        m->ret->inst_basereg = AMD64_RBP;
                        m->ret->inst_offset = - offset;
                        break;
                default:
                        g_assert_not_reached ();
                }
                m->ret->dreg = m->ret->inst_c0;
        }

        /* Allocate locals */
        offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
        if (locals_stack_align) {
                offset += (locals_stack_align - 1);
                offset &= ~(locals_stack_align - 1);
        }
        for (i = m->locals_start; i < m->num_varinfo; i++) {
                if (offsets [i] != -1) {
                        MonoInst *inst = m->varinfo [i];
                        inst->opcode = OP_REGOFFSET;
                        inst->inst_basereg = AMD64_RBP;
                        inst->inst_offset = - (offset + offsets [i]);
                        //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
                }
        }
        g_free (offsets);
        offset += locals_stack_size;

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
                g_assert (cinfo->sig_cookie.storage == ArgOnStack);
                m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
        }

        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
                inst = m->varinfo [i];
                if (inst->opcode != OP_REGVAR) {
                        ArgInfo *ainfo = &cinfo->args [i];
                        gboolean inreg = TRUE;
                        MonoType *arg_type;

                        if (sig->hasthis && (i == 0))
                                arg_type = &mono_defaults.object_class->byval_arg;
                        else
                                arg_type = sig->params [i - sig->hasthis];

                        /* FIXME: Allocate volatile arguments to registers */
                        if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
                                inreg = FALSE;

                        /*
                         * Under AMD64, all registers used to pass arguments to functions
                         * are volatile across calls.
                         * FIXME: Optimize this.
                         */
                        if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
                                inreg = FALSE;

                        inst->opcode = OP_REGOFFSET;

                        switch (ainfo->storage) {
                        case ArgInIReg:
                        case ArgInFloatSSEReg:
                        case ArgInDoubleSSEReg:
                                inst->opcode = OP_REGVAR;
                                inst->dreg = ainfo->reg;
                                break;
                        case ArgOnStack:
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                inst->inst_offset = ainfo->offset + ARGS_OFFSET;
                                break;
                        case ArgValuetypeInReg:
                                break;
                        default:
                                NOT_IMPLEMENTED;
                        }

                        if (!inreg && (ainfo->storage != ArgOnStack)) {
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                /* These arguments are saved to the stack in the prolog */
                                if (ainfo->storage == ArgValuetypeInReg)
                                        offset += 2 * sizeof (gpointer);
                                else
                                        offset += sizeof (gpointer);
                                inst->inst_offset = - offset;
                        }
                }
        }

        m->stack_offset = offset;

        g_free (cinfo);
}
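
/*
 * Resulting frame layout (sketch, offsets relative to %rbp; the prolog
 * performs the actual saves):
 *
 *     incoming stack arguments      rbp + ARGS_OFFSET + ainfo->offset
 *     return address                rbp + 8
 *     saved rbp                     rbp + 0
 *     used callee saved registers   rbp - ...
 *     LMF (+ argument registers)    rbp - lmf_offset, only if save_lmf
 *     vtype return buffer           rbp - ...
 *     locals                        rbp - (offset + offsets [i])
 *     spilled register arguments    rbp - ... down to rbp - stack_offset
 */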

void
mono_arch_create_vars (MonoCompile *cfg)
{
        MonoMethodSignature *sig;
        CallInfo *cinfo;

        sig = mono_method_signature (cfg->method);

        cinfo = get_call_info (sig, FALSE);

        if (cinfo->ret.storage == ArgValuetypeInReg)
                cfg->ret_var_is_local = TRUE;

        g_free (cinfo);
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
{
        switch (storage) {
        case ArgInIReg:
                arg->opcode = OP_OUTARG_REG;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_iregs |= 1 << reg;
                break;
        case ArgInFloatSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        case ArgInDoubleSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        default:
                g_assert_not_reached ();
        }
}

/* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode,
 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
 */

static int
arg_storage_to_ldind (ArgStorage storage)
{
        switch (storage) {
        case ArgInIReg:
                return CEE_LDIND_I;
        case ArgInDoubleSSEReg:
                return CEE_LDIND_R8;
        case ArgInFloatSSEReg:
                return CEE_LDIND_R4;
        default:
                g_assert_not_reached ();
        }

        return -1;
}

/*
 * take the arguments and generate the arch-specific
 * instructions to properly call the function in call.
 * This includes pushing, moving arguments to the right register
 * etc.
 * Issue: who does the spilling if needed, and when?
 */
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
        MonoInst *arg, *in;
        MonoMethodSignature *sig;
        int i, n, stack_size;
        CallInfo *cinfo;
        ArgInfo *ainfo;

        stack_size = 0;

        sig = call->signature;
        n = sig->param_count + sig->hasthis;

        cinfo = get_call_info (sig, sig->pinvoke);

        for (i = 0; i < n; ++i) {
                ainfo = cinfo->args + i;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        MonoMethodSignature *tmp_sig;

                        /* Emit the signature cookie just before the implicit arguments */
                        MonoInst *sig_arg;
                        /* FIXME: Add support for signature tokens to AOT */
                        cfg->disable_aot = TRUE;

                        g_assert (cinfo->sig_cookie.storage == ArgOnStack);

                        /*
                         * mono_ArgIterator_Setup assumes the signature cookie is
                         * passed first and all the arguments which were before it are
                         * passed on the stack after the signature. So compensate by
                         * passing a different signature.
                         */
                        tmp_sig = mono_metadata_signature_dup (call->signature);
                        tmp_sig->param_count -= call->signature->sentinelpos;
                        tmp_sig->sentinelpos = 0;
                        memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

                        MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
                        sig_arg->inst_p0 = tmp_sig;

                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        arg->inst_left = sig_arg;
                        arg->type = STACK_PTR;

                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;
                }

                if (is_virtual && i == 0) {
                        /* the argument will be attached to the call instruction */
                        in = call->args [i];
                } else {
                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        in = call->args [i];
                        arg->cil_code = in->cil_code;
                        arg->inst_left = in;
                        arg->type = in->type;
                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;

                        if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
                                gint align;
                                guint32 size;

                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
                                        size = sizeof (MonoTypedRef);
                                        align = sizeof (gpointer);
                                }
                                else if (sig->pinvoke)
                                        size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
                                else
                                        size = mono_type_stack_size (&in->klass->byval_arg, &align);
                                if (ainfo->storage == ArgValuetypeInReg) {
                                        if (ainfo->pair_storage [1] == ArgNone) {
                                                MonoInst *load;

                                                /* Simpler case */

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = in;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
                                        }
                                        else {
                                                /* Trees can't be shared so make a copy */
                                                MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
                                                MonoInst *load, *load2, *offset_ins;

                                                /* Reg1 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 0);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = load2;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);

                                                /* Reg2 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 8);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
                                                load->inst_left = load2;

                                                MONO_INST_NEW (cfg, arg, OP_OUTARG);
                                                arg->cil_code = in->cil_code;
                                                arg->type = in->type;
                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);

                                                /* Prepend a copy inst */
                                                MONO_INST_NEW (cfg, arg, CEE_STIND_I);
                                                arg->cil_code = in->cil_code;
                                                arg->ssa_op = MONO_SSA_STORE;
                                                arg->inst_left = vtaddr;
                                                arg->inst_right = in;
                                                arg->type = in->type;

                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;
                                        }
                                }
                                else {
                                        arg->opcode = OP_OUTARG_VT;
                                        arg->klass = in->klass;
                                        arg->unused = sig->pinvoke;
                                        arg->inst_imm = size;
                                }
                        }
                        else {
                                switch (ainfo->storage) {
                                case ArgInIReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgInFloatSSEReg:
                                case ArgInDoubleSSEReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgOnStack:
                                        arg->opcode = OP_OUTARG;
                                        if (!sig->params [i - sig->hasthis]->byref) {
                                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
                                                        arg->opcode = OP_OUTARG_R4;
                                                else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
                                                        arg->opcode = OP_OUTARG_R8;
                                        }
                                        break;
                                default:
                                        g_assert_not_reached ();
                                }
                        }
                }
        }

        if (cinfo->need_stack_align) {
                MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
                /* prepend, so they get reversed */
                arg->next = call->out_args;
                call->out_args = arg;
        }

        call->stack_usage = cinfo->stack_usage;
        cfg->param_area = MAX (cfg->param_area, call->stack_usage);
        cfg->flags |= MONO_CFG_HAS_CALLS;

        g_free (cinfo);

        return call;
}

#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
        if (ins->inst_i0->inst_c0) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
} else { \
        if (ins->inst_true_bb->native_offset) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
}

/* emit an exception if the condition fails */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
        do {                                                        \
                mono_add_patch_info (cfg, code - cfg->native_code,   \
                                    MONO_PATCH_INFO_EXC, exc_name);  \
                x86_branch32 (code, cond, 0, signed);               \
        } while (0)

#define EMIT_FPCOMPARE(code) do { \
        amd64_fcompp (code); \
        amd64_fnstsw (code); \
} while (0)

static guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
        mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);

        if (mono_compile_aot) {
                amd64_call_membase (code, AMD64_RIP, 0);
        }
        else {
                gboolean near_call = FALSE;

                /*
                 * Indirect calls are expensive, so try to make a near call if possible.
                 * The memory containing the call site is allocated by the code manager,
                 * so targets in code manager memory are guaranteed to be within a
                 * 32 bit offset.
                 */

                if (patch_type != MONO_PATCH_INFO_ABS) {
                        /* The target is in memory allocated using the code manager */
                        near_call = TRUE;

                        if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
                                if (((MonoMethod*)data)->klass->image->assembly->aot_module)
                                        /* The callee might be an AOT method */
                                        near_call = FALSE;
                        }
                }
                else {
                        if (mono_find_class_init_trampoline_by_addr (data))
                                near_call = TRUE;
                        else {
                                MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
                                if (info) {
                                        if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
                                                strstr (cfg->method->name, info->name)) {
                                                /* A call to the wrapped function */
                                                if ((((guint64)data) >> 32) == 0)
                                                        near_call = TRUE;
                                        }
                                        else
                                                near_call = TRUE;
                                }
                                else if ((((guint64)data) >> 32) == 0)
                                        near_call = TRUE;
                        }
                }

                if (near_call) {
                        amd64_call_code (code, 0);
                }
                else {
                        amd64_set_reg_template (code, GP_SCRATCH_REG);
                        amd64_call_reg (code, GP_SCRATCH_REG);
                }
        }

        return code;
}
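
/*
 * The two emit_call () cases above produce (sketch):
 *
 *     near:  e8 xx xx xx xx      call rel32      (5 bytes, direct)
 *     far:   49 bb xx*8          mov $imm64, %r11
 *            41 ff d3            call *%r11
 *
 * so a near call is both smaller and avoids an indirect branch; the rel32
 * (emitted as 0 here) is filled in later by the patching machinery via
 * amd64_patch ().
 */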

/* FIXME: Add more instructions */
#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_SETREG) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))

static void
peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
        MonoInst *ins, *last_ins = NULL;
        ins = bb->code;

        while (ins) {

                switch (ins->opcode) {
                case OP_ICONST:
                case OP_I8CONST:
                        /* reg = 0 -> XOR (reg, reg) */
                        /* XOR sets cflags on x86, so we can't always do it */
1336                         if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
1337                                 ins->opcode = CEE_XOR;
1338                                 ins->sreg1 = ins->dreg;
1339                                 ins->sreg2 = ins->dreg;
1340                         }
1341                         break;
1342                 case OP_MUL_IMM: 
1343                         /* remove unnecessary multiplication with 1 */
1344                         if (ins->inst_imm == 1) {
1345                                 if (ins->dreg != ins->sreg1) {
1346                                         ins->opcode = OP_MOVE;
1347                                 } else {
1348                                         last_ins->next = ins->next;
1349                                         ins = ins->next;
1350                                         continue;
1351                                 }
1352                         }
1353                         break;
1354                 case OP_COMPARE_IMM:
1355                         /* OP_COMPARE_IMM (reg, 0) 
1356                          * --> 
1357                          * OP_AMD64_TEST_NULL (reg) 
1358                          */
1359                         if (!ins->inst_imm)
1360                                 ins->opcode = OP_AMD64_TEST_NULL;
1361                         break;
1362                 case OP_ICOMPARE_IMM:
1363                         if (!ins->inst_imm)
1364                                 ins->opcode = OP_X86_TEST_NULL;
1365                         break;
1366                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
1367                         /* 
1368                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1369                          * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1370                          * -->
1371                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1372                          * OP_COMPARE_IMM reg, imm
1373                          *
1374                          * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1375                          */
1376                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1377                             ins->inst_basereg == last_ins->inst_destbasereg &&
1378                             ins->inst_offset == last_ins->inst_offset) {
1379                                         ins->opcode = OP_ICOMPARE_IMM;
1380                                         ins->sreg1 = last_ins->sreg1;
1381
1382                                         /* check if we can remove cmp reg,0 with test null */
1383                                         if (!ins->inst_imm)
1384                                                 ins->opcode = OP_X86_TEST_NULL;
1385                                 }
1386
1387                         break;
1388                 case OP_LOAD_MEMBASE:
1389                 case OP_LOADI4_MEMBASE:
1390                         /* 
1391                          * Note: if reg1 = reg2 the load op is removed
1392                          *
1393                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1394                          * OP_LOAD_MEMBASE offset(basereg), reg2
1395                          * -->
1396                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1397                          * OP_MOVE reg1, reg2
1398                          */
1399                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1400                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1401                             ins->inst_basereg == last_ins->inst_destbasereg &&
1402                             ins->inst_offset == last_ins->inst_offset) {
1403                                 if (ins->dreg == last_ins->sreg1) {
1404                                         last_ins->next = ins->next;                             
1405                                         ins = ins->next;                                
1406                                         continue;
1407                                 } else {
1408                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1409                                         ins->opcode = OP_MOVE;
1410                                         ins->sreg1 = last_ins->sreg1;
1411                                 }
1412
1413                         /* 
1414                          * Note: reg1 must be different from the basereg in the second load
1415                          * Note: if reg1 = reg2 the second load is removed
1416                          *
1417                          * OP_LOAD_MEMBASE offset(basereg), reg1
1418                          * OP_LOAD_MEMBASE offset(basereg), reg2
1419                          * -->
1420                          * OP_LOAD_MEMBASE offset(basereg), reg1
1421                          * OP_MOVE reg1, reg2
1422                          */
1423                         } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1424                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1425                               ins->inst_basereg != last_ins->dreg &&
1426                               ins->inst_basereg == last_ins->inst_basereg &&
1427                               ins->inst_offset == last_ins->inst_offset) {
1428
1429                                 if (ins->dreg == last_ins->dreg) {
1430                                         last_ins->next = ins->next;                             
1431                                         ins = ins->next;                                
1432                                         continue;
1433                                 } else {
1434                                         ins->opcode = OP_MOVE;
1435                                         ins->sreg1 = last_ins->dreg;
1436                                 }
1437
1438                                 //g_assert_not_reached ();
1439
1440 #if 0
1441                         /* 
1442                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1443                          * OP_LOAD_MEMBASE offset(basereg), reg
1444                          * -->
1445                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1446                          * OP_ICONST reg, imm
1447                          */
1448                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1449                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1450                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1451                                    ins->inst_offset == last_ins->inst_offset) {
1452                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1453                                 ins->opcode = OP_ICONST;
1454                                 ins->inst_c0 = last_ins->inst_imm;
1455                                 g_assert_not_reached (); // check this rule
1456 #endif
1457                         }
1458                         break;
1459                 case OP_LOADU1_MEMBASE:
1460                 case OP_LOADI1_MEMBASE:
1461                         /* 
1462                          * Note: if reg1 = reg2 the load op is removed
1463                          *
1464                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1465                          * OP_LOAD_MEMBASE offset(basereg), reg2
1466                          * -->
1467                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1468                          * OP_MOVE reg1, reg2
1469                          */
1470                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1471                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1472                                         ins->inst_offset == last_ins->inst_offset) {
1473                                 if (ins->dreg == last_ins->sreg1) {
1474                                         last_ins->next = ins->next;                             
1475                                         ins = ins->next;                                
1476                                         continue;
1477                                 } else {
1478                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1479                                         ins->opcode = OP_MOVE;
1480                                         ins->sreg1 = last_ins->sreg1;
1481                                 }
1482                         }
1483                         break;
1484                 case OP_LOADU2_MEMBASE:
1485                 case OP_LOADI2_MEMBASE:
1486                         /* 
1487                          * Note: if reg1 = reg2 the load op is removed
1488                          *
1489                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1490                          * OP_LOAD_MEMBASE offset(basereg), reg2
1491                          * -->
1492                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1493                          * OP_MOVE reg1, reg2
1494                          */
1495                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1496                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1497                                         ins->inst_offset == last_ins->inst_offset) {
1498                                 if (ins->dreg == last_ins->sreg1) {
1499                                         last_ins->next = ins->next;                             
1500                                         ins = ins->next;                                
1501                                         continue;
1502                                 } else {
1503                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1504                                         ins->opcode = OP_MOVE;
1505                                         ins->sreg1 = last_ins->sreg1;
1506                                 }
1507                         }
1508                         break;
1509                 case CEE_CONV_I4:
1510                 case CEE_CONV_U4:
1511                 case OP_MOVE:
1512                         /*
1513                          * Removes:
1514                          *
1515                          * OP_MOVE reg, reg 
1516                          */
1517                         if (ins->dreg == ins->sreg1) {
1518                                 if (last_ins)
1519                                         last_ins->next = ins->next;                             
1520                                 ins = ins->next;
1521                                 continue;
1522                         }
1523                         /* 
1524                          * Removes:
1525                          *
1526                          * OP_MOVE sreg, dreg 
1527                          * OP_MOVE dreg, sreg
1528                          */
1529                         if (last_ins && last_ins->opcode == OP_MOVE &&
1530                             ins->sreg1 == last_ins->dreg &&
1531                             ins->dreg == last_ins->sreg1) {
1532                                 last_ins->next = ins->next;                             
1533                                 ins = ins->next;                                
1534                                 continue;
1535                         }
1536                         break;
1537                 }
1538                 last_ins = ins;
1539                 ins = ins->next;
1540         }
1541         bb->last_ins = last_ins;
1542 }
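/*
 * Worked example of the store/load forwarding above, on a made-up
 * register pair:
 *
 *   OP_STOREI4_MEMBASE_REG R10, 8(basereg)
 *   OP_LOADI4_MEMBASE      8(basereg), R10   -> load removed
 *
 *   OP_STOREI4_MEMBASE_REG R10, 8(basereg)
 *   OP_LOADI4_MEMBASE      8(basereg), R11   -> OP_MOVE R10, R11
 *
 * Only adjacent pairs are matched: the pass only remembers last_ins, so
 * any intervening instruction breaks the pattern.
 */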
1543
1544 static const int 
1545 branch_cc_table [] = {
1546         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1547         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1548         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1549 };
1550
1551 static int
1552 opcode_to_x86_cond (int opcode)
1553 {
1554         switch (opcode) {
1555         case OP_IBEQ:
1556                 return X86_CC_EQ;
1557         case OP_IBNE_UN:
1558                 return X86_CC_NE;
1559         case OP_IBLT:
1560                 return X86_CC_LT;
1561         case OP_IBLT_UN:
1562                 return X86_CC_LT;
1563         case OP_IBGT:
1564                 return X86_CC_GT;
1565         case OP_IBGT_UN:
1566                 return X86_CC_GT;
1567         case OP_IBGE:
1568                 return X86_CC_GE;
1569         case OP_IBGE_UN:
1570                 return X86_CC_GE;
1571         case OP_IBLE:
1572                 return X86_CC_LE;
1573         case OP_IBLE_UN:
1574                 return X86_CC_LE;
1575         case OP_COND_EXC_IOV:
1576                 return X86_CC_O;
1577         case OP_COND_EXC_IC:
1578                 return X86_CC_C;
1579         default:
1580                 g_assert_not_reached ();
1581         }
1582
1583         return -1;
1584 }
1585
1586 /*
1587  * returns the offset used by spillvar. It allocates a new
1588  * spill variable if necessary. 
1589  */
1590 static int
1591 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1592 {
1593         MonoSpillInfo **si, *info;
1594         int i = 0;
1595
1596         si = &cfg->spill_info; 
1597         
1598         while (i <= spillvar) {
1599
1600                 if (!*si) {
1601                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1602                         info->next = NULL;
1603                         cfg->stack_offset += sizeof (gpointer);
1604                         info->offset = - cfg->stack_offset;
1605                 }
1606
1607                 if (i == spillvar)
1608                         return (*si)->offset;
1609
1610                 i++;
1611                 si = &(*si)->next;
1612         }
1613
1614         g_assert_not_reached ();
1615         return 0;
1616 }
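/*
 * Example (illustrative offsets, assuming cfg->stack_offset starts at 0
 * and sizeof (gpointer) == 8):
 *
 *   mono_spillvar_offset (cfg, 0)  ->  -8     i.e. -8(%rbp)
 *   mono_spillvar_offset (cfg, 1)  ->  -16
 *
 * Slots are created lazily on first use and grow downwards from the frame
 * pointer; repeated calls with the same index return the same offset.
 */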
1617
1618 /*
1619  * returns the offset used by spillvar. It allocates a new
1620  * spill float variable if necessary. 
1621  * (same as mono_spillvar_offset but for float)
1622  */
1623 static int
1624 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1625 {
1626         MonoSpillInfo **si, *info;
1627         int i = 0;
1628
1629         si = &cfg->spill_info_float; 
1630         
1631         while (i <= spillvar) {
1632
1633                 if (!*si) {
1634                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1635                         info->next = NULL;
1636                         cfg->stack_offset += sizeof (double);
1637                         info->offset = - cfg->stack_offset;
1638                 }
1639
1640                 if (i == spillvar)
1641                         return (*si)->offset;
1642
1643                 i++;
1644                 si = &(*si)->next;
1645         }
1646
1647         g_assert_not_reached ();
1648         return 0;
1649 }
1650
1651 /*
1652  * Creates a store for spilled floating point items
1653  */
1654 static MonoInst*
1655 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1656 {
1657         MonoInst *store;
1658         MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1659         store->sreg1 = reg;
1660         store->inst_destbasereg = AMD64_RBP;
1661         store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1662
1663         DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)store->inst_offset, reg));
1664         return store;
1665 }
1666
1667 /*
1668  * Creates a load for spilled floating point items 
1669  */
1670 static MonoInst*
1671 create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1672 {
1673         MonoInst *load;
1674         MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
1675         load->dreg = reg;
1676         load->inst_basereg = AMD64_RBP;
1677         load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1678
1679         DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)load->inst_offset, reg));
1680         return load;
1681 }
1682
1683 #define is_global_ireg(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_SAVED_REG ((r)))
1684 #define ireg_is_freeable(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_REG ((r)))
1685 #define freg_is_freeable(r) ((r) >= 0 && (r) < AMD64_XMM_NREG)
1686
1687 #define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
1688 #define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
1689 #define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
1690 #define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
1691 #define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
1692 #define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
1693 #define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
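/*
 * ins_spec holds one descriptor string per opcode; the codes this file
 * relies on are roughly the following (not exhaustive, inferred from the
 * uses below):
 *
 *   MONO_INST_DEST/SRC1/SRC2:  'f' float (xmm or fp-stack) register,
 *                              'b' base register of a memory operand,
 *                              'l'/'L' register pair for 64 bit values
 *   MONO_INST_CLOB:            'c' call, 's' shift (sreg2 forced to RCX),
 *                              'd' division (RAX/RDX), 'm' fp op that
 *                              does not push the fp stack
 */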
1694
1695 typedef struct {
1696         int born_in;
1697         int killed_in;
1698         int last_use;
1699         int prev_use;
1700         int flags;              /* used to track fp spill/load */
1701 } RegTrack;
1702
1703 static const char*const * ins_spec = amd64_desc;
1704
1705 static void
1706 print_ins (int i, MonoInst *ins)
1707 {
1708         const char *spec = ins_spec [ins->opcode];
1709         g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1710         if (!spec)
1711                 g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
1712         if (spec [MONO_INST_DEST]) {
1713                 gboolean fp = (spec [MONO_INST_DEST] == 'f');
1714                 if (reg_is_soft (ins->dreg, fp))
1715                         g_print (" R%d <-", ins->dreg);
1716                 else
1717                         g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
1718         }
1719         if (spec [MONO_INST_SRC1]) {
1720                 gboolean fp = (spec [MONO_INST_SRC1] == 'f');
1721                 if (reg_is_soft (ins->sreg1, fp))
1722                         g_print (" R%d", ins->sreg1);
1723                 else
1724                         g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
1725         }
1726         if (spec [MONO_INST_SRC2]) {
1727                 gboolean fp = (spec [MONO_INST_SRC2] == 'f');
1728                 if (reg_is_soft (ins->sreg2, fp))
1729                         g_print (" R%d", ins->sreg2);
1730                 else
1731                         g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
1732         }
1733         if (spec [MONO_INST_CLOB])
1734                 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1735         g_print ("\n");
1736 }
1737
1738 static void
1739 print_regtrack (RegTrack *t, int num)
1740 {
1741         int i;
1742         char buf [32];
1743         const char *r;
1744         
1745         for (i = 0; i < num; ++i) {
1746                 if (!t [i].born_in)
1747                         continue;
1748                 if (i >= MONO_MAX_IREGS) {
1749                         g_snprintf (buf, sizeof(buf), "R%d", i);
1750                         r = buf;
1751                 } else
1752                         r = mono_arch_regname (i);
1753                 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1754         }
1755 }
1756
1757 typedef struct InstList InstList;
1758
1759 struct InstList {
1760         InstList *prev;
1761         InstList *next;
1762         MonoInst *data;
1763 };
1764
1765 static inline InstList*
1766 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1767 {
1768         InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1769         item->data = data;
1770         item->prev = NULL;
1771         item->next = list;
1772         if (list)
1773                 list->prev = item;
1774         return item;
1775 }
1776
1777 /*
1778  * Force the spilling of the variable in the symbolic register 'reg'.
1779  */
1780 static int
1781 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
1782 {
1783         MonoInst *load;
1784         int i, sel, spill;
1785         int *assign, *symbolic;
1786
1787         if (fp) {
1788                 assign = cfg->rs->fassign;
1789                 symbolic = cfg->rs->fsymbolic;
1790         }
1791         else {
1792                 assign = cfg->rs->iassign;
1793                 symbolic = cfg->rs->isymbolic;
1794         }       
1795         
1796         sel = assign [reg];
1797         /*i = cfg->rs->isymbolic [sel];
1798         g_assert (i == reg);*/
1799         i = reg;
1800         spill = ++cfg->spill_count;
1801         assign [i] = -spill - 1;
1802         if (fp)
1803                 mono_regstate_free_float (cfg->rs, sel);
1804         else
1805                 mono_regstate_free_int (cfg->rs, sel);
1806         /* we need to create a spill var and insert a load to sel after the current instruction */
1807         if (fp)
1808                 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1809         else
1810                 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1811         load->dreg = sel;
1812         load->inst_basereg = AMD64_RBP;
1813         load->inst_offset = mono_spillvar_offset (cfg, spill);
1814         if (item->prev) {
1815                 while (ins->next != item->prev->data)
1816                         ins = ins->next;
1817         }
1818         load->next = ins->next;
1819         ins->next = load;
1820         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1821         if (fp)
1822                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1823         else
1824                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1825         g_assert (i == sel);
1826
1827         return sel;
1828 }
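/*
 * Spill bookkeeping used above and below: iassign/fassign [reg] >= 0
 * means the symbolic register currently lives in that hard register,
 * while a value of the form -spill - 1 (i.e. < -1) means the value sits
 * in spill slot 'spill' and a store to that slot must be emitted after
 * the instruction that defines it:
 *
 *   assign [reg] = -spill - 1;      encode
 *   spill = -assign [reg] - 1;      decode (see the 'val < -1' checks)
 */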
1829
1830 static int
1831 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
1832 {
1833         MonoInst *load;
1834         int i, sel, spill;
1835         int *assign, *symbolic;
1836
1837         if (fp) {
1838                 assign = cfg->rs->fassign;
1839                 symbolic = cfg->rs->fsymbolic;
1840         }
1841         else {
1842                 assign = cfg->rs->iassign;
1843                 symbolic = cfg->rs->isymbolic;
1844         }
1845
1846         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1847         /* exclude the registers in the current instruction */
1848         if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
1849                 if (reg_is_soft (ins->sreg1, fp))
1850                         regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
1851                 else
1852                         regmask &= ~ (1 << ins->sreg1);
1853                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
1854         }
1855         if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
1856                 if (reg_is_soft (ins->sreg2, fp))
1857                         regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
1858                 else
1859                         regmask &= ~ (1 << ins->sreg2);
1860                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
1861         }
1862         if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
1863                 regmask &= ~ (1 << ins->dreg);
1864                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
1865         }
1866
1867         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1868         g_assert (regmask); /* need at least a register we can free */
1869         sel = -1;
1870         /* we should track prev_use and spill the register that's farther */
1871         if (fp) {
1872                 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1873                         if (regmask & (1 << i)) {
1874                                 sel = i;
1875                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
1876                                 break;
1877                         }
1878                 }
1879
1880                 i = cfg->rs->fsymbolic [sel];
1881                 spill = ++cfg->spill_count;
1882                 cfg->rs->fassign [i] = -spill - 1;
1883                 mono_regstate_free_float (cfg->rs, sel);
1884         }
1885         else {
1886                 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1887                         if (regmask & (1 << i)) {
1888                                 sel = i;
1889                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1890                                 break;
1891                         }
1892                 }
1893
1894                 i = cfg->rs->isymbolic [sel];
1895                 spill = ++cfg->spill_count;
1896                 cfg->rs->iassign [i] = -spill - 1;
1897                 mono_regstate_free_int (cfg->rs, sel);
1898         }
1899
1900         /* we need to create a spill var and insert a load to sel after the current instruction */
1901         MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
1902         load->dreg = sel;
1903         load->inst_basereg = AMD64_RBP;
1904         load->inst_offset = mono_spillvar_offset (cfg, spill);
1905         if (item->prev) {
1906                 while (ins->next != item->prev->data)
1907                         ins = ins->next;
1908         }
1909         load->next = ins->next;
1910         ins->next = load;
1911         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1912         if (fp)
1913                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1914         else
1915                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1916         g_assert (i == sel);
1917         
1918         return sel;
1919 }
1920
1921 static MonoInst*
1922 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
1923 {
1924         MonoInst *copy;
1925
1926         if (fp)
1927                 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1928         else
1929                 MONO_INST_NEW (cfg, copy, OP_MOVE);
1930
1931         copy->dreg = dest;
1932         copy->sreg1 = src;
1933         if (ins) {
1934                 copy->next = ins->next;
1935                 ins->next = copy;
1936         }
1937         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_amd64_regname (src, fp), mono_amd64_regname (dest, fp)));
1938         return copy;
1939 }
1940
1941 static MonoInst*
1942 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
1943 {
1944         MonoInst *store;
1945         MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
1946         store->sreg1 = reg;
1947         store->inst_destbasereg = AMD64_RBP;
1948         store->inst_offset = mono_spillvar_offset (cfg, spill);
1949         if (ins) {
1950                 store->next = ins->next;
1951                 ins->next = store;
1952         }
1953         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%rbp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
1954         return store;
1955 }
1956
1957 static void
1958 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1959 {
1960         MonoInst *prev;
1961         if (item->next) {
1962                 prev = item->next->data;
1963
1964                 while (prev->next != ins)
1965                         prev = prev->next;
1966                 to_insert->next = ins;
1967                 prev->next = to_insert;
1968         } else {
1969                 to_insert->next = ins;
1970         }
1971         /* 
1972          * needed otherwise in the next instruction we can add an ins to the 
1973          * end and that would get past this instruction.
1974          */
1975         item->data = to_insert; 
1976 }
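/*
 * Note on list orientation: the 'reversed' InstList is built by
 * prepending while walking the basic block forwards, so item->next points
 * at an *earlier* instruction and item->prev at a *later* one. That is
 * why insert_before_ins starts from item->next->data and walks forwards
 * to find the predecessor of 'ins'.
 */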
1977
1978 /* flags used in reginfo->flags */
1979 enum {
1980         MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
1981         MONO_X86_FP_NEEDS_SPILL         = 1 << 1,
1982         MONO_X86_FP_NEEDS_LOAD          = 1 << 2,
1983         MONO_X86_REG_NOT_ECX            = 1 << 3,
1984         MONO_X86_REG_EAX                = 1 << 4,
1985         MONO_X86_REG_EDX                = 1 << 5,
1986         MONO_X86_REG_ECX                = 1 << 6
1987 };
1988
1989 static int
1990 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
1991 {
1992         int val;
1993         int test_mask = dest_mask;
1994
1995         if (flags & MONO_X86_REG_EAX)
1996                 test_mask &= (1 << AMD64_RAX);
1997         else if (flags & MONO_X86_REG_EDX)
1998                 test_mask &= (1 << AMD64_RDX);
1999         else if (flags & MONO_X86_REG_ECX)
2000                 test_mask &= (1 << AMD64_RCX);
2001         else if (flags & MONO_X86_REG_NOT_ECX)
2002                 test_mask &= ~ (1 << AMD64_RCX);
2003
2004         val = mono_regstate_alloc_int (cfg->rs, test_mask);
2005         if (val >= 0 && test_mask != dest_mask)
2006                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
2007
2008         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
2009                 DEBUG(g_print ("\tFailed to allocate with the flag suggested mask (%u), retrying excluding ECX\n", test_mask));
2010                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & ~(1 << AMD64_RCX)));
2011         }
2012
2013         if (val < 0) {
2014                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
2015                 if (val < 0)
2016                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
2017         }
2018
2019         return val;
2020 }
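/*
 * Example: for a shift, the liveness pass below tags sreg2 with
 * MONO_X86_REG_ECX and sreg1 with MONO_X86_REG_NOT_ECX, so a call like
 *
 *   mono_amd64_alloc_int_reg (cfg, tmp, ins, mask, reg, MONO_X86_REG_ECX);
 *
 * first tries 'mask & (1 << AMD64_RCX)' and only falls back to the
 * unrestricted mask (spilling if nothing is free) when that fails.
 */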
2021
2022 static int
2023 mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
2024 {
2025         int val;
2026
2027         val = mono_regstate_alloc_float (cfg->rs, dest_mask);
2028
2029         if (val < 0) {
2030                 val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
2031         }
2032
2033         return val;
2034 }
2035
2036 static inline void
2037 assign_ireg (MonoRegState *rs, int reg, int hreg)
2038 {
2039         g_assert (reg >= MONO_MAX_IREGS);
2040         g_assert (hreg < MONO_MAX_IREGS);
2041         g_assert (! is_global_ireg (hreg));
2042
2043         rs->iassign [reg] = hreg;
2044         rs->isymbolic [hreg] = reg;
2045         rs->ifree_mask &= ~ (1 << hreg);
2046 }
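/*
 * Example: assign_ireg (rs, sym, AMD64_RAX) records that the symbolic
 * register 'sym' now lives in %rax: iassign maps sym -> RAX, isymbolic
 * maps RAX -> sym, and RAX is removed from ifree_mask so it cannot be
 * handed out again until explicitly freed.
 */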
2047
2048 /*#include "cprop.c"*/
2049
2050 /*
2051  * Local register allocation.
2052  * We first scan the list of instructions and we save the liveness info of
2053  * each register (when the register is first used, when its value is set etc.).
2054  * We also reverse the list of instructions (in the InstList list) because assigning
2055  * registers backwards allows for more tricks to be used.
2056  */
2057 void
2058 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2059 {
2060         MonoInst *ins;
2061         MonoRegState *rs = cfg->rs;
2062         int i, val, fpcount;
2063         RegTrack *reginfo, *reginfof;
2064         RegTrack *reginfo1, *reginfo2, *reginfod;
2065         InstList *tmp, *reversed = NULL;
2066         const char *spec;
2067         guint32 src1_mask, src2_mask, dest_mask;
2068         GList *fspill_list = NULL;
2069         int fspill = 0;
2070
2071         if (!bb->code)
2072                 return;
2073         rs->next_vireg = bb->max_ireg;
2074         rs->next_vfreg = bb->max_freg;
2075         mono_regstate_assign (rs);
2076         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
2077         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
2078         rs->ifree_mask = AMD64_CALLEE_REGS;
2079         rs->ffree_mask = AMD64_CALLEE_FREGS;
2080
2081         if (!use_sse2)
2082                 /* The fp stack is 6 entries deep */
2083                 rs->ffree_mask = 0x3f;
2084
2085         ins = bb->code;
2086
2087         /*if (cfg->opt & MONO_OPT_COPYPROP)
2088                 local_copy_prop (cfg, ins);*/
2089
2090         i = 1;
2091         fpcount = 0;
2092         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2093         /* forward pass on the instructions to collect register liveness info */
2094         while (ins) {
2095                 spec = ins_spec [ins->opcode];
2096                 
2097                 DEBUG (print_ins (i, ins));
2098
2099                 if (spec [MONO_INST_SRC1]) {
2100                         if (spec [MONO_INST_SRC1] == 'f') {
2101                                 reginfo1 = reginfof;
2102
2103                                 if (!use_sse2) {
2104                                         GList *spill;
2105
2106                                         spill = g_list_first (fspill_list);
2107                                         if (spill && fpcount < FPSTACK_SIZE) {
2108                                                 reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
2109                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2110                                         } else
2111                                                 fpcount--;
2112                                 }
2113                         }
2114                         else
2115                                 reginfo1 = reginfo;
2116                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2117                         reginfo1 [ins->sreg1].last_use = i;
2118                         if (spec [MONO_INST_SRC1] == 'L') {
2119                                 /* The virtual register is allocated sequentially */
2120                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
2121                                 reginfo1 [ins->sreg1 + 1].last_use = i;
2122                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
2123                                         reginfo1 [ins->sreg1 + 1].born_in = i;
2124
2125                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
2126                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
2127                         }
2128                 } else {
2129                         ins->sreg1 = -1;
2130                 }
2131                 if (spec [MONO_INST_SRC2]) {
2132                         if (spec [MONO_INST_SRC2] == 'f') {
2133                                 reginfo2 = reginfof;
2134
2135                                 if (!use_sse2) {
2136                                         GList *spill;
2137
2138                                         spill = g_list_first (fspill_list);
2139                                         if (spill) {
2140                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
2141                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2142                                                 if (fpcount >= FPSTACK_SIZE) {
2143                                                         fspill++;
2144                                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2145                                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
2146                                                 }
2147                                         } else
2148                                                 fpcount--;
2149                                 }
2150                         }
2151                         else
2152                                 reginfo2 = reginfo;
2153                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2154                         reginfo2 [ins->sreg2].last_use = i;
2155                         if (spec [MONO_INST_SRC2] == 'L') {
2156                                 /* The virtual register is allocated sequentially */
2157                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
2158                                 reginfo2 [ins->sreg2 + 1].last_use = i;
2159                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
2160                                         reginfo2 [ins->sreg2 + 1].born_in = i;
2161                         }
2162                         if (spec [MONO_INST_CLOB] == 's') {
2163                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
2164                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
2165                         }
2166                 } else {
2167                         ins->sreg2 = -1;
2168                 }
2169                 if (spec [MONO_INST_DEST]) {
2170                         if (spec [MONO_INST_DEST] == 'f') {
2171                                 reginfod = reginfof;
2172                                 if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
2173                                         if (fpcount >= FPSTACK_SIZE) {
2174                                                 reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
2175                                                 fspill++;
2176                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2177                                                 fpcount--;
2178                                         }
2179                                         fpcount++;
2180                                 }
2181                         }
2182                         else
2183                                 reginfod = reginfo;
2184                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2185                                 reginfod [ins->dreg].killed_in = i;
2186                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2187                         reginfod [ins->dreg].last_use = i;
2188                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2189                                 reginfod [ins->dreg].born_in = i;
2190                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
2191                                 /* The virtual register is allocated sequentially */
2192                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2193                                 reginfod [ins->dreg + 1].last_use = i;
2194                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2195                                         reginfod [ins->dreg + 1].born_in = i;
2196
2197                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
2198                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
2199                         }
2200                 } else {
2201                         ins->dreg = -1;
2202                 }
2203
2204                 if (spec [MONO_INST_CLOB] == 'c') {
2205                         /* A call instruction implicitly uses all registers in call->out_ireg_args */
2206
2207                         MonoCallInst *call = (MonoCallInst*)ins;
2208                         GSList *list;
2209
2210                         list = call->out_ireg_args;
2211                         if (list) {
2212                                 while (list) {
2213                                         guint64 regpair;
2214                                         int reg, hreg;
2215
2216                                         regpair = (guint64) (list->data);
2217                                         hreg = regpair >> 32;
2218                                         reg = regpair & 0xffffffff;
2219
2220                                         reginfo [reg].prev_use = reginfo [reg].last_use;
2221                                         reginfo [reg].last_use = i;
2222
2223                                         list = g_slist_next (list);
2224                                 }
2225                         }
2226
2227                         list = call->out_freg_args;
2228                         if (use_sse2 && list) {
2229                                 while (list) {
2230                                         guint64 regpair;
2231                                         int reg, hreg;
2232
2233                                         regpair = (guint64) (list->data);
2234                                         hreg = regpair >> 32;
2235                                         reg = regpair & 0xffffffff;
2236
2237                                         reginfof [reg].prev_use = reginfof [reg].last_use;
2238                                         reginfof [reg].last_use = i;
2239
2240                                         list = g_slist_next (list);
2241                                 }
2242                         }
2243                 }
2244
2245                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2246                 ++i;
2247                 ins = ins->next;
2248         }
2249
2250         // todo: check if we have anything left on fp stack, in verify mode?
2251         fspill = 0;
2252
2253         DEBUG (print_regtrack (reginfo, rs->next_vireg));
2254         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2255         tmp = reversed;
2256         while (tmp) {
2257                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
2258                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
2259                 --i;
2260                 ins = tmp->data;
2261                 spec = ins_spec [ins->opcode];
2262                 prev_dreg = -1;
2263                 clob_dreg = -1;
2264                 DEBUG (g_print ("processing:"));
2265                 DEBUG (print_ins (i, ins));
2266                 if (spec [MONO_INST_CLOB] == 's') {
2267                         /*
2268                          * Shift opcodes, SREG2 must be RCX
2269                          */
2270                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
2271                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2272                                         /* Argument already in hard reg, need to copy */
2273                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2274                                         insert_before_ins (ins, tmp, copy);
2275                                 }
2276                                 else {
2277                                         DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
2278                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2279                                 }
2280                         } else {
2281                                 int need_ecx_spill = TRUE;
2282                                 /* 
2283                                  * we first check if src1/dreg is already assigned a register
2284                                  * and then we force a spill of the var assigned to ECX.
2285                                  */
2286                                 /* the destination register can't be ECX */
2287                                 dest_mask &= ~ (1 << AMD64_RCX);
2288                                 src1_mask &= ~ (1 << AMD64_RCX);
2289                                 val = rs->iassign [ins->dreg];
2290                                 /* 
2291                                  * the destination register is already assigned to ECX:
2292                                  * we need to allocate another register for it and then
2293                                  * copy from this to ECX.
2294                                  */
2295                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
2296                                         int new_dest;
2297                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2298                                         g_assert (new_dest >= 0);
2299                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
2300
2301                                         rs->isymbolic [new_dest] = ins->dreg;
2302                                         rs->iassign [ins->dreg] = new_dest;
2303                                         clob_dreg = ins->dreg;
2304                                         ins->dreg = new_dest;
2305                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
2306                                         need_ecx_spill = FALSE;
2307                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
2308                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
2309                                         rs->iassign [ins->dreg] = val;
2310                                         rs->isymbolic [val] = prev_dreg;
2311                                         ins->dreg = val;*/
2312                                 }
2313                                 if (is_global_ireg (ins->sreg2)) {
2314                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2315                                         insert_before_ins (ins, tmp, copy);
2316                                 }
2317                                 else {
2318                                         val = rs->iassign [ins->sreg2];
2319                                         if (val >= 0 && val != AMD64_RCX) {
2320                                                 MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
2321                                                 DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
2322                                                 move->next = ins;
2323                                                 g_assert_not_reached ();
2324                                                 /* FIXME: where is move connected to the instruction list? */
2325                                                 //tmp->prev->data->next = move;
2326                                         }
2327                                         else {
2328                                                 if (val == AMD64_RCX)
2329                                                         need_ecx_spill = FALSE;
2330                                         }
2331                                 }
2332                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
2333                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
2334                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
2335                                         mono_regstate_free_int (rs, AMD64_RCX);
2336                                 }
2337                                 if (!is_global_ireg (ins->sreg2))
2338                                         /* force-set sreg2 */
2339                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2340                         }
2341                         ins->sreg2 = AMD64_RCX;
2342                 } else if (spec [MONO_INST_CLOB] == 'd') { 
2343                         /*
2344                          * DIVISION/REMAINDER
2345                          */
2346                         int dest_reg = AMD64_RAX;
2347                         int clob_reg = AMD64_RDX;
2348                         if (spec [MONO_INST_DEST] == 'd') {
2349                                 dest_reg = AMD64_RDX; /* remainder */
2350                                 clob_reg = AMD64_RAX;
2351                         }
2352                         if (is_global_ireg (ins->dreg))
2353                                 val = ins->dreg;
2354                         else
2355                                 val = rs->iassign [ins->dreg];
2356                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
2357                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2358                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2359                                 mono_regstate_free_int (rs, dest_reg);
2360                         }
2361                         if (val < 0) {
2362                                 if (val < -1) {
2363                                         /* the register gets spilled after this inst */
2364                                         int spill = -val -1;
2365                                         dest_mask = 1 << clob_reg;
2366                                         prev_dreg = ins->dreg;
2367                                         val = mono_regstate_alloc_int (rs, dest_mask);
2368                                         if (val < 0)
2369                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
2370                                         rs->iassign [ins->dreg] = val;
2371                                         if (spill)
2372                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2373                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2374                                         rs->isymbolic [val] = prev_dreg;
2375                                         ins->dreg = val;
2376                                 } else {
2377                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
2378                                         prev_dreg = ins->dreg;
2379                                         assign_ireg (rs, ins->dreg, dest_reg);
2380                                         ins->dreg = dest_reg;
2381                                         val = dest_reg;
2382                                 }
2383                         }
2384
2385                         //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
2386                         if (val != dest_reg) { /* force a copy */
2387                                 create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2388                                 if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
2389                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2390                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2391                                         mono_regstate_free_int (rs, dest_reg);
2392                                 }
2393                         }
2394                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= MONO_MAX_IREGS)) {
2395                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
2396                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
2397                                 mono_regstate_free_int (rs, clob_reg);
2398                         }
2399                         src1_mask = 1 << AMD64_RAX;
2400                         src2_mask = 1 << AMD64_RCX;
2401                 }
2402                 if (spec [MONO_INST_DEST] == 'l') {
2403                         int hreg;
2404                         val = rs->iassign [ins->dreg];
2405                         /* check the special case when dreg has been moved from rcx (clob shift) */
2406                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2407                                 hreg = clob_dreg + 1;
2408                         else
2409                                 hreg = ins->dreg + 1;
2410
2411                         /* base prev_dreg on fixed hreg, handle clob case */
2412                         val = hreg - 1;
2413
2414                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
2415                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2416                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2417                                 mono_regstate_free_int (rs, AMD64_RAX);
2418                         }
2419                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
2420                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
2421                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
2422                                 mono_regstate_free_int (rs, AMD64_RDX);
2423                         }
2424                 }
2425
2426                 /*
2427                  * TRACK DREG
2428                  */
2429                 if (spec [MONO_INST_DEST] == 'f') {
2430                         if (use_sse2) {
2431                                 /* Allocate an XMM reg the same way as an int reg */
2432                                 if (reg_is_soft (ins->dreg, TRUE)) {
2433                                         val = rs->fassign [ins->dreg];
2434                                         prev_dreg = ins->dreg;
2435                                         
2436                                         if (val < 0) {
2437                                                 int spill = 0;
2438                                                 if (val < -1) {
2439                                                         /* the register gets spilled after this inst */
2440                                                         spill = -val -1;
2441                                                 }
2442                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
2443                                                 rs->fassign [ins->dreg] = val;
2444                                                 if (spill)
2445                                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
2446                                         }
2447                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
2448                                         rs->fsymbolic [val] = prev_dreg;
2449                                         ins->dreg = val;
2450                                 }
2451                         }
2452                         else if (spec [MONO_INST_CLOB] != 'm') {
2453                                 if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
2454                                         GList *spill_node;
2455                                         MonoInst *store;
2456                                         spill_node = g_list_first (fspill_list);
2457                                         g_assert (spill_node);
2458
2459                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
2460                                         insert_before_ins (ins, tmp, store);
2461                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2462                                         fspill--;
2463                                 }
2464                         }
2465                 } else if (spec [MONO_INST_DEST] == 'L') {
2466                         int hreg;
2467                         val = rs->iassign [ins->dreg];
2468                         /* check the special case when dreg has been moved from rcx (clob shift) */
2469                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2470                                 hreg = clob_dreg + 1;
2471                         else
2472                                 hreg = ins->dreg + 1;
2473
2474                         /* base prev_dreg on fixed hreg, handle clob case */
2475                         prev_dreg = hreg - 1;
2476
2477                         if (val < 0) {
2478                                 int spill = 0;
2479                                 if (val < -1) {
2480                                         /* the register gets spilled after this inst */
2481                                         spill = -val -1;
2482                                 }
2483                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2484                                 rs->iassign [ins->dreg] = val;
2485                                 if (spill)
2486                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2487                         }
2488
2489                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
2490  
2491                         rs->isymbolic [val] = hreg - 1;
2492                         ins->dreg = val;
2493                         
2494                         val = rs->iassign [hreg];
2495                         if (val < 0) {
2496                                 int spill = 0;
2497                                 if (val < -1) {
2498                                         /* the register gets spilled after this inst */
2499                                         spill = -val -1;
2500                                 }
2501                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2502                                 rs->iassign [hreg] = val;
2503                                 if (spill)
2504                                         create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2505                         }
2506
2507                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
2508                         rs->isymbolic [val] = hreg;
2509                         /* stash the hard reg allocated for the high word in ins->unused */
2510                         ins->unused = val;
2511
2512                         /* check if we can free our long reg */
2513                         if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2514                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
2515                                 mono_regstate_free_int (rs, val);
2516                         }
2517                 }
2518                 else if (ins->dreg >= MONO_MAX_IREGS) {
2519                         int hreg;
2520                         val = rs->iassign [ins->dreg];
2521                         if (spec [MONO_INST_DEST] == 'l') {
2522                                 /* check the special case where dreg has been moved out of rcx (shift clobbering) */
2523                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2524                                         hreg = clob_dreg + 1;
2525                                 else
2526                                         hreg = ins->dreg + 1;
2527
2528                                 /* base prev_dreg on fixed hreg, handle clob case */
2529                                 prev_dreg = hreg - 1;
2530                         } else
2531                                 prev_dreg = ins->dreg;
2532
2533                         if (val < 0) {
2534                                 int spill = 0;
2535                                 if (val < -1) {
2536                                         /* the register gets spilled after this inst */
2537                                         spill = -val -1;
2538                                 }
2539                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2540                                 rs->iassign [ins->dreg] = val;
2541                                 if (spill)
2542                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2543                         }
2544                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2545                         rs->isymbolic [val] = prev_dreg;
2546                         ins->dreg = val;
2547                         /* handle cases where the long reg needs to be rax:rdx */
2548                         if (spec [MONO_INST_DEST] == 'l') {
2549                                 /* check the special case where dreg has been moved out of rcx (shift clobbering) */
2550                                 int hreg = prev_dreg + 1;
2551                                 val = rs->iassign [hreg];
2552                                 if (val < 0) {
2553                                         int spill = 0;
2554                                         if (val < -1) {
2555                                                 /* the register gets spilled after this inst */
2556                                                 spill = -val -1;
2557                                         }
2558                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2559                                         rs->iassign [hreg] = val;
2560                                         if (spill)
2561                                                 create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2562                                 }
2563                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2564                                 rs->isymbolic [val] = hreg;
2565                                 if (ins->dreg == AMD64_RAX) {
2566                                         if (val != AMD64_RDX)
2567                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2568                                 } else if (ins->dreg == AMD64_RDX) {
2569                                         if (val == AMD64_RAX) {
2570                                                 /* swap */
2571                                                 g_assert_not_reached ();
2572                                         } else {
2573                                                 /* two forced copies */
2574                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2575                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2576                                         }
2577                                 } else {
2578                                         if (val == AMD64_RDX) {
2579                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2580                                         } else {
2581                                                 /* two forced copies */
2582                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2583                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2584                                         }
2585                                 }
2586                                 if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2587                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2588                                         mono_regstate_free_int (rs, val);
2589                                 }
2590                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2591                                 /* this instruction only outputs to RAX, so a copy is needed */
2592                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2593                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2594                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
2595                         }
2596                 }
2597
2598                 if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
2599                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
2600                         mono_regstate_free_float (rs, ins->dreg);
2601                 }
2602                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2603                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2604                         mono_regstate_free_int (rs, ins->dreg);
2605                 }
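                /*
                 * Note: the list walked here seems to be in reverse program order
                 * (see the bb->code fixup at the bottom of the loop), so a hard reg
                 * can be handed back once the scan reaches the instruction where its
                 * vreg is born (born_in >= i): no earlier instruction can read a
                 * value that has not been defined yet.
                 */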
2606
2607                 /* put src1 in EAX if it needs to be */
2608                 if (spec [MONO_INST_SRC1] == 'a') {
2609                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2610                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2611                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2612                                 mono_regstate_free_int (rs, AMD64_RAX);
2613                         }
2614                         if (ins->sreg1 < MONO_MAX_IREGS) {
2615                                 /* The argument is already in a hard reg, need to copy */
2616                                 MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
2617                                 insert_before_ins (ins, tmp, copy);
2618                         }
2619                         else
2620                                 /* force-set sreg1 */
2621                                 assign_ireg (rs, ins->sreg1, AMD64_RAX);
2622                         ins->sreg1 = AMD64_RAX;
2623                 }
2624
2625                 /*
2626                  * TRACK SREG1
2627                  */
2628                 if (spec [MONO_INST_SRC1] == 'f') {
2629                         if (use_sse2) {
2630                                 if (reg_is_soft (ins->sreg1, TRUE)) {
2631                                         val = rs->fassign [ins->sreg1];
2632                                         prev_sreg1 = ins->sreg1;
2633                                         if (val < 0) {
2634                                                 int spill = 0;
2635                                                 if (val < -1) {
2636                                                         /* the register gets spilled after this inst */
2637                                                         spill = -val -1;
2638                                                 }
2639                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
2640                                                 rs->fassign [ins->sreg1] = val;
2641                                                 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
2642                                                 if (spill) {
2643                                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
2644                                                         insert_before_ins (ins, tmp, store);
2645                                                 }
2646                                         }
2647                                         rs->fsymbolic [val] = prev_sreg1;
2648                                         ins->sreg1 = val;
2649                                 } else {
2650                                         prev_sreg1 = -1;
2651                                 }
2652                         }
2653                         else
2654                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2655                                 MonoInst *load;
2656                                 MonoInst *store = NULL;
2657
2658                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2659                                         GList *spill_node;
2660                                         spill_node = g_list_first (fspill_list);
2661                                         g_assert (spill_node);
2662
2663                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2664                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2665                                 }
2666
2667                                 fspill++;
2668                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2669                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2670                                 insert_before_ins (ins, tmp, load);
2671                                 if (store) 
2672                                         insert_before_ins (load, tmp, store);
2673                         }
2674                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2675                         /* force source to be same as dest */
2676                         rs->iassign [ins->sreg1] = ins->dreg;
2677                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2678
2679                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2680                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2681
2682                         ins->sreg1 = ins->dreg;
2683                         /* 
2684                          * No need to save the reg: we know that src1 == dest in this case.
2685                          * ins->inst_c0 = ins->unused;
2686                          */
2687
2688                         /* make sure we remove them from the free mask */
2689                         rs->ifree_mask &= ~ (1 << ins->dreg);
2690                         rs->ifree_mask &= ~ (1 << ins->unused);
2691                 }
2692                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2693                         val = rs->iassign [ins->sreg1];
2694                         prev_sreg1 = ins->sreg1;
2695                         if (val < 0) {
2696                                 int spill = 0;
2697                                 if (val < -1) {
2698                                         /* the register gets spilled after this inst */
2699                                         spill = -val -1;
2700                                 }
2701                                 if (0 && (ins->opcode == OP_MOVE)) {
2702                                         /* 
2703                                          * small optimization: the dest register is already allocated
2704                                          * but the src one is not: we can simply assign the same register
2705                                          * here and peephole will get rid of the instruction later.
2706                                          * This optimization may interfere with the clobbering handling:
2707                                          * it removes a mov operation that will be added again to handle clobbering.
2708                          * There are also some other issues that show up with "make testjit".
2709                                          */
2710                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2711                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2712                                         //g_assert (val >= 0);
2713                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2714                                 } else {
2715                                         //g_assert (val == -1); /* source cannot be spilled */
2716                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2717                                         rs->iassign [ins->sreg1] = val;
2718                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2719                                 }
2720                                 if (spill) {
2721                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
2722                                         insert_before_ins (ins, tmp, store);
2723                                 }
2724                         }
2725                         rs->isymbolic [val] = prev_sreg1;
2726                         ins->sreg1 = val;
2727                 } else {
2728                         prev_sreg1 = -1;
2729                 }
2730
2731                 /* handle clobbering of sreg1 */
2732                 if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
2733                         MonoInst *sreg2_copy = NULL;
2734                         MonoInst *copy;
2735                         gboolean fp = (spec [MONO_INST_SRC1] == 'f');
2736
2737                         if (ins->dreg == ins->sreg2) {
2738                                 /* 
2739                                  * copying sreg1 to dreg could clobber sreg2, so allocate a new
2740                                  * register for it.
2741                                  */
2742                                 int reg2 = 0;
2743
2744                                 if (fp)
2745                                         reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2746                                 else
2747                                         reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
2748
2749                                 DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
2750                                 sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
2751                                 prev_sreg2 = ins->sreg2 = reg2;
2752
2753                                 if (fp)
2754                                         mono_regstate_free_float (rs, reg2);
2755                                 else
2756                                         mono_regstate_free_int (rs, reg2);
2757                         }
2758
2759                         copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
2760                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
2761                         insert_before_ins (ins, tmp, copy);
2762
2763                         if (sreg2_copy)
2764                                 insert_before_ins (copy, tmp, sreg2_copy);
2765
2766                         /*
2767                          * Need to prevent sreg2 from being allocated to sreg1, since that
2768                          * would screw up the previous copy.
2769                          */
2770                         src2_mask &= ~ (1 << ins->sreg1);
2771                         /* we set sreg1 to dest as well */
2772                         prev_sreg1 = ins->sreg1 = ins->dreg;
2773                         src2_mask &= ~ (1 << ins->dreg);
2774                 }
2775
2776                 /*
2777                  * TRACK SREG2
2778                  */
2779                 if (spec [MONO_INST_SRC2] == 'f') {
2780                         if (use_sse2) {
2781                                 if (reg_is_soft (ins->sreg2, TRUE)) {
2782                                         val = rs->fassign [ins->sreg2];
2783                                         prev_sreg2 = ins->sreg2;
2784                                         if (val < 0) {
2785                                                 int spill = 0;
2786                                                 if (val < -1) {
2787                                                         /* the register gets spilled after this inst */
2788                                                         spill = -val -1;
2789                                                 }
2790                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2791                                                 rs->fassign [ins->sreg2] = val;
2792                                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
2793                                                 if (spill)
2794                                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
2795                                         }
2796                                         rs->fsymbolic [val] = prev_sreg2;
2797                                         ins->sreg2 = val;
2798                                 } else {
2799                                         prev_sreg2 = -1;
2800                                 }
2801                         }
2802                         else
2803                         if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2804                                 MonoInst *load;
2805                                 MonoInst *store = NULL;
2806
2807                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2808                                         GList *spill_node;
2809
2810                                         spill_node = g_list_first (fspill_list);
2811                                         g_assert (spill_node);
2812                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2813                                                 spill_node = g_list_next (spill_node);
2814         
2815                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2816                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2817                                 } 
2818                                 
2819                                 fspill++;
2820                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2821                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2822                                 insert_before_ins (ins, tmp, load);
2823                                 if (store) 
2824                                         insert_before_ins (load, tmp, store);
2825                         }
2826                 } 
2827                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2828                         val = rs->iassign [ins->sreg2];
2829                         prev_sreg2 = ins->sreg2;
2830                         if (val < 0) {
2831                                 int spill = 0;
2832                                 if (val < -1) {
2833                                         /* the register gets spilled after this inst */
2834                                         spill = -val -1;
2835                                 }
2836                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2837                                 rs->iassign [ins->sreg2] = val;
2838                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2839                                 if (spill)
2840                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
2841                         }
2842                         rs->isymbolic [val] = prev_sreg2;
2843                         ins->sreg2 = val;
2844                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2845                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but RCX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2846                         }
2847                 } else {
2848                         prev_sreg2 = -1;
2849                 }
2850
2851                 if (spec [MONO_INST_CLOB] == 'c') {
2852                         int j, s;
2853                         MonoCallInst *call = (MonoCallInst*)ins;
2854                         GSList *list;
2855                         guint32 clob_mask = AMD64_CALLEE_REGS;
2856
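                        /*
                         * Spill every allocated reg in the clobber mask across the
                         * call, since the callee may overwrite it; sreg1 is exempt
                         * because it can hold the call target (e.g. OP_CALL_REG).
                         */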
2857                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2858                                 s = 1 << j;
2859                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2860                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
2861                                         mono_regstate_free_int (rs, j);
2862                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2863                                 }
2864                         }
2865
2866                         if (use_sse2) {
2867                                 clob_mask = AMD64_CALLEE_FREGS;
2868
2869                                 for (j = 0; j < MONO_MAX_FREGS; ++j) {
2870                                         s = 1 << j;
2871                                         if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
2872                                                 get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
2873                                                 mono_regstate_free_float (rs, j);
2874                                                 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2875                                         }
2876                                 }
2877                         }
2878
2879                         /* 
2880                          * Assign all registers in call->out_reg_args to the proper 
2881                          * argument registers.
2882                          */
2883
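                        /*
                         * Each list entry packs a (hard reg, vreg) pair into a guint64,
                         * hard reg in the upper 32 bits and vreg in the lower 32, i.e.
                         * roughly (((guint64)hreg) << 32) | reg, matching the unpacking
                         * below. (Inferred from the decoding code, not from the encoder.)
                         */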
2884                         list = call->out_ireg_args;
2885                         if (list) {
2886                                 while (list) {
2887                                         guint64 regpair;
2888                                         int reg, hreg;
2889
2890                                         regpair = (guint64) (list->data);
2891                                         hreg = regpair >> 32;
2892                                         reg = regpair & 0xffffffff;
2893
2894                                         assign_ireg (rs, reg, hreg);
2895
2896                                         DEBUG (g_print ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg));
2897
2898                                         list = g_slist_next (list);
2899                                 }
2900                                 g_slist_free (call->out_ireg_args);
2901                         }
2902
2903                         list = call->out_freg_args;
2904                         if (list && use_sse2) {
2905                                 while (list) {
2906                                         guint64 regpair;
2907                                         int reg, hreg;
2908
2909                                         regpair = (guint64) (list->data);
2910                                         hreg = regpair >> 32;
2911                                         reg = regpair & 0xffffffff;
2912
2913                                         rs->fassign [reg] = hreg;
2914                                         rs->fsymbolic [hreg] = reg;
2915                                         rs->ffree_mask &= ~ (1 << hreg);
2916
2917                                         list = g_slist_next (list);
2918                                 }
2919                         }
2920                         if (call->out_freg_args)
2921                                 g_slist_free (call->out_freg_args);
2922                 }
2923
2924                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2925                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2926                         mono_regstate_free_int (rs, ins->sreg1);
2927                 }
2928                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2929                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2930                         mono_regstate_free_int (rs, ins->sreg2);
2931                 }*/
2932         
2933                 DEBUG (print_ins (i, ins));
2934                 /* this may result from an insert_before call */
2935                 if (!tmp->next)
2936                         bb->code = tmp->data;
2937                 tmp = tmp->next;
2938         }
2939
2940         g_free (reginfo);
2941         g_free (reginfof);
2942         g_list_free (fspill_list);
2943 }
2944
2945 static unsigned char*
2946 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2947 {
2948         if (use_sse2) {
2949                 amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
2950         }
2951         else {
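                /*
                 * x87 has no truncating convert-and-store, so the rounding mode is
                 * switched temporarily: save the control word (fnstcw), set the RC
                 * field (bits 10-11, mask 0xc00) to round-toward-zero, reload it
                 * (fldcw), pop the value with fistp, then restore the saved word.
                 */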
2952                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2953                 x86_fnstcw_membase(code, AMD64_RSP, 0);
2954                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2955                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2956                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2957                 amd64_fldcw_membase (code, AMD64_RSP, 2);
2958                 amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
2959                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2960                 amd64_pop_reg (code, dreg);
2961                 amd64_fldcw_membase (code, AMD64_RSP, 0);
2962                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2963         }
2964
2965         if (size == 1)
2966                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2967         else if (size == 2)
2968                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2969         return code;
2970 }
2971
2972 static unsigned char*
2973 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2974 {
2975         int sreg = tree->sreg1;
2976 #ifdef PLATFORM_WIN32
2977         guint8* br[5];
2978
2979         NOT_IMPLEMENTED;
2980
2981         /*
2982          * Under Windows:
2983          * If requested stack size is larger than one page,
2984          * perform stack-touch operation
2985          */
2986         /*
2987          * Generate stack probe code.
2988          * Under Windows, it is necessary to allocate one page at a time,
2989          * "touching" stack after each successful sub-allocation. This is
2990          * because of the way stack growth is implemented - there is a
2991          * guard page before the lowest stack page that is currently committed.
2992          * The stack normally grows sequentially, so the OS traps access to the
2993          * guard page and commits more pages when needed.
2994          */
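        /*
         * A rough sketch of the sequence emitted below (pseudo-assembly, for
         * orientation only):
         *
         *        test sreg, ~0xfff         ; request larger than a page?
         *        jz   small
         *  loop: sub  rsp, 0x1000          ; commit one page ...
         *        test [rsp], rsp           ; ... by touching it
         *        sub  sreg, 0x1000
         *        cmp  sreg, 0x1000
         *        jae  loop
         *        test sreg, sreg           ; any remainder?
         *        jz   done
         *        sub  rsp, sreg
         *        jmp  done
         * small: sub  rsp, sreg
         * done:
         */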
2995         amd64_test_reg_imm (code, sreg, ~0xFFF);
2996         br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2997
2998         br[2] = code; /* loop */
2999         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
3000         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
3001         amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
3002         amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
3003         br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
3004         amd64_patch (br[3], br[2]);
3005         amd64_test_reg_reg (code, sreg, sreg);
3006         br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
3007         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
3008
3009         br[1] = code; x86_jump8 (code, 0);
3010
3011         amd64_patch (br[0], code);
3012         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
3013         amd64_patch (br[1], code);
3014         amd64_patch (br[4], code);
3015 #else /* PLATFORM_WIN32 */
3016         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
3017 #endif
3018         if (tree->flags & MONO_INST_INIT) {
3019                 int offset = 0;
3020                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
3021                         amd64_push_reg (code, AMD64_RAX);
3022                         offset += 8;
3023                 }
3024                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
3025                         amd64_push_reg (code, AMD64_RCX);
3026                         offset += 8;
3027                 }
3028                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
3029                         amd64_push_reg (code, AMD64_RDI);
3030                         offset += 8;
3031                 }
3032                 
3033                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
3034                 if (sreg != AMD64_RCX)
3035                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
3036                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3037                                 
3038                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
3039                 amd64_cld (code);
3040                 amd64_prefix (code, X86_REP_PREFIX);
3041                 amd64_stosl (code);
3042                 
3043                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
3044                         amd64_pop_reg (code, AMD64_RDI);
3045                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
3046                         amd64_pop_reg (code, AMD64_RCX);
3047                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
3048                         amd64_pop_reg (code, AMD64_RAX);
3049         }
3050         return code;
3051 }
3052
3053 static guint8*
3054 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3055 {
3056         CallInfo *cinfo;
3057         guint32 offset, quad;
3058
3059         /* Move return value to the target register */
3060         /* FIXME: do this in the local reg allocator */
3061         switch (ins->opcode) {
3062         case CEE_CALL:
3063         case OP_CALL_REG:
3064         case OP_CALL_MEMBASE:
3065         case OP_LCALL:
3066         case OP_LCALL_REG:
3067         case OP_LCALL_MEMBASE:
3068                 if (ins->dreg != AMD64_RAX)
3069                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
3070                 break;
3071         case OP_FCALL:
3072         case OP_FCALL_REG:
3073         case OP_FCALL_MEMBASE:
3074                 /* FIXME: optimize this */
3075                 offset = mono_spillvar_offset_float (cfg, 0);
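                /*
                 * Mini apparently keeps fp values as doubles internally, so an R4
                 * return (single precision in XMM0) is widened here: directly with
                 * cvtss2sd under SSE2, otherwise by bouncing through the spill slot
                 * so the x87 fld can perform the conversion.
                 */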
3076                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
3077                         if (use_sse2)
3078                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
3079                         else {
3080                                 amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3081                                 amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
3082                         }
3083                 }
3084                 else {
3085                         if (use_sse2) {
3086                                 if (ins->dreg != AMD64_XMM0)
3087                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
3088                         }
3089                         else {
3090                                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3091                                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
3092                         }
3093                 }
3094                 break;
3095         case OP_VCALL:
3096         case OP_VCALL_REG:
3097         case OP_VCALL_MEMBASE:
3098                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
3099                 if (cinfo->ret.storage == ArgValuetypeInReg) {
3100                         /* Pop the destination address from the stack */
3101                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3102                         amd64_pop_reg (code, AMD64_RCX);
3103                         
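                        /*
                         * Small value types returned in registers are split into two
                         * 8-byte "quads", each with its own storage class (integer reg,
                         * SSE reg or none) in the style of the System V AMD64 ABI
                         * classification; each quad is stored through the address now
                         * held in RCX.
                         */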
3104                         for (quad = 0; quad < 2; quad ++) {
3105                                 switch (cinfo->ret.pair_storage [quad]) {
3106                                 case ArgInIReg:
3107                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
3108                                         break;
3109                                 case ArgInFloatSSEReg:
3110                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3111                                         break;
3112                                 case ArgInDoubleSSEReg:
3113                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3114                                         break;
3115                                 case ArgNone:
3116                                         break;
3117                                 default:
3118                                         NOT_IMPLEMENTED;
3119                                 }
3120                         }
3121                 }
3122                 g_free (cinfo);
3123                 break;
3124         }
3125
3126         return code;
3127 }
3128
3129 /*
3130  * emit_load_volatile_arguments:
3131  *
3132  *  Load volatile arguments from the stack to the original input registers.
3133  * Required before a tail call.
3134  */
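/*
 * Rationale (as far as can be told from the prolog code): a tail call reuses
 * the caller's frame, and arguments may have been stored to stack slots by
 * the prolog, so they must be moved back into the ABI argument registers
 * before the jump to the callee.
 */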
3135 static guint8*
3136 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3137 {
3138         MonoMethod *method = cfg->method;
3139         MonoMethodSignature *sig;
3140         MonoInst *inst;
3141         CallInfo *cinfo;
3142         guint32 i;
3143
3144         /* FIXME: Generate intermediate code instead */
3145
3146         sig = mono_method_signature (method);
3147
3148         cinfo = get_call_info (sig, FALSE);
3149         
3150         /* This is the opposite of the code in emit_prolog */
3151
3152         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3153                 ArgInfo *ainfo = cinfo->args + i;
3154                 MonoType *arg_type;
3155                 inst = cfg->varinfo [i];
3156
3157                 if (sig->hasthis && (i == 0))
3158                         arg_type = &mono_defaults.object_class->byval_arg;
3159                 else
3160                         arg_type = sig->params [i - sig->hasthis];
3161
3162                 if (inst->opcode != OP_REGVAR) {
3163                         switch (ainfo->storage) {
3164                         case ArgInIReg: {
3165                                 guint32 size = 8;
3166
3167                                 /* FIXME: I1 etc */
3168                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
3169                                 break;
3170                         }
3171                         case ArgInFloatSSEReg:
3172                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3173                                 break;
3174                         case ArgInDoubleSSEReg:
3175                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3176                                 break;
3177                         default:
3178                                 break;
3179                         }
3180                 }
3181         }
3182
3183         g_free (cinfo);
3184
3185         return code;
3186 }
3187
3188 #define REAL_PRINT_REG(text,reg) \
3189 mono_assert (reg >= 0); \
3190 amd64_push_reg (code, AMD64_RAX); \
3191 amd64_push_reg (code, AMD64_RDX); \
3192 amd64_push_reg (code, AMD64_RCX); \
3193 amd64_push_reg (code, reg); \
3194 amd64_push_imm (code, reg); \
3195 amd64_push_imm (code, text " %d %p\n"); \
3196 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
3197 amd64_call_reg (code, AMD64_RAX); \
3198 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*8); /* three 8-byte pushes */ \
3199 amd64_pop_reg (code, AMD64_RCX); \
3200 amd64_pop_reg (code, AMD64_RDX); \
3201 amd64_pop_reg (code, AMD64_RAX);
3202
3203 /* benchmark and set based on cpu */
3204 #define LOOP_ALIGNMENT 8
3205 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
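/*
 * Example of the padding math below: with code_len == 0x1a and align == 8,
 * pad = 8 - (0x1a & 7) = 6, so six padding bytes bring the loop header to
 * the 8-byte boundary at 0x20.
 */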
3206
3207 void
3208 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3209 {
3210         MonoInst *ins;
3211         MonoCallInst *call;
3212         guint offset;
3213         guint8 *code = cfg->native_code + cfg->code_len;
3214         MonoInst *last_ins = NULL;
3215         guint last_offset = 0;
3216         int max_len, cpos;
3217
3218         if (cfg->opt & MONO_OPT_PEEPHOLE)
3219                 peephole_pass (cfg, bb);
3220
3221         if (cfg->opt & MONO_OPT_LOOP) {
3222                 int pad, align = LOOP_ALIGNMENT;
3223                 /* set alignment depending on cpu */
3224                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
3225                         pad = align - pad;
3226                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3227                         amd64_padding (code, pad);
3228                         cfg->code_len += pad;
3229                         bb->native_offset = cfg->code_len;
3230                 }
3231         }
3232
3233         if (cfg->verbose_level > 2)
3234                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3235
3236         cpos = bb->max_offset;
3237
3238         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3239                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
3240                 g_assert (!mono_compile_aot);
3241                 cpos += 6;
3242
3243                 cov->data [bb->dfn].cil_code = bb->cil_code;
3244                 /* this is not thread safe, but good enough */
3245                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
3246         }
3247
3248         offset = code - cfg->native_code;
3249
3250         ins = bb->code;
3251         while (ins) {
3252                 offset = code - cfg->native_code;
3253
3254                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3255
3256                 if (offset > (cfg->code_size - max_len - 16)) {
3257                         cfg->code_size *= 2;
3258                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3259                         code = cfg->native_code + offset;
3260                         mono_jit_stats.code_reallocs++;
3261                 }
3262
3263                 mono_debug_record_line_number (cfg, ins, offset);
3264
3265                 switch (ins->opcode) {
3266                 case OP_BIGMUL:
3267                         amd64_mul_reg (code, ins->sreg2, TRUE);
3268                         break;
3269                 case OP_BIGMUL_UN:
3270                         amd64_mul_reg (code, ins->sreg2, FALSE);
3271                         break;
3272                 case OP_X86_SETEQ_MEMBASE:
3273                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
3274                         break;
3275                 case OP_STOREI1_MEMBASE_IMM:
3276                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
3277                         break;
3278                 case OP_STOREI2_MEMBASE_IMM:
3279                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
3280                         break;
3281                 case OP_STOREI4_MEMBASE_IMM:
3282                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
3283                         break;
3284                 case OP_STOREI1_MEMBASE_REG:
3285                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
3286                         break;
3287                 case OP_STOREI2_MEMBASE_REG:
3288                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
3289                         break;
3290                 case OP_STORE_MEMBASE_REG:
3291                 case OP_STOREI8_MEMBASE_REG:
3292                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
3293                         break;
3294                 case OP_STOREI4_MEMBASE_REG:
3295                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
3296                         break;
3297                 case OP_STORE_MEMBASE_IMM:
3298                 case OP_STOREI8_MEMBASE_IMM:
3299                         if (amd64_is_imm32 (ins->inst_imm))
3300                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
3301                         else {
3302                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
3303                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
3304                         }
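                        /* (there is no mov mem64, imm64 encoding: stores take at most a
                         * sign-extended imm32, hence the detour through the scratch reg) */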
3305                         break;
3306                 case CEE_LDIND_I:
3307                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
3308                         break;
3309                 case CEE_LDIND_I4:
3310                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3311                         break;
3312                 case CEE_LDIND_U4:
3313                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3314                         break;
3315                 case OP_LOADU4_MEM:
3316                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
3317                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
3318                         break;
3319                 case OP_LOAD_MEMBASE:
3320                 case OP_LOADI8_MEMBASE:
3321                         if (amd64_is_imm32 (ins->inst_offset)) {
3322                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
3323                         }
3324                         else {
3325                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
3326                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
3327                         }
3328                         break;
3329                 case OP_LOADI4_MEMBASE:
3330                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3331                         break;
3332                 case OP_LOADU4_MEMBASE:
3333                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
3334                         break;
3335                 case OP_LOADU1_MEMBASE:
3336                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
3337                         break;
3338                 case OP_LOADI1_MEMBASE:
3339                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
3340                         break;
3341                 case OP_LOADU2_MEMBASE:
3342                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
3343                         break;
3344                 case OP_LOADI2_MEMBASE:
3345                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
3346                         break;
3347                 case CEE_CONV_I1:
3348                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3349                         break;
3350                 case CEE_CONV_I2:
3351                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3352                         break;
3353                 case CEE_CONV_U1:
3354                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
3355                         break;
3356                 case CEE_CONV_U2:
3357                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
3358                         break;
3359                 case CEE_CONV_U8:
3360                 case CEE_CONV_U:
3361                         /* Clean out the upper 32 bits (a 4-byte mov zero-extends) */
3362                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3363                         break;
3364                 case CEE_CONV_I8:
3365                 case CEE_CONV_I:
3366                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
3367                         break;                  
3368                 case OP_COMPARE:
3369                 case OP_LCOMPARE:
3370                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3371                         break;
3372                 case OP_COMPARE_IMM:
3373                         if (!amd64_is_imm32 (ins->inst_imm)) {
3374                                 amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
3375                                 amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
3376                         } else {
3377                                 amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
3378                         }
3379                         break;
3380                 case OP_X86_COMPARE_REG_MEMBASE:
3381                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
3382                         break;
3383                 case OP_X86_TEST_NULL:
3384                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
3385                         break;
3386                 case OP_AMD64_TEST_NULL:
3387                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3388                         break;
3389                 case OP_X86_ADD_MEMBASE_IMM:
3390                         /* FIXME: Make a 64 version too */
3391                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3392                         break;
3393                 case OP_X86_ADD_MEMBASE:
3394                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3395                         break;
3396                 case OP_X86_SUB_MEMBASE_IMM:
3397                         g_assert (amd64_is_imm32 (ins->inst_imm));
3398                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3399                         break;
3400                 case OP_X86_SUB_MEMBASE:
3401                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3402                         break;
3403                 case OP_X86_INC_MEMBASE:
3404                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3405                         break;
3406                 case OP_X86_INC_REG:
3407                         amd64_inc_reg_size (code, ins->dreg, 4);
3408                         break;
3409                 case OP_X86_DEC_MEMBASE:
3410                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3411                         break;
3412                 case OP_X86_DEC_REG:
3413                         amd64_dec_reg_size (code, ins->dreg, 4);
3414                         break;
3415                 case OP_X86_MUL_MEMBASE:
3416                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3417                         break;
3418                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
3419                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
3420                         break;
3421                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
3422                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3423                         break;
3424                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
3425                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3426                         break;
3427                 case CEE_BREAK:
3428                         amd64_breakpoint (code);
3429                         break;
3430
3431                 case OP_ADDCC:
3432                 case CEE_ADD:
3433                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
3434                         break;
3435                 case OP_ADC:
3436                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
3437                         break;
3438                 case OP_ADD_IMM:
3439                         g_assert (amd64_is_imm32 (ins->inst_imm));
3440                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
3441                         break;
3442                 case OP_ADC_IMM:
3443                         g_assert (amd64_is_imm32 (ins->inst_imm));
3444                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
3445                         break;
3446                 case OP_SUBCC:
3447                 case CEE_SUB:
3448                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
3449                         break;
3450                 case OP_SBB:
3451                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
3452                         break;
3453                 case OP_SUB_IMM:
3454                         g_assert (amd64_is_imm32 (ins->inst_imm));
3455                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
3456                         break;
3457                 case OP_SBB_IMM:
3458                         g_assert (amd64_is_imm32 (ins->inst_imm));
3459                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
3460                         break;
3461                 case CEE_AND:
3462                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
3463                         break;
3464                 case OP_AND_IMM:
3465                         g_assert (amd64_is_imm32 (ins->inst_imm));
3466                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
3467                         break;
3468                 case CEE_MUL:
3469                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3470                         break;
3471                 case OP_MUL_IMM:
3472                         amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
3473                         break;
3474                 case CEE_DIV:
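                             /*
                              * Signed division divides %rdx:%rax, so the dividend is
                              * first sign-extended into %rdx (amd64_cdq presumably emits
                              * the 64-bit cqo form here); e.g. -7 / 2 leaves quotient -3
                              * in %rax and remainder -1 in %rdx.
                              */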
3475                         amd64_cdq (code);
3476                         amd64_div_reg (code, ins->sreg2, TRUE);
3477                         break;
3478                 case CEE_DIV_UN:
3479                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3480                         amd64_div_reg (code, ins->sreg2, FALSE);
3481                         break;
3482                 case OP_DIV_IMM:
3483                         g_assert (amd64_is_imm32 (ins->inst_imm));
3484                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3485                         amd64_cdq (code);
3486                         amd64_div_reg (code, ins->sreg2, TRUE);
3487                         break;
3488                 case CEE_REM:
3489                         amd64_cdq (code);
3490                         amd64_div_reg (code, ins->sreg2, TRUE);
3491                         break;
3492                 case CEE_REM_UN:
3493                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3494                         amd64_div_reg (code, ins->sreg2, FALSE);
3495                         break;
3496                 case OP_REM_IMM:
3497                         g_assert (amd64_is_imm32 (ins->inst_imm));
3498                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3499                         amd64_cdq (code);
3500                         amd64_div_reg (code, ins->sreg2, TRUE);
3501                         break;
3502                 case CEE_OR:
3503                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
3504                         break;
3505                 case OP_OR_IMM:
3506                         g_assert (amd64_is_imm32 (ins->inst_imm));
3507                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
3508                         break;
3509                 case CEE_XOR:
3510                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
3511                         break;
3512                 case OP_XOR_IMM:
3513                         g_assert (amd64_is_imm32 (ins->inst_imm));
3514                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
3515                         break;
3516                 case CEE_SHL:
3517                 case OP_LSHL:
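                             /*
                              * Variable-count shifts hard-wire the count in %cl on
                              * x86/amd64, which is why the register allocator is
                              * expected to pin sreg2 to %rcx, as the assert below checks.
                              */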
3518                         g_assert (ins->sreg2 == AMD64_RCX);
3519                         amd64_shift_reg (code, X86_SHL, ins->dreg);
3520                         break;
3521                 case CEE_SHR:
3522                 case OP_LSHR:
3523                         g_assert (ins->sreg2 == AMD64_RCX);
3524                         amd64_shift_reg (code, X86_SAR, ins->dreg);
3525                         break;
3526                 case OP_SHR_IMM:
3527                         g_assert (amd64_is_imm32 (ins->inst_imm));
3528                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3529                         break;
3530                 case OP_LSHR_IMM:
3531                         g_assert (amd64_is_imm32 (ins->inst_imm));
3532                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
3533                         break;
3534                 case OP_SHR_UN_IMM:
3535                         g_assert (amd64_is_imm32 (ins->inst_imm));
3536                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3537                         break;
3538                 case OP_LSHR_UN_IMM:
3539                         g_assert (amd64_is_imm32 (ins->inst_imm));
3540                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
3541                         break;
3542                 case CEE_SHR_UN:
3543                         g_assert (ins->sreg2 == AMD64_RCX);
3544                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3545                         break;
3546                 case OP_LSHR_UN:
3547                         g_assert (ins->sreg2 == AMD64_RCX);
3548                         amd64_shift_reg (code, X86_SHR, ins->dreg);
3549                         break;
3550                 case OP_SHL_IMM:
3551                         g_assert (amd64_is_imm32 (ins->inst_imm));
3552                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3553                         break;
3554                 case OP_LSHL_IMM:
3555                         g_assert (amd64_is_imm32 (ins->inst_imm));
3556                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
3557                         break;
3558
3559                 case OP_IADDCC:
3560                 case OP_IADD:
3561                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
3562                         break;
3563                 case OP_IADC:
3564                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
3565                         break;
3566                 case OP_IADD_IMM:
3567                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
3568                         break;
3569                 case OP_IADC_IMM:
3570                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
3571                         break;
3572                 case OP_ISUBCC:
3573                 case OP_ISUB:
3574                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
3575                         break;
3576                 case OP_ISBB:
3577                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
3578                         break;
3579                 case OP_ISUB_IMM:
3580                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
3581                         break;
3582                 case OP_ISBB_IMM:
3583                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
3584                         break;
3585                 case OP_IAND:
3586                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
3587                         break;
3588                 case OP_IAND_IMM:
3589                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
3590                         break;
3591                 case OP_IOR:
3592                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
3593                         break;
3594                 case OP_IOR_IMM:
3595                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
3596                         break;
3597                 case OP_IXOR:
3598                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
3599                         break;
3600                 case OP_IXOR_IMM:
3601                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
3602                         break;
3603                 case OP_INEG:
3604                         amd64_neg_reg_size (code, ins->sreg1, 4);
3605                         break;
3606                 case OP_INOT:
3607                         amd64_not_reg_size (code, ins->sreg1, 4);
3608                         break;
3609                 case OP_ISHL:
3610                         g_assert (ins->sreg2 == AMD64_RCX);
3611                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
3612                         break;
3613                 case OP_ISHR:
3614                         g_assert (ins->sreg2 == AMD64_RCX);
3615                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
3616                         break;
3617                 case OP_ISHR_IMM:
3618                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3619                         break;
3620                 case OP_ISHR_UN_IMM:
3621                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3622                         break;
3623                 case OP_ISHR_UN:
3624                         g_assert (ins->sreg2 == AMD64_RCX);
3625                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3626                         break;
3627                 case OP_ISHL_IMM:
3628                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3629                         break;
3630                 case OP_IMUL:
3631                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3632                         break;
3633                 case OP_IMUL_IMM:
3634                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
3635                         break;
3636                 case OP_IMUL_OVF:
3637                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3638                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3639                         break;
3640                 case OP_IMUL_OVF_UN: {
3641                         /* the mul operation and the exception check should most likely be split */
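                             /*
                              * One-operand MUL has fixed operands: it multiplies %eax by
                              * the given register and writes the full product to
                              * %edx:%eax, so any live values in those two registers must
                              * be saved around it; hence the push/pop dance below.
                              */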
3642                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
3643                         /*g_assert (ins->sreg2 == X86_EAX);
3644                         g_assert (ins->dreg == X86_EAX);*/
3645                         if (ins->sreg2 == X86_EAX) {
3646                                 non_eax_reg = ins->sreg1;
3647                         } else if (ins->sreg1 == X86_EAX) {
3648                                 non_eax_reg = ins->sreg2;
3649                         } else {
3650                                 /* no need to save since we're going to store to it anyway */
3651                                 if (ins->dreg != X86_EAX) {
3652                                         saved_eax = TRUE;
3653                                         amd64_push_reg (code, X86_EAX);
3654                                 }
3655                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
3656                                 non_eax_reg = ins->sreg2;
3657                         }
3658                         if (ins->dreg == X86_EDX) {
3659                                 if (!saved_eax) {
3660                                         saved_eax = TRUE;
3661                                         amd64_push_reg (code, X86_EAX);
3662                                 }
3663                         } else if (ins->dreg != X86_EAX) {
3664                                 saved_edx = TRUE;
3665                                 amd64_push_reg (code, X86_EDX);
3666                         }
3667                         amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
3668                         /* save before the check since pop and mov don't change the flags */
3669                         if (ins->dreg != X86_EAX)
3670                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
3671                         if (saved_edx)
3672                                 amd64_pop_reg (code, X86_EDX);
3673                         if (saved_eax)
3674                                 amd64_pop_reg (code, X86_EAX);
3675                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3676                         break;
3677                 }
3678                 case OP_IDIV:
3679                         amd64_cdq_size (code, 4);
3680                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3681                         break;
3682                 case OP_IDIV_UN:
3683                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3684                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3685                         break;
3686                 case OP_IDIV_IMM:
3687                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3688                         amd64_cdq_size (code, 4);
3689                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3690                         break;
3691                 case OP_IREM:
3692                         amd64_cdq_size (code, 4);
3693                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3694                         break;
3695                 case OP_IREM_UN:
3696                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3697                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
3698                         break;
3699                 case OP_IREM_IMM:
3700                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3701                         amd64_cdq_size (code, 4);
3702                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
3703                         break;
3704
3705                 case OP_ICOMPARE:
3706                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
3707                         break;
3708                 case OP_ICOMPARE_IMM:
3709                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
3710                         break;
3711
3712                 case OP_IBEQ:
3713                 case OP_IBLT:
3714                 case OP_IBGT:
3715                 case OP_IBGE:
3716                 case OP_IBLE:
3717                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
3718                         break;
3719                 case OP_IBNE_UN:
3720                 case OP_IBLT_UN:
3721                 case OP_IBGT_UN:
3722                 case OP_IBGE_UN:
3723                 case OP_IBLE_UN:
3724                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
3725                         break;
3726                 case OP_COND_EXC_IOV:
3727                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3728                                                                                 TRUE, ins->inst_p1);
3729                         break;
3730                 case OP_COND_EXC_IC:
3731                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3732                                                                                 FALSE, ins->inst_p1);
3733                         break;
3734                 case CEE_NOT:
3735                         amd64_not_reg (code, ins->sreg1);
3736                         break;
3737                 case CEE_NEG:
3738                         amd64_neg_reg (code, ins->sreg1);
3739                         break;
3740                 case OP_SEXT_I1:
3741                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3742                         break;
3743                 case OP_SEXT_I2:
3744                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3745                         break;
3746                 case OP_ICONST:
3747                 case OP_I8CONST:
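                             /*
                              * A mov to a 32-bit register zero-extends into the full
                              * 64-bit register on amd64, so a constant whose upper 32
                              * bits are zero can use the short imm32 encoding instead of
                              * the 10-byte movabs with a 64-bit immediate.
                              */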
3748                         if ((((guint64)ins->inst_c0) >> 32) == 0)
3749                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
3750                         else
3751                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
3752                         break;
3753                 case OP_AOTCONST:
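                             /*
                              * The load below uses a RIP-relative address with a zero
                              * displacement; the patch info recorded here is presumably
                              * what later fixes the displacement up to point at the
                              * actual constant.
                              */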
3754                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3755                         amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
3756                         break;
3757                 case CEE_CONV_I4:
3758                 case CEE_CONV_U4:
3759                 case OP_MOVE:
3760                 case OP_SETREG:
3761                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
3762                         break;
3763                 case OP_AMD64_SET_XMMREG_R4: {
3764                         if (use_sse2) {
3765                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
3766                         }
3767                         else {
3768                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
3769                                 /* ins->dreg is set to -1 by the reg allocator */
3770                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
3771                         }
3772                         break;
3773                 }
3774                 case OP_AMD64_SET_XMMREG_R8: {
3775                         if (use_sse2) {
3776                                 if (ins->dreg != ins->sreg1)
3777                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3778                         }
3779                         else {
3780                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
3781                                 /* ins->dreg is set to -1 by the reg allocator */
3782                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
3783                         }
3784                         break;
3785                 }
3786                 case CEE_JMP: {
3787                         /*
3788                          * Note: this 'frame destruction' logic is useful for tail calls, too.
3789                          * Keep in sync with the code in emit_epilog.
3790                          */
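                             /*
                              * Roughly, the sequence emitted below is:
                              *   lea pos(%rbp), %rsp   ; skip over the callee-saved area
                              *   pop <saved regs>      ; in reverse save order
                              *   leave
                              *   mov $target, %r11     ; or a RIP-relative load when AOT
                              *   jmp *%r11
                              */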
3791                         int pos = 0, i;
3792
3793                         /* FIXME: no tracing support... */
3794                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3795                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
3796
3797                         g_assert (!cfg->method->save_lmf);
3798
3799                         code = emit_load_volatile_arguments (cfg, code);
3800
3801                         for (i = 0; i < AMD64_NREG; ++i)
3802                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
3803                                         pos -= sizeof (gpointer);
3804                         
3805                         if (pos)
3806                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
3807
3808                         /* Pop registers in reverse order */
3809                         for (i = AMD64_NREG - 1; i > 0; --i)
3810                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3811                                         amd64_pop_reg (code, i);
3812                                 }
3813
3814                         amd64_leave (code);
3815                         offset = code - cfg->native_code;
3816                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3817                         if (mono_compile_aot)
3818                                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
3819                         else
3820                                 amd64_set_reg_template (code, AMD64_R11);
3821                         amd64_jump_reg (code, AMD64_R11);
3822                         break;
3823                 }
3824                 case OP_CHECK_THIS:
3825                         /* ensure ins->sreg1 is not NULL */
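                             /* the membase cmp faults on a NULL pointer; the runtime's
                              * fault handler presumably maps that to the expected
                              * NullReferenceException, avoiding an explicit branch */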
3826                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
3827                         break;
3828                 case OP_ARGLIST: {
3829                         amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
3830                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
3831                         break;
3832                 }
3833                 case OP_FCALL:
3834                 case OP_LCALL:
3835                 case OP_VCALL:
3836                 case OP_VOIDCALL:
3837                 case CEE_CALL:
3838                         call = (MonoCallInst*)ins;
3839                         /*
3840                          * The AMD64 ABI forces callers to know about varargs.
3841                          */
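                             /*
                              * Concretely (SysV amd64): before a varargs call %al must
                              * hold an upper bound on the number of xmm registers used
                              * for arguments, e.g. printf ("%f", d) wants %al >= 1,
                              * while a call using none can simply zero %rax, as done here.
                              */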
3842                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
3843                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3844                         else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
3845                                 /* 
3846                                  * Since the unmanaged calling convention doesn't contain a 
3847                                  * 'vararg' entry, we have to treat every pinvoke call as a
3848                                  * potential vararg call.
3849                                  */
3850                                 guint32 nregs, i;
3851                                 nregs = 0;
3852                                 for (i = 0; i < AMD64_XMM_NREG; ++i)
3853                                         if (call->used_fregs & (1 << i))
3854                                                 nregs ++;
3855                                 if (!nregs)
3856                                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3857                                 else
3858                                         amd64_mov_reg_imm (code, AMD64_RAX, nregs);
3859                         }
3860
3861                         if (ins->flags & MONO_INST_HAS_METHOD)
3862                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3863                         else
3864                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3865                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3866                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3867                         code = emit_move_return_value (cfg, ins, code);
3868                         break;
3869                 case OP_FCALL_REG:
3870                 case OP_LCALL_REG:
3871                 case OP_VCALL_REG:
3872                 case OP_VOIDCALL_REG:
3873                 case OP_CALL_REG:
3874                         call = (MonoCallInst*)ins;
3875
3876                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3877                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3878                                 ins->sreg1 = AMD64_R11;
3879                         }
3880
3881                         /*
3882                          * The AMD64 ABI forces callers to know about varargs.
3883                          */
3884                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
3885                                 if (ins->sreg1 == AMD64_RAX) {
3886                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
3887                                         ins->sreg1 = AMD64_R11;
3888                                 }
3889                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3890                         }
3891                         amd64_call_reg (code, ins->sreg1);
3892                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3893                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3894                         code = emit_move_return_value (cfg, ins, code);
3895                         break;
3896                 case OP_FCALL_MEMBASE:
3897                 case OP_LCALL_MEMBASE:
3898                 case OP_VCALL_MEMBASE:
3899                 case OP_VOIDCALL_MEMBASE:
3900                 case OP_CALL_MEMBASE:
3901                         call = (MonoCallInst*)ins;
3902
3903                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3904                                 /* 
3905                                  * Can't use R11 because it is clobbered by the trampoline 
3906                                  * code, and the reg value is needed by get_vcall_slot_addr.
3907                                  */
3908                                 amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
3909                                 ins->sreg1 = AMD64_RAX;
3910                         }
3911
3912                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3913                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3914                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3915                         code = emit_move_return_value (cfg, ins, code);
3916                         break;
3917                 case OP_OUTARG:
3918                 case OP_X86_PUSH:
3919                         amd64_push_reg (code, ins->sreg1);
3920                         break;
3921                 case OP_X86_PUSH_IMM:
3922                         g_assert (amd64_is_imm32 (ins->inst_imm));
3923                         amd64_push_imm (code, ins->inst_imm);
3924                         break;
3925                 case OP_X86_PUSH_MEMBASE:
3926                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3927                         break;
3928                 case OP_X86_PUSH_OBJ: 
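                             /*
                              * Pushes a value type by copying it onto the stack with a
                              * string move: make room, save the string registers, point
                              * %rsi at the source and %rdi at the new stack slot just
                              * above the three saved registers, then copy inst_imm bytes
                              * eight at a time (inst_imm >> 3 iterations).
                              */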
3929                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3930                         amd64_push_reg (code, AMD64_RDI);
3931                         amd64_push_reg (code, AMD64_RSI);
3932                         amd64_push_reg (code, AMD64_RCX);
3933                         if (ins->inst_offset)
3934                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3935                         else
3936                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3937                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
3938                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
3939                         amd64_cld (code);
3940                         amd64_prefix (code, X86_REP_PREFIX);
3941                         amd64_movsd (code);
3942                         amd64_pop_reg (code, AMD64_RCX);
3943                         amd64_pop_reg (code, AMD64_RSI);
3944                         amd64_pop_reg (code, AMD64_RDI);
3945                         break;
3946                 case OP_X86_LEA:
3947                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3948                         break;
3949                 case OP_X86_LEA_MEMBASE:
3950                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3951                         break;
3952                 case OP_X86_XCHG:
3953                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3954                         break;
3955                 case OP_LOCALLOC:
3956                         /* keep alignment */
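                             /*
                              * Standard round-up idiom, assuming MONO_ARCH_FRAME_ALIGNMENT
                              * is a power of two: with 16-byte alignment a request of 24
                              * bytes becomes (24 + 15) & ~15 = 32.
                              */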
3957                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3958                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3959                         code = mono_emit_stack_alloc (code, ins);
3960                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3961                         break;
3962                 case CEE_RET:
3963                         amd64_ret (code);
3964                         break;
3965                 case CEE_THROW: {
3966                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3967                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3968                                              (gpointer)"mono_arch_throw_exception");
3969                         break;
3970                 }
3971                 case OP_RETHROW: {
3972                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3973                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3974                                              (gpointer)"mono_arch_rethrow_exception");
3975                         break;
3976                 }
3977                 case OP_CALL_HANDLER: 
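                             /*
                              * The sub below plus the 8-byte return address pushed by
                              * the call presumably keep %rsp 16-byte aligned inside the
                              * handler.
                              */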
3978                         /* Align stack */
3979                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
3980                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3981                         amd64_call_imm (code, 0);
3982                         /* Restore stack alignment */
3983                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3984                         break;
3985                 case OP_LABEL:
3986                         ins->inst_c0 = code - cfg->native_code;
3987                         break;
3988                 case CEE_BR:
3989                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3990                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3991                         //break;
3992                         if (ins->flags & MONO_INST_BRLABEL) {
3993                                 if (ins->inst_i0->inst_c0) {
3994                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3995                                 } else {
3996                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3997                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3998                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3999                                                 x86_jump8 (code, 0);
4000                                         else 
4001                                                 x86_jump32 (code, 0);
4002                                 }
4003                         } else {
4004                                 if (ins->inst_target_bb->native_offset) {
4005                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
4006                                 } else {
4007                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4008                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
4009                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
4010                                                 x86_jump8 (code, 0);
4011                                         else 
4012                                                 x86_jump32 (code, 0);
4013                                 } 
4014                         }
4015                         break;
4016                 case OP_BR_REG:
4017                         amd64_jump_reg (code, ins->sreg1);
4018                         break;
4019                 case OP_CEQ:
4020                 case OP_ICEQ:
4021                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4022                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4023                         break;
4024                 case OP_CLT:
4025                 case OP_ICLT:
4026                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
4027                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4028                         break;
4029                 case OP_CLT_UN:
4030                 case OP_ICLT_UN:
4031                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4032                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4033                         break;
4034                 case OP_CGT:
4035                 case OP_ICGT:
4036                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
4037                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4038                         break;
4039                 case OP_CGT_UN:
4040                 case OP_ICGT_UN:
4041                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4042                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4043                         break;
4044                 case OP_COND_EXC_EQ:
4045                 case OP_COND_EXC_NE_UN:
4046                 case OP_COND_EXC_LT:
4047                 case OP_COND_EXC_LT_UN:
4048                 case OP_COND_EXC_GT:
4049                 case OP_COND_EXC_GT_UN:
4050                 case OP_COND_EXC_GE:
4051                 case OP_COND_EXC_GE_UN:
4052                 case OP_COND_EXC_LE:
4053                 case OP_COND_EXC_LE_UN:
4054                 case OP_COND_EXC_OV:
4055                 case OP_COND_EXC_NO:
4056                 case OP_COND_EXC_C:
4057                 case OP_COND_EXC_NC:
4058                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
4059                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
4060                         break;
4061                 case CEE_BEQ:
4062                 case CEE_BNE_UN:
4063                 case CEE_BLT:
4064                 case CEE_BLT_UN:
4065                 case CEE_BGT:
4066                 case CEE_BGT_UN:
4067                 case CEE_BGE:
4068                 case CEE_BGE_UN:
4069                 case CEE_BLE:
4070                 case CEE_BLE_UN:
4071                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
4072                         break;
4073
4074                 /* floating point opcodes */
4075                 case OP_R8CONST: {
4076                         double d = *(double *)ins->inst_p0;
4077
4078                         if (use_sse2) {
4079                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
4080                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4081                                 }
4082                                 else {
4083                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4084                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4085                                 }
4086                         }
4087                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
4088                                 amd64_fldz (code);
4089                         } else if (d == 1.0) {
4090                                 x86_fld1 (code);
4091                         } else {
4092                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4093                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
4094                         }
4095                         break;
4096                 }
4097                 case OP_R4CONST: {
4098                         float f = *(float *)ins->inst_p0;
4099
4100                         if (use_sse2) {
4101                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
4102                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4103                                 }
4104                                 else {
4105                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4106                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4107                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4108                                 }
4109                         }
4110                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
4111                                 amd64_fldz (code);
4112                         } else if (f == 1.0) {
4113                                 x86_fld1 (code);
4114                         } else {
4115                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4116                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
4117                         }
4118                         break;
4119                 }
4120                 case OP_STORER8_MEMBASE_REG:
4121                         if (use_sse2)
4122                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
4123                         else
4124                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
4125                         break;
4126                 case OP_LOADR8_SPILL_MEMBASE:
4127                         if (use_sse2)
4128                                 g_assert_not_reached ();
4129                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4130                         amd64_fxch (code, 1);
4131                         break;
4132                 case OP_LOADR8_MEMBASE:
4133                         if (use_sse2)
4134                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4135                         else
4136                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4137                         break;
4138                 case OP_STORER4_MEMBASE_REG:
4139                         if (use_sse2) {
4140                                 /* This requires a double->single conversion */
4141                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
4142                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
4143                         }
4144                         else
4145                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
4146                         break;
4147                 case OP_LOADR4_MEMBASE:
4148                         if (use_sse2) {
4149                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4150                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4151                         }
4152                         else
4153                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4154                         break;
4155                 case CEE_CONV_R4: /* FIXME: change precision */
4156                 case CEE_CONV_R8:
4157                         if (use_sse2)
4158                                 amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
4159                         else {
4160                                 amd64_push_reg (code, ins->sreg1);
4161                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
4162                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4163                         }
4164                         break;
4165                 case CEE_CONV_R_UN:
4166                         /* Emulated */
4167                         g_assert_not_reached ();
4168                         break;
4169                 case OP_LCONV_TO_R4: /* FIXME: change precision */
4170                 case OP_LCONV_TO_R8:
4171                         if (use_sse2)
4172                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4173                         else {
4174                                 amd64_push_reg (code, ins->sreg1);
4175                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4176                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4177                         }
4178                         break;
4179                 case OP_X86_FP_LOAD_I8:
4180                         if (use_sse2)
4181                                 g_assert_not_reached ();
4182                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4183                         break;
4184                 case OP_X86_FP_LOAD_I4:
4185                         if (use_sse2)
4186                                 g_assert_not_reached ();
4187                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4188                         break;
4189                 case OP_FCONV_TO_I1:
4190                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4191                         break;
4192                 case OP_FCONV_TO_U1:
4193                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4194                         break;
4195                 case OP_FCONV_TO_I2:
4196                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4197                         break;
4198                 case OP_FCONV_TO_U2:
4199                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4200                         break;
4201                 case OP_FCONV_TO_I4:
4202                 case OP_FCONV_TO_I:
4203                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4204                         break;
4205                 case OP_FCONV_TO_I8:
4206                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4207                         break;
4208                 case OP_LCONV_TO_R_UN: { 
4209                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
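                             /* mn encodes 2^64 (exponent 0x403f, mantissa
                              * 0x8000000000000000) as an 80-bit extended double; adding
                              * it corrects the signed fild result when the original
                              * unsigned value had its top bit set */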
4210                         guint8 *br;
4211
4212                         if (use_sse2)
4213                                 g_assert_not_reached ();
4214
4215                         /* load 64bit integer to FP stack */
4216                         amd64_push_imm (code, 0);
4217                         amd64_push_reg (code, ins->sreg2);
4218                         amd64_push_reg (code, ins->sreg1);
4219                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4220                         /* store as 80bit FP value */
4221                         x86_fst80_membase (code, AMD64_RSP, 0);
4222                         
4223                         /* test if lreg is negative */
4224                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4225                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
4226         
4227                         /* add correction constant mn */
4228                         x86_fld80_mem (code, mn);
4229                         x86_fld80_membase (code, AMD64_RSP, 0);
4230                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4231                         x86_fst80_membase (code, AMD64_RSP, 0);
4232
4233                         amd64_patch (br, code);
4234
4235                         x86_fld80_membase (code, AMD64_RSP, 0);
4236                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 24); /* undo the three 8-byte pushes */
4237
4238                         break;
4239                 }
4240                 case OP_LCONV_TO_OVF_I: {
4241                         guint8 *br [3], *label [1];
4242
4243                         if (use_sse2)
4244                                 g_assert_not_reached ();
4245
4246                         /* 
4247                          * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
4248                          */
4249                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
4250
4251                         /* If the low word top bit is set, see if we are negative */
4252                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
4253                         /* We are not negative (no top bit set); check that our top word is zero */
4254                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4255                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
4256                         label [0] = code;
4257
4258                         /* throw exception */
4259                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
4260                         x86_jump32 (code, 0);
4261         
4262                         amd64_patch (br [0], code);
4263                         /* our top bit is set, check that the top word is 0xffffffff */
4264                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
4265                 
4266                         amd64_patch (br [1], code);
4267                         /* nope, emit exception */
4268                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
4269                         amd64_patch (br [2], label [0]);
4270
4271                         if (ins->dreg != ins->sreg1)
4272                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
4273                         break;
4274                 }
4275                 case CEE_CONV_OVF_U4:
4276                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
4277                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
4278                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4279                         break;
4280                 case CEE_CONV_OVF_I4_UN:
4281                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
4282                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
4283                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4284                         break;
4285                 case OP_FMOVE:
4286                         if (use_sse2 && (ins->dreg != ins->sreg1))
4287                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
4288                         break;
4289                 case OP_FADD:
4290                         if (use_sse2)
4291                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
4292                         else
4293                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4294                         break;
4295                 case OP_FSUB:
4296                         if (use_sse2)
4297                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
4298                         else
4299                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
4300                         break;          
4301                 case OP_FMUL:
4302                         if (use_sse2)
4303                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
4304                         else
4305                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
4306                         break;          
4307                 case OP_FDIV:
4308                         if (use_sse2)
4309                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
4310                         else
4311                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
4312                         break;          
4313                 case OP_FNEG:
4314                         if (use_sse2) {
4315                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
4316                                 amd64_push_reg (code, AMD64_R11);
4317                                 amd64_push_reg (code, AMD64_R11);
4318                                 amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
                                     amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16); /* release the 16-byte mask slot */
4319                         }
4320                         else
4321                                 amd64_fchs (code);
4322                         break;          
4323                 case OP_SIN:
4324                         if (use_sse2)
4325                                 g_assert_not_reached ();
4326                         amd64_fsin (code);
4327                         amd64_fldz (code);
4328                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4329                         break;          
4330                 case OP_COS:
4331                         if (use_sse2)
4332                                 g_assert_not_reached ();
4333                         amd64_fcos (code);
4334                         amd64_fldz (code);
4335                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4336                         break;          
4337                 case OP_ABS:
4338                         if (use_sse2)
4339                                 g_assert_not_reached ();
4340                         amd64_fabs (code);
4341                         break;          
4342                 case OP_TAN: {
4343                         /* 
4344                          * it really doesn't make sense to inline all this code,
4345                          * it's here just to show that things may not be as simple 
4346                          * as they appear.
4347                          */
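                             /*
                              * Sketch of what follows: fptan computes tan(st0) and
                              * pushes 1.0, but only handles |x| < 2^63; when the C2
                              * status bit reports an out-of-range argument, it is first
                              * reduced modulo 2*pi (fldpi; fadd gives 2*pi; fprem1) and
                              * fptan is retried.
                              */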
4348                         guchar *check_pos, *end_tan, *pop_jump;
4349                         if (use_sse2)
4350                                 g_assert_not_reached ();
4351                         amd64_push_reg (code, AMD64_RAX);
4352                         amd64_fptan (code);
4353                         amd64_fnstsw (code);
4354                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4355                         check_pos = code;
4356                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4357                         amd64_fstp (code, 0); /* pop the 1.0 */
4358                         end_tan = code;
4359                         x86_jump8 (code, 0);
4360                         amd64_fldpi (code);
4361                         amd64_fp_op (code, X86_FADD, 0);
4362                         amd64_fxch (code, 1);
4363                         x86_fprem1 (code);
4364                         amd64_fstsw (code);
4365                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4366                         pop_jump = code;
4367                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4368                         amd64_fstp (code, 1);
4369                         amd64_fptan (code);
4370                         amd64_patch (pop_jump, code);
4371                         amd64_fstp (code, 0); /* pop the 1.0 */
4372                         amd64_patch (check_pos, code);
4373                         amd64_patch (end_tan, code);
4374                         amd64_fldz (code);
4375                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4376                         amd64_pop_reg (code, AMD64_RAX);
4377                         break;
4378                 }
4379                 case OP_ATAN:
4380                         if (use_sse2)
4381                                 g_assert_not_reached ();
4382                         x86_fld1 (code);
4383                         amd64_fpatan (code);
4384                         amd64_fldz (code);
4385                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4386                         break;          
4387                 case OP_SQRT:
4388                         if (use_sse2)
4389                                 g_assert_not_reached ();
4390                         amd64_fsqrt (code);
4391                         break;          
4392                 case OP_X86_FPOP:
4393                         if (!use_sse2)
4394                                 amd64_fstp (code, 0);
4395                         break;          
4396                 case OP_FREM: {
4397                         guint8 *l1, *l2;
4398
4399                         if (use_sse2)
4400                                 g_assert_not_reached ();
4401                         amd64_push_reg (code, AMD64_RAX);
4402                         /* we need to exchange ST(0) with ST(1) */
4403                         amd64_fxch (code, 1);
4404
4405                         /* this requires a loop, because fprem sometimes 
4406                          * returns a partial remainder */
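                             /*
                              * fprem reduces the exponent difference by at most 63 bits
                              * per step and sets C2 while the remainder is still
                              * partial, so the status word is polled and fprem
                              * re-executed until C2 clears.
                              */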
4407                         l1 = code;
4408                         /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
4409                         /* x86_fprem1 (code); */
4410                         amd64_fprem (code);
4411                         amd64_fnstsw (code);
4412                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
4413                         l2 = code + 2;
4414                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
4415
4416                         /* pop result */
4417                         amd64_fstp (code, 1);
4418
4419                         amd64_pop_reg (code, AMD64_RAX);
4420                         break;
4421                 }
4422                 case OP_FCOMPARE:
4423                         if (use_sse2) {
4424                                 /* 
4425                                  * The two arguments are swapped because the fbranch instructions
4426                                  * depend on this for the non-sse case to work.
4427                                  */
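                                     /*
                                      * comisd sets ZF/PF/CF the same way fcomip does, so
                                      * the fbranch opcodes can share one flags convention;
                                      * the swap mirrors the x87 case, where the operand
                                      * loaded last ends up in st(0).
                                      */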
4428                                 amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4429                                 break;
4430                         }
4431                         if (cfg->opt & MONO_OPT_FCMOV) {
4432                                 amd64_fcomip (code, 1);
4433                                 amd64_fstp (code, 0);
4434                                 break;
4435                         }
4436                         /* this overwrites EAX */
4437                         EMIT_FPCOMPARE(code);
4438                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4439                         break;
4440                 case OP_FCEQ:
4441                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4442                                 /* zeroing the register at the start results in 
4443                                  * shorter and faster code (we can also remove the widening op)
4444                                  */
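                                     /*
                                      * An unordered result (a NaN operand) sets PF, so the
                                      * branch below skips the setcc and the result stays 0,
                                      * which is what FCEQ must produce for NaNs.
                                      */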
4445                                 guchar *unordered_check;
4446                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4447                                 
4448                                 if (use_sse2)
4449                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4450                                 else {
4451                                         amd64_fcomip (code, 1);
4452                                         amd64_fstp (code, 0);
4453                                 }
4454                                 unordered_check = code;
4455                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4456                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
4457                                 amd64_patch (unordered_check, code);
4458                                 break;
4459                         }
4460                         if (ins->dreg != AMD64_RAX) 
4461                                 amd64_push_reg (code, AMD64_RAX);
4462
4463                         EMIT_FPCOMPARE(code);
4464                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4465                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4466                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4467                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4468
4469                         if (ins->dreg != AMD64_RAX) 
4470                                 amd64_pop_reg (code, AMD64_RAX);
4471                         break;
4472                 case OP_FCLT:
4473                 case OP_FCLT_UN:
4474                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4475                                 /* zeroing the register at the start results in 
4476                                  * shorter and faster code (we can also remove the widening op)
4477                                  */
4478                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4479                                 if (use_sse2)
4480                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4481                                 else {
4482                                         amd64_fcomip (code, 1);
4483                                         amd64_fstp (code, 0);
4484                                 }
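                                     /* For the _UN variant an unordered result must yield 1: branch
                                      * on parity past the setcc, then set dreg to 1 on the unordered
                                      * path via the inc below.
                                      */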
4485                                 if (ins->opcode == OP_FCLT_UN) {
4486                                         guchar *unordered_check = code;
4487                                         guchar *jump_to_end;
4488                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4489                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4490                                         jump_to_end = code;
4491                                         x86_jump8 (code, 0);
4492                                         amd64_patch (unordered_check, code);
4493                                         amd64_inc_reg (code, ins->dreg);
4494                                         amd64_patch (jump_to_end, code);
4495                                 } else {
4496                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4497                                 }
4498                                 break;
4499                         }
4500                         if (ins->dreg != AMD64_RAX) 
4501                                 amd64_push_reg (code, AMD64_RAX);
4502
4503                         EMIT_FPCOMPARE(code);
4504                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4505                         if (ins->opcode == OP_FCLT_UN) {
4506                                 guchar *is_not_zero_check, *end_jump;
4507                                 is_not_zero_check = code;
4508                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4509                                 end_jump = code;
4510                                 x86_jump8 (code, 0);
4511                                 amd64_patch (is_not_zero_check, code);
4512                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4513
4514                                 amd64_patch (end_jump, code);
4515                         }
4516                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4517                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4518
4519                         if (ins->dreg != AMD64_RAX) 
4520                                 amd64_pop_reg (code, AMD64_RAX);
4521                         break;
4522                 case OP_FCGT:
4523                 case OP_FCGT_UN:
4524                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4525                                 /* zeroing the register at the start results in 
4526                                  * shorter and faster code (we can also remove the widening op)
4527                                  */
4528                                 guchar *unordered_check;
4529                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4530                                 if (use_sse2)
4531                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4532                                 else {
4533                                         amd64_fcomip (code, 1);
4534                                         amd64_fstp (code, 0);
4535                                 }
4536                                 if (ins->opcode == OP_FCGT) {
4537                                         unordered_check = code;
4538                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4539                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4540                                         amd64_patch (unordered_check, code);
4541                                 } else {
4542                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4543                                 }
4544                                 break;
4545                         }
4546                         if (ins->dreg != AMD64_RAX) 
4547                                 amd64_push_reg (code, AMD64_RAX);
4548
4549                         EMIT_FPCOMPARE(code);
4550                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4551                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4552                         if (ins->opcode == OP_FCGT_UN) {
4553                                 guchar *is_not_zero_check, *end_jump;
4554                                 is_not_zero_check = code;
4555                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4556                                 end_jump = code;
4557                                 x86_jump8 (code, 0);
4558                                 amd64_patch (is_not_zero_check, code);
4559                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4560
4561                                 amd64_patch (end_jump, code);
4562                         }
4563                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4564                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4565
4566                         if (ins->dreg != AMD64_RAX) 
4567                                 amd64_pop_reg (code, AMD64_RAX);
4568                         break;
4569                 case OP_FCLT_MEMBASE:
4570                 case OP_FCGT_MEMBASE:
4571                 case OP_FCLT_UN_MEMBASE:
4572                 case OP_FCGT_UN_MEMBASE:
4573                 case OP_FCEQ_MEMBASE: {
4574                         guchar *unordered_check, *jump_to_end;
4575                         int x86_cond;
4576                         g_assert (use_sse2);
4577
4578                         amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4579                         amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
4580
4581                         switch (ins->opcode) {
4582                         case OP_FCEQ_MEMBASE:
4583                                 x86_cond = X86_CC_EQ;
4584                                 break;
4585                         case OP_FCLT_MEMBASE:
4586                         case OP_FCLT_UN_MEMBASE:
4587                                 x86_cond = X86_CC_LT;
4588                                 break;
4589                         case OP_FCGT_MEMBASE:
4590                         case OP_FCGT_UN_MEMBASE:
4591                                 x86_cond = X86_CC_GT;
4592                                 break;
4593                         default:
4594                                 g_assert_not_reached ();
4595                         }
4596
4597                         unordered_check = code;
4598                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4599                         amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
4600
4601                         switch (ins->opcode) {
4602                         case OP_FCEQ_MEMBASE:
4603                         case OP_FCLT_MEMBASE:
4604                         case OP_FCGT_MEMBASE:
4605                                 amd64_patch (unordered_check, code);
4606                                 break;
4607                         case OP_FCLT_UN_MEMBASE:
4608                         case OP_FCGT_UN_MEMBASE:
4609                                 jump_to_end = code;
4610                                 x86_jump8 (code, 0);
4611                                 amd64_patch (unordered_check, code);
4612                                 amd64_inc_reg (code, ins->dreg);
4613                                 amd64_patch (jump_to_end, code);
4614                                 break;
4615                         default:
4616                                 break;
4617                         }
4618                         break;
4619                 }
4620                 case OP_FBEQ:
4621                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4622                                 guchar *jump = code;
4623                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
4624                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4625                                 amd64_patch (jump, code);
4626                                 break;
4627                         }
4628                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4629                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
4630                         break;
4631                 case OP_FBNE_UN:
4632                         /* Branch if C013 != 100 */
4633                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4634                                 /* branch if !ZF or (PF|CF) */
4635                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4636                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4637                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
4638                                 break;
4639                         }
4640                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4641                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4642                         break;
4643                 case OP_FBLT:
4644                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4645                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4646                                 break;
4647                         }
4648                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4649                         break;
4650                 case OP_FBLT_UN:
4651                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4652                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4653                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4654                                 break;
4655                         }
4656                         if (ins->opcode == OP_FBLT_UN) {
4657                                 guchar *is_not_zero_check, *end_jump;
4658                                 is_not_zero_check = code;
4659                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4660                                 end_jump = code;
4661                                 x86_jump8 (code, 0);
4662                                 amd64_patch (is_not_zero_check, code);
4663                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4664
4665                                 amd64_patch (end_jump, code);
4666                         }
4667                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4668                         break;
4669                 case OP_FBGT:
4670                 case OP_FBGT_UN:
4671                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4672                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
4673                                 break;
4674                         }
4675                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4676                         if (ins->opcode == OP_FBGT_UN) {
4677                                 guchar *is_not_zero_check, *end_jump;
4678                                 is_not_zero_check = code;
4679                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4680                                 end_jump = code;
4681                                 x86_jump8 (code, 0);
4682                                 amd64_patch (is_not_zero_check, code);
4683                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4684
4685                                 amd64_patch (end_jump, code);
4686                         }
4687                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4688                         break;
4689                 case OP_FBGE:
4690                         /* Branch if C013 == 100 or 001 */
4691                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4692                                 guchar *br1;
4693
4694                                 /* skip branch if C1=1 */
4695                                 br1 = code;
4696                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4697                                 /* branch if (C0 | C3) = 1 */
4698                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
4699                                 amd64_patch (br1, code);
4700                                 break;
4701                         }
4702                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4703                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4704                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4705                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4706                         break;
4707                 case OP_FBGE_UN:
4708                         /* Branch if C013 == 000 */
4709                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4710                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
4711                                 break;
4712                         }
4713                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4714                         break;
4715                 case OP_FBLE:
4716                         /* Branch if C013=000 or 100 */
4717                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4718                                 guchar *br1;
4719
4720                                 /* skip branch if C1=1 */
4721                                 br1 = code;
4722                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4723                                 /* branch if C0=0 */
4724                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
4725                                 amd64_patch (br1, code);
4726                                 break;
4727                         }
4728                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
4729                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4730                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4731                         break;
4732                 case OP_FBLE_UN:
4733                         /* Branch if C013 != 001 */
4734                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4735                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4736                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
4737                                 break;
4738                         }
4739                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4740                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4741                         break;
4742                 case CEE_CKFINITE: {
4743                         if (use_sse2) {
4744                                 /* Transfer value to the fp stack */
4745                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4746                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
4747                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
4748                         }
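                             /* fxam classifies ST(0) via C3/C2/C0: C0 is set with C3 clear
                              * exactly for NaN and infinity, so masking the status word with
                              * 0x4100 (C3|C0) and comparing against C0 catches the non-finite
                              * values that must raise the exception.
                              */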
4749                         amd64_push_reg (code, AMD64_RAX);
4750                         amd64_fxam (code);
4751                         amd64_fnstsw (code);
4752                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
4753                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4754                         amd64_pop_reg (code, AMD64_RAX);
4755                         if (use_sse2) {
4756                                 amd64_fstp (code, 0);
4757                         }                               
4758                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
4759                         break;
4760                 }
4761                 case OP_TLS_GET: {
4762                         x86_prefix (code, X86_FS_PREFIX);
4763                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
4764                         break;
4765                 }
4766                 case OP_ATOMIC_ADD_I4:
4767                 case OP_ATOMIC_ADD_I8: {
4768                         int dreg = ins->dreg;
4769                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
4770
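                             /* A sketch of the sequence emitted below (dreg may be remapped
                              * to %r11 to avoid clobbering the base register):
                              *
                              *       mov   dreg, sreg2
                              *       lock xadd [basereg + offset], dreg
                              *
                              * after which dreg holds the value the location had before the
                              * add.
                              */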
4771                         if (dreg == ins->inst_basereg)
4772                                 dreg = AMD64_R11;
4773                         
4774                         if (dreg != ins->sreg2)
4775                                 amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4776
4777                         x86_prefix (code, X86_LOCK_PREFIX);
4778                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4779
4780                         if (dreg != ins->dreg)
4781                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4782
4783                         break;
4784                 }
4785                 case OP_ATOMIC_ADD_NEW_I4:
4786                 case OP_ATOMIC_ADD_NEW_I8: {
4787                         int dreg = ins->dreg;
4788                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
4789
4790                         if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
4791                                 dreg = AMD64_R11;
4792
4793                         amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4794                         amd64_prefix (code, X86_LOCK_PREFIX);
4795                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4796                         /* dreg contains the old value, add with sreg2 value */
4797                         amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
4798                         
4799                         if (ins->dreg != dreg)
4800                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4801
4802                         break;
4803                 }
4804                 case OP_ATOMIC_EXCHANGE_I4:
4805                 case OP_ATOMIC_EXCHANGE_I8: {
4806                         guchar *br[2];
4807                         int sreg2 = ins->sreg2;
4808                         int breg = ins->inst_basereg;
4809                         guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
4810
4811                         /* 
4812                          * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
4813                          * an explanation of how this works.
4814                          */
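                             /* A sketch of the loop emitted below (on failure cmpxchg reloads
                              * %rax with the current memory value, so no explicit reload is
                              * needed before retrying):
                              *
                              *       mov   %rax, [breg + offset]
                              *   1:  lock cmpxchg [breg + offset], sreg2
                              *       jne   1b
                              */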
4815
4816                         /* cmpxchg uses rax as its comparand, so we need to make sure we can use it;
4817                          * this is a hack to overcome limits in the x86 reg allocator
4818                          * (req: dreg == rax and sreg2 != rax and breg != rax)
4819                          */
4820                         if (ins->dreg != AMD64_RAX)
4821                                 amd64_push_reg (code, AMD64_RAX);
4822                         
4823                         /* We need the EAX reg for the cmpxchg */
4824                         if (ins->sreg2 == AMD64_RAX) {
4825                                 amd64_push_reg (code, AMD64_RDX);
4826                                 amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
4827                                 sreg2 = AMD64_RDX;
4828                         }
4829
4830                         if (breg == AMD64_RAX) {
4831                                 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, size);
4832                                 breg = AMD64_R11;
4833                         }
4834
4835                         amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
4836
4837                         br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
4838                         amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
4839                         br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
4840                         amd64_patch (br [1], br [0]);
4841
4842                         if (ins->dreg != AMD64_RAX) {
4843                                 amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
4844                                 amd64_pop_reg (code, AMD64_RAX);
4845                         }
4846
4847                         if (ins->sreg2 != sreg2)
4848                                 amd64_pop_reg (code, AMD64_RDX);
4849
4850                         break;
4851                 }
4852                 default:
4853                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4854                         g_assert_not_reached ();
4855                 }
4856
4857                 if ((code - cfg->native_code - offset) > max_len) {
4858                         g_warning ("wrong maximum instruction length for instruction %s (expected %d, got %ld)",
4859                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4860                         g_assert_not_reached ();
4861                 }
4862                
4863                 cpos += max_len;
4864
4865                 last_ins = ins;
4866                 last_offset = offset;
4867                 
4868                 ins = ins->next;
4869         }
4870
4871         cfg->code_len = code - cfg->native_code;
4872 }
4873
4874 void
4875 mono_arch_register_lowlevel_calls (void)
4876 {
4877 }
4878
4879 void
4880 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4881 {
4882         MonoJumpInfo *patch_info;
4883
4884         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4885                 unsigned char *ip = patch_info->ip.i + code;
4886                 const unsigned char *target;
4887
4888                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4889
4890                 if (mono_compile_aot) {
4891                         switch (patch_info->type) {
4892                         case MONO_PATCH_INFO_BB:
4893                         case MONO_PATCH_INFO_LABEL:
4894                                 break;
4895                         default: {
4896                                 /* Just to make code that runs at AOT time work */
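                                     /* Roughly: park the target address in a fresh pointer-sized
                                      * slot and turn 'target' into that slot's displacement from
                                      * ip, so the patched site can reach it RIP-relatively even
                                      * when the real target is out of 32-bit range.
                                      */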
4897                                 const unsigned char **tmp;
4898
4899                                 mono_domain_lock (domain);
4900                                 tmp = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer));
4901                                 mono_domain_unlock (domain);
4902
4903                                 *tmp = target;
4904                                 target = (const unsigned char*)(guint64)((guint8*)tmp - (guint8*)ip);
4905                                 break;
4906                         }
4907                         }
4908                 }
4909
4910                 switch (patch_info->type) {
4911                 case MONO_PATCH_INFO_NONE:
4912                         continue;
4913                 case MONO_PATCH_INFO_CLASS_INIT: {
4914                         /* Might already have been changed to a nop */
4915                         guint8* ip2 = ip;
4916                         if (mono_compile_aot)
4917                                 amd64_call_membase (ip2, AMD64_RIP, 0);
4918                         else {
4919                                 amd64_call_code (ip2, 0);
4920                         }
4921                         break;
4922                 }
4923                 case MONO_PATCH_INFO_METHOD_REL:
4924                 case MONO_PATCH_INFO_R8:
4925                 case MONO_PATCH_INFO_R4:
4926                         g_assert_not_reached ();
4927                         continue;
4928                 case MONO_PATCH_INFO_BB:
4929                         break;
4930                 default:
4931                         break;
4932                 }
4933                 amd64_patch (ip, (gpointer)target);
4934         }
4935 }
4936
4937 guint8 *
4938 mono_arch_emit_prolog (MonoCompile *cfg)
4939 {
4940         MonoMethod *method = cfg->method;
4941         MonoBasicBlock *bb;
4942         MonoMethodSignature *sig;
4943         MonoInst *inst;
4944         int alloc_size, pos, max_offset, i, quad;
4945         guint8 *code;
4946         CallInfo *cinfo;
4947
4948         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
4949         code = cfg->native_code = g_malloc (cfg->code_size);
4950
4951         amd64_push_reg (code, AMD64_RBP);
4952         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
4953
4954         /* Stack alignment check */
4955 #if 0
4956         {
4957                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4958                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4959                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4960                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4961                 amd64_breakpoint (code);
4962         }
4963 #endif
4964
4965         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
4966         pos = 0;
4967
4968         if (method->save_lmf) {
4969                 gint32 lmf_offset;
4970
4971                 pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
4972
4973                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
4974
4975                 lmf_offset = - cfg->arch.lmf_offset;
4976
4977                 /* Save ip */
4978                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
4979                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
4980                 /* Save fp */
4981                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
4982                 /* Save method */
4983                 /* FIXME: add a relocation for this */
4984                 if (IS_IMM32 (cfg->method))
4985                         amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
4986                 else {
4987                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
4988                         amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
4989                 }
4990                 /* Save callee saved regs */
4991                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
4992                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
4993                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
4994                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
4995                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
4996         } else {
4997
4998                 for (i = 0; i < AMD64_NREG; ++i)
4999                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5000                                 amd64_push_reg (code, i);
5001                                 pos += sizeof (gpointer);
5002                         }
5003         }
5004
5005         alloc_size -= pos;
5006
5007         if (alloc_size) {
5008                 /* See mono_emit_stack_alloc */
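                     /* Windows grows the stack one guard page at a time, so an
                      * allocation of 4K or more has to touch every page in order;
                      * the loop below subtracts and probes a page at a time. */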
5009 #ifdef PLATFORM_WIN32
5010                 guint32 remaining_size = alloc_size;
5011                 while (remaining_size >= 0x1000) {
5012                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
5013                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
5014                         remaining_size -= 0x1000;
5015                 }
5016                 if (remaining_size)
5017                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
5018 #else
5019                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
5020 #endif
5021         }
5022
5023         /* compute max_offset in order to use short forward jumps */
5024         max_offset = 0;
5025         if (cfg->opt & MONO_OPT_BRANCH) {
5026                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5027                         MonoInst *ins = bb->code;
5028                         bb->max_offset = max_offset;
5029
5030                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5031                                 max_offset += 6;
5032                         /* max alignment for loops */
5033                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
5034                                 max_offset += LOOP_ALIGNMENT;
5035
5036                         while (ins) {
5037                                 if (ins->opcode == OP_LABEL)
5038                                         ins->inst_c1 = max_offset;
5039                                 
5040                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
5041                                 ins = ins->next;
5042                         }
5043                 }
5044         }
5045
5046         sig = mono_method_signature (method);
5047         pos = 0;
5048
5049         cinfo = get_call_info (sig, FALSE);
5050
5051         if (sig->ret->type != MONO_TYPE_VOID) {
5052                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
5053                         /* Save volatile arguments to the stack */
5054                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
5055                 }
5056         }
5057
5058         /* Keep this in sync with emit_load_volatile_arguments */
5059         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5060                 ArgInfo *ainfo = cinfo->args + i;
5061                 gint32 stack_offset;
5062                 MonoType *arg_type;
5063                 inst = cfg->varinfo [i];
5064
5065                 if (sig->hasthis && (i == 0))
5066                         arg_type = &mono_defaults.object_class->byval_arg;
5067                 else
5068                         arg_type = sig->params [i - sig->hasthis];
5069
5070                 stack_offset = ainfo->offset + ARGS_OFFSET;
5071
5072                 /* Save volatile arguments to the stack */
5073                 if (inst->opcode != OP_REGVAR) {
5074                         switch (ainfo->storage) {
5075                         case ArgInIReg: {
5076                                 guint32 size = 8;
5077
5078                                 /* FIXME: I1 etc */
5079                                 /*
5080                                 if (stack_offset & 0x1)
5081                                         size = 1;
5082                                 else if (stack_offset & 0x2)
5083                                         size = 2;
5084                                 else if (stack_offset & 0x4)
5085                                         size = 4;
5086                                 else
5087                                         size = 8;
5088                                 */
5089                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
5090                                 break;
5091                         }
5092                         case ArgInFloatSSEReg:
5093                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5094                                 break;
5095                         case ArgInDoubleSSEReg:
5096                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5097                                 break;
5098                         case ArgValuetypeInReg:
5099                                 for (quad = 0; quad < 2; quad ++) {
5100                                         switch (ainfo->pair_storage [quad]) {
5101                                         case ArgInIReg:
5102                                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
5103                                                 break;
5104                                         case ArgInFloatSSEReg:
5105                                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5106                                                 break;
5107                                         case ArgInDoubleSSEReg:
5108                                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5109                                                 break;
5110                                         case ArgNone:
5111                                                 break;
5112                                         default:
5113                                                 g_assert_not_reached ();
5114                                         }
5115                                 }
5116                                 break;
5117                         default:
5118                                 break;
5119                         }
5120                 }
5121
5122                 if (inst->opcode == OP_REGVAR) {
5123                         /* Argument allocated to (non-volatile) register */
5124                         switch (ainfo->storage) {
5125                         case ArgInIReg:
5126                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
5127                                 break;
5128                         case ArgOnStack:
5129                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
5130                                 break;
5131                         default:
5132                                 g_assert_not_reached ();
5133                         }
5134                 }
5135         }
5136
5137         if (method->save_lmf) {
5138                 gint32 lmf_offset;
5139
5140                 if (lmf_tls_offset != -1) {
5141                         /* Load the lmf quickly using the FS register */
5142                         x86_prefix (code, X86_FS_PREFIX);
5143                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
5144                 }
5145                 else {
5146                         /* 
5147                          * The call might clobber argument registers, but they are already
5148                          * saved to the stack/global regs.
5149                          */
5150
5151                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
5152                                                                  (gpointer)"mono_get_lmf_addr");                
5153                 }
5154
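                 /* What the stores below amount to, as a C-level sketch (using
                  * the MonoLMF field names already referenced in this file):
                  *
                  *   lmf->lmf_addr = mono_get_lmf_addr ();
                  *   lmf->previous_lmf = *lmf->lmf_addr;
                  *   *lmf->lmf_addr = lmf;
                  */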
5155                 lmf_offset = - cfg->arch.lmf_offset;
5156
5157                 /* Save lmf_addr */
5158                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
5159                 /* Save previous_lmf */
5160                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
5161                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
5162                 /* Set new lmf */
5163                 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
5164                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
5165         }
5166
5167
5168         g_free (cinfo);
5169
5170         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5171                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5172
5173         cfg->code_len = code - cfg->native_code;
5174
5175         g_assert (cfg->code_len < cfg->code_size);
5176
5177         return code;
5178 }
5179
5180 void
5181 mono_arch_emit_epilog (MonoCompile *cfg)
5182 {
5183         MonoMethod *method = cfg->method;
5184         int quad, pos, i;
5185         guint8 *code;
5186         int max_epilog_size = 16;
5187         CallInfo *cinfo;
5188         
5189         if (cfg->method->save_lmf)
5190                 max_epilog_size += 256;
5191         
5192         if (mono_jit_trace_calls != NULL)
5193                 max_epilog_size += 50;
5194
5195         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5196                 max_epilog_size += 50;
5197
5198         max_epilog_size += (AMD64_NREG * 2);
5199
5200         while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5201                 cfg->code_size *= 2;
5202                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5203                 mono_jit_stats.code_reallocs++;
5204         }
5205
5206         code = cfg->native_code + cfg->code_len;
5207
5208         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5209                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5210
5211         /* the code restoring the registers must be kept in sync with CEE_JMP */
5212         pos = 0;
5213         
5214         if (method->save_lmf) {
5215                 gint32 lmf_offset = - cfg->arch.lmf_offset;
5216
5217                 /* Restore previous lmf */
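                 /* i.e. the C-level pop '*lmf->lmf_addr = lmf->previous_lmf;',
                  * performed by the three moves below. */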
5218                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
5219                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
5220                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
5221
5222                 /* Restore callee saved regs */
5223                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
5224                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
5225                 }
5226                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
5227                         amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
5228                 }
5229                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
5230                         amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
5231                 }
5232                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
5233                         amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
5234                 }
5235                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
5236                         amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
5237                 }
5238         } else {
5239
5240                 for (i = 0; i < AMD64_NREG; ++i)
5241                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
5242                                 pos -= sizeof (gpointer);
5243
5244                 if (pos) {
5245                         if (pos == - sizeof (gpointer)) {
5246                                 /* Only one register, so avoid lea */
5247                                 for (i = AMD64_NREG - 1; i > 0; --i)
5248                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5249                                                 amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
5250                                         }
5251                         }
5252                         else {
5253                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
5254
5255                                 /* Pop registers in reverse order */
5256                                 for (i = AMD64_NREG - 1; i > 0; --i)
5257                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5258                                                 amd64_pop_reg (code, i);
5259                                         }
5260                         }
5261                 }
5262         }
5263
5264         /* Load returned vtypes into registers if needed */
5265         cinfo = get_call_info (mono_method_signature (method), FALSE);
5266         if (cinfo->ret.storage == ArgValuetypeInReg) {
5267                 ArgInfo *ainfo = &cinfo->ret;
5268                 MonoInst *inst = cfg->ret;
5269
5270                 for (quad = 0; quad < 2; quad ++) {
5271                         switch (ainfo->pair_storage [quad]) {
5272                         case ArgInIReg:
5273                                 amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
5274                                 break;
5275                         case ArgInFloatSSEReg:
5276                                 amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5277                                 break;
5278                         case ArgInDoubleSSEReg:
5279                                 amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5280                                 break;
5281                         case ArgNone:
5282                                 break;
5283                         default:
5284                                 g_assert_not_reached ();
5285                         }
5286                 }
5287         }
5288         g_free (cinfo);
5289
5290         amd64_leave (code);
5291         amd64_ret (code);
5292
5293         cfg->code_len = code - cfg->native_code;
5294
5295         g_assert (cfg->code_len < cfg->code_size);
5296
5297 }
5298
5299 void
5300 mono_arch_emit_exceptions (MonoCompile *cfg)
5301 {
5302         MonoJumpInfo *patch_info;
5303         int nthrows, i;
5304         guint8 *code;
5305         MonoClass *exc_classes [16];
5306         guint8 *exc_throw_start [16], *exc_throw_end [16];
5307         guint32 code_size = 0;
5308
5309         /* Compute needed space */
5310         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5311                 if (patch_info->type == MONO_PATCH_INFO_EXC)
5312                         code_size += 40;
5313                 if (patch_info->type == MONO_PATCH_INFO_R8)
5314                         code_size += 8 + 7; /* sizeof (double) + alignment */
5315                 if (patch_info->type == MONO_PATCH_INFO_R4)
5316                         code_size += 4 + 7; /* sizeof (float) + alignment */
5317         }
5318
5319         while (cfg->code_len + code_size > (cfg->code_size - 16)) {
5320                 cfg->code_size *= 2;
5321                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5322                 mono_jit_stats.code_reallocs++;
5323         }
5324
5325         code = cfg->native_code + cfg->code_len;
5326
5327         /* add code to raise exceptions */
5328         nthrows = 0;
5329         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5330                 switch (patch_info->type) {
5331                 case MONO_PATCH_INFO_EXC: {
5332                         MonoClass *exc_class;
5333                         guint8 *buf, *buf2;
5334                         guint32 throw_ip;
5335
5336                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
5337
5338                         exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5339                         g_assert (exc_class);
5340                         throw_ip = patch_info->ip.i;
5341
5342                         //x86_breakpoint (code);
5343                         /* Find a throw sequence for the same exception class */
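                             /* Throw sequences are shared per exception class: only the
                              * offset passed in %rsi differs, so a repeat throw just loads
                              * its own offset and jumps into the earlier sequence. */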
5344                         for (i = 0; i < nthrows; ++i)
5345                                 if (exc_classes [i] == exc_class)
5346                                         break;
5347                         if (i < nthrows) {
5348                                 amd64_mov_reg_imm (code, AMD64_RSI, (exc_throw_end [i] - cfg->native_code) - throw_ip);
5349                                 x86_jump_code (code, exc_throw_start [i]);
5350                                 patch_info->type = MONO_PATCH_INFO_NONE;
5351                         }
5352                         else {
5353                                 buf = code;
5354                                 amd64_mov_reg_imm_size (code, AMD64_RSI, 0xf0f0f0f0, 4);
5355                                 buf2 = code;
5356
5357                                 if (nthrows < 16) {
5358                                         exc_classes [nthrows] = exc_class;
5359                                         exc_throw_start [nthrows] = code;
5360                                 }
5361
5362                                 amd64_mov_reg_imm (code, AMD64_RDI, exc_class->type_token);
5363                                 patch_info->data.name = "mono_arch_throw_corlib_exception";
5364                                 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5365                                 patch_info->ip.i = code - cfg->native_code;
5366
5367                                 if (mono_compile_aot)
5368                                         amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
5369                                 else
5370                                         amd64_set_reg_template (code, GP_SCRATCH_REG);
5371                                 amd64_call_reg (code, GP_SCRATCH_REG);
5372
5373                                 amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
5374                                 while (buf < buf2)
5375                                         x86_nop (buf);
5376
5377                                 if (nthrows < 16) {
5378                                         exc_throw_end [nthrows] = code;
5379                                         nthrows ++;
5380                                 }
5381                         }
5382                         break;
5383                 }
5384                 default:
5385                         /* do nothing */
5386                         break;
5387                 }
5388         }
5389
5390         /* Handle relocations with RIP relative addressing */
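             /* The constant is appended after the method body and the load is
              * fixed up with a RIP-relative displacement; the displacement field
              * sits at a different offset in the SSE and x87 encodings, hence
              * the pos + 4 vs. pos + 3 stores below. */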
5391         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5392                 gboolean remove = FALSE;
5393
5394                 switch (patch_info->type) {
5395                 case MONO_PATCH_INFO_R8: {
5396                         guint8 *pos;
5397
5398                         code = (guint8*)ALIGN_TO (code, 8);
5399
5400                         pos = cfg->native_code + patch_info->ip.i;
5401
5402                         *(double*)code = *(double*)patch_info->data.target;
5403
5404                         if (use_sse2)
5405                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5406                         else
5407                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5408                         code += 8;
5409
5410                         remove = TRUE;
5411                         break;
5412                 }
5413                 case MONO_PATCH_INFO_R4: {
5414                         guint8 *pos;
5415
5416                         code = (guint8*)ALIGN_TO (code, 8);
5417
5418                         pos = cfg->native_code + patch_info->ip.i;
5419
5420                         *(float*)code = *(float*)patch_info->data.target;
5421
5422                         if (use_sse2)
5423                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5424                         else
5425                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5426                         code += 4;
5427
5428                         remove = TRUE;
5429                         break;
5430                 }
5431                 default:
5432                         break;
5433                 }
5434
5435                 if (remove) {
5436                         if (patch_info == cfg->patch_info)
5437                                 cfg->patch_info = patch_info->next;
5438                         else {
5439                                 MonoJumpInfo *tmp;
5440
5441                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
5442                                         ;
5443                                 tmp->next = patch_info->next;
5444                         }
5445                 }
5446         }
5447
5448         cfg->code_len = code - cfg->native_code;
5449
5450         g_assert (cfg->code_len < cfg->code_size);
5451
5452 }
5453
5454 void*
5455 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5456 {
5457         guchar *code = p;
5458         CallInfo *cinfo;
5459         MonoMethodSignature *sig;
5460         MonoInst *inst;
5461         int i, n, stack_area = 0;
5462
5463         /* Keep this in sync with mono_arch_get_argument_info */
5464
5465         if (enable_arguments) {
5466                 /* Allocate a new area on the stack and save arguments there */
5467                 sig = mono_method_signature (cfg->method);
5468
5469                 cinfo = get_call_info (sig, FALSE);
5470
5471                 n = sig->param_count + sig->hasthis;
5472
5473                 stack_area = ALIGN_TO (n * 8, 16);
5474
5475                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
5476
5477                 for (i = 0; i < n; ++i) {
5478                         inst = cfg->varinfo [i];
5479
5480                         if (inst->opcode == OP_REGVAR)
5481                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
5482                         else {
5483                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
5484                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
5485                         }
5486                 }
5487         }
5488
5489         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
5490         amd64_set_reg_template (code, AMD64_RDI);
5491         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
5492         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5493
5494         if (enable_arguments) {
5495                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
5496
5497                 g_free (cinfo);
5498         }
5499
5500         return code;
5501 }
5502
5503 enum {
5504         SAVE_NONE,
5505         SAVE_STRUCT,
5506         SAVE_EAX,
5507         SAVE_EAX_EDX,
5508         SAVE_XMM
5509 };
5510
5511 void*
5512 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5513 {
5514         guchar *code = p;
5515         int save_mode = SAVE_NONE;
5516         MonoMethod *method = cfg->method;
5517         int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
5518         
5519         switch (rtype) {
5520         case MONO_TYPE_VOID:
5521                 /* special case string .ctor icall */
5522                 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
5523                         save_mode = SAVE_EAX;
5524                 else
5525                         save_mode = SAVE_NONE;
5526                 break;
5527         case MONO_TYPE_I8:
5528         case MONO_TYPE_U8:
5529                 save_mode = SAVE_EAX;
5530                 break;
5531         case MONO_TYPE_R4:
5532         case MONO_TYPE_R8:
5533                 save_mode = SAVE_XMM;
5534                 break;
5535         case MONO_TYPE_VALUETYPE:
5536                 save_mode = SAVE_STRUCT;
5537                 break;
5538         default:
5539                 save_mode = SAVE_EAX;
5540                 break;
5541         }
5542
5543         /* Save the result and copy it into the proper argument register */
5544         switch (save_mode) {
5545         case SAVE_EAX:
5546                 amd64_push_reg (code, AMD64_RAX);
5547                 /* Align stack */
5548                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5549                 if (enable_arguments)
5550                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
5551                 break;
5552         case SAVE_STRUCT:
5553                 /* FIXME: */
5554                 if (enable_arguments)
5555                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
5556                 break;
5557         case SAVE_XMM:
5558                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5559                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
5560                 /* Align stack */
5561                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5562                 /* 
5563                  * The result is already in the proper argument register so no copying
5564                  * needed.
5565                  */
5566                 break;
5567         case SAVE_NONE:
5568                 break;
5569         default:
5570                 g_assert_not_reached ();
5571         }
5572
5573         /* Set %al since this is a varargs call */
5574         if (save_mode == SAVE_XMM)
5575                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
5576         else
5577                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
5578
5579         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
5580         amd64_set_reg_template (code, AMD64_RDI);
5581         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5582
5583         /* Restore result */
5584         switch (save_mode) {
5585         case SAVE_EAX:
5586                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5587                 amd64_pop_reg (code, AMD64_RAX);
5588                 break;
5589         case SAVE_STRUCT:
5590                 /* FIXME: */
5591                 break;
5592         case SAVE_XMM:
5593                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5594                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
5595                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5596                 break;
5597         case SAVE_NONE:
5598                 break;
5599         default:
5600                 g_assert_not_reached ();
5601         }
5602
5603         return code;
5604 }
5605
5606 void
5607 mono_arch_flush_icache (guint8 *code, gint size)
5608 {
5609         /* Not needed */
5610 }
5611
5612 void
5613 mono_arch_flush_register_windows (void)
5614 {
5615 }
5616
5617 gboolean 
5618 mono_arch_is_inst_imm (gint64 imm)
5619 {
5620         return amd64_is_imm32 (imm);
5621 }
5622
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
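/*
 * A REX prefix has the form 0100WRXB (0x40-0x4f); the R and B bits extend
 * the reg and rm fields of the following ModRM byte to four bits, which is
 * why amd64_rex_r () and amd64_rex_b () are added to the decoded register
 * numbers below.
 */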

static int reg_to_ucontext_reg [] = {
        REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
        REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
        REG_RIP
};

/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer overflow.
 */
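/*
 * For example (illustrative encoding), "48 f7 f8" is "idiv %rax": an 0xf7
 * opcode whose ModRM byte has mod == 3 and reg == 7. The trap is an
 * overflow rather than a division by zero when the divisor is -1, as in
 * G_MININT64 / -1.
 */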
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
        ucontext_t *ctx = (ucontext_t*)sigctx;
        guint8* rip;
        int reg;

        rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];

        if (IS_REX (rip [0])) {
                reg = amd64_rex_r (rip [0]);
                rip ++;
        }
        else
                reg = 0;

        if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
                /* idiv REG */
                reg += x86_modrm_rm (rip [1]);

                if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
                        return TRUE;
        }

        return FALSE;
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
        return 3;
}

gpointer*
mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
{
        guint32 reg;
        guint32 disp;
        guint8 rex = 0;

        /* go to the start of the call instruction
         *
         * address_byte = (m << 6) | (o << 3) | reg
         * call opcode: 0xff address_byte displacement
         * 0xff m=1,o=2 imm8
         * 0xff m=2,o=2 imm32
         */
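        /*
         * For example (illustrative encodings):
         *   ff 50 30                 call *0x30(%rax)   (m=1, o=2, imm8)
         *   ff 90 30 00 00 00        call *0x30(%rax)   (m=2, o=2, imm32)
         *   41 ff 15 xx xx xx xx     call *xx(%rip)
         */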
        code -= 7;

        /*
         * A given byte sequence can match more than one case here, so we have to
         * be really careful about the ordering of the cases. Longer sequences
         * come first.
         */
        if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
                /* call OFFSET(%rip) */
                return NULL;
        }
        else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
                /* call *[reg+disp32] */
                if (IS_REX (code [0]))
                        rex = code [0];
                reg = amd64_modrm_rm (code [2]);
                disp = *(guint32*)(code + 3);
                //printf ("B: [%%r%d+0x%x]\n", reg, disp);
        }
        else if (code [2] == 0xe8) {
                /* call <ADDR> */
                return NULL;
        }
        else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
                /* call *%reg */
                return NULL;
        }
        else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
                /* call *[reg+disp8] */
                if (IS_REX (code [3]))
                        rex = code [3];
                reg = amd64_modrm_rm (code [5]);
                disp = *(guint8*)(code + 6);
                //printf ("B: [%%r%d+0x%x]\n", reg, disp);
        }
        else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
                /*
                 * This is an interface call; we should check that the cases above
                 * cannot match it first, e.g.:
                 * 8b 40 30   mov    0x30(%eax),%eax
                 * ff 10      call   *(%eax)
                 */
                if (IS_REX (code [4]))
                        rex = code [4];
                reg = amd64_modrm_rm (code [6]);
                disp = 0;
        }
        else
                g_assert_not_reached ();

        reg += amd64_rex_b (rex);

        /* R11 is clobbered by the trampoline code */
        g_assert (reg != AMD64_R11);

        return (gpointer)(((guint64)(regs [reg])) + disp);
}

gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
{
        guint32 reg;
        guint32 disp;

        code -= 10;

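        /*
         * We look for a 10 byte sequence of the form (illustrative bytes):
         *   4? 8b c?       mov REG, %rax
         *   48 8b 40 xx    mov <OFFSET>(%rax), %rax
         *   48 ff d0       call *%rax
         */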
        if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
                /* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
                reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
                disp = code [6];

                if (reg == AMD64_RAX)
                        return NULL;
                else
                        return (gpointer*)(((guint64)(regs [reg])) + disp);
        }

        return NULL;
}

/*
 * Support for fast access to the thread-local lmf structure using the FS
 * segment register on NPTL + kernel 2.6.x.
 */

static gboolean tls_offset_inited = FALSE;

/* The function pointed to by METHOD should compile to simply "return <tls var>;" */
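/*
 * For example, a function such as mono_get_lmf_addr, which returns the
 * address of a __thread variable, typically compiles to something like:
 *   64 48 8b 04 25 00 00 00 00   mov %fs:0x0, %rax
 *   48 8b 80 xx xx xx xx         mov <offset>(%rax), %rax
 *   c3                           retq
 * The patterns below match the code emitted by specific gcc versions.
 */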
static int
read_tls_offset_from_method (void* method)
{
        guint8 *code = (guint8*)method;

        /*
         * Determine the offset of the TLS variable (e.g. mono_lmf_addr) inside
         * the TLS structures by disassembling the function.
         */
        /* This is generated by gcc 3.3.2 */
        if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
                (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
                (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
                (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
                (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
                (code [15] == 0x80)) {
                return *(gint32*)&(code [16]);
        } else if
                /* This is generated by gcc-3.3.2 with -O2 */
                /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
                ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
                 (code [3] == 0x04) && (code [4] == 0x25) &&
                 (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
                 (code [16] == 0xc3)) {
                        return *(gint32*)&(code [12]);
        } else if
                /* This is generated by gcc-3.4.1 */
                ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
                 (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
                 (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
                 (code [13] == 0xc9) && (code [14] == 0xc3)) {
                        return *(gint32*)&(code [9]);
        } else if
                /* This is generated by gcc-3.4.1 with -O2 */
                ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
                 (code [3] == 0x04) && (code [4] == 0x25)) {
                return *(gint32*)&(code [5]);
        }

        return -1;
}

void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
        pthread_t self = pthread_self();
        pthread_attr_t attr;
        void *staddr = NULL;
        size_t stsize = 0;
        struct sigaltstack sa;
#endif

        if (!tls_offset_inited) {
                tls_offset_inited = TRUE;

                lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
                appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
                //thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
        }

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK

        /* Determine stack boundaries */
        if (!mono_running_on_valgrind ()) {
#ifdef HAVE_PTHREAD_GETATTR_NP
                pthread_getattr_np (self, &attr);
#else
#ifdef HAVE_PTHREAD_ATTR_GET_NP
                pthread_attr_get_np (self, &attr);
#elif defined(sun)
                pthread_attr_init (&attr);
                pthread_attr_getstacksize (&attr, &stsize);
#else
#error "Not implemented"
#endif
#endif
#ifndef sun
                pthread_attr_getstack (&attr, &staddr, &stsize);
#endif
        }

        /*
         * staddr seems to be wrong for the main thread, so we keep the value in
         * tls->end_of_stack
         */
        tls->stack_size = stsize;

        /* Setup an alternate signal stack */
        tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
        tls->signal_stack_size = SIGNAL_STACK_SIZE;

        sa.ss_sp = tls->signal_stack;
        sa.ss_size = SIGNAL_STACK_SIZE;
        sa.ss_flags = SS_ONSTACK;
        sigaltstack (&sa, NULL);
#endif
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
        struct sigaltstack sa;

        sa.ss_sp = tls->signal_stack;
        sa.ss_size = SIGNAL_STACK_SIZE;
        sa.ss_flags = SS_DISABLE;
        sigaltstack (&sa, NULL);

        if (tls->signal_stack)
                g_free (tls->signal_stack);
#endif
}

void
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
        MonoCallInst *call = (MonoCallInst*)inst;
        int out_reg = param_regs [0];
        guint64 regpair;
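        /*
         * Note: regpair packs an (argument register, vreg) pair into a guint64
         * for out_ireg_args: the hard register goes in the upper 32 bits and
         * the allocated vreg in the lower 32 bits.
         */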

        if (vt_reg != -1) {
                CallInfo *cinfo = get_call_info (inst->signature, FALSE);
                MonoInst *vtarg;

                if (cinfo->ret.storage == ArgValuetypeInReg) {
                        /*
                         * The valuetype is in RAX:RDX after the call and needs to be copied to
                         * the stack. Push the address here, so the call instruction can
                         * access it.
                         */
                        MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
                        vtarg->sreg1 = vt_reg;
                        mono_bblock_add_inst (cfg->cbb, vtarg);

                        /* Align stack */
                        MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
                }
                else {
                        MONO_INST_NEW (cfg, vtarg, OP_SETREG);
                        vtarg->sreg1 = vt_reg;
                        vtarg->dreg = mono_regstate_next_int (cfg->rs);
                        mono_bblock_add_inst (cfg->cbb, vtarg);

                        regpair = (((guint64)out_reg) << 32) + vtarg->dreg;
                        call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));

                        out_reg = param_regs [1];
                }

                g_free (cinfo);
        }

        /* add the this argument */
        if (this_reg != -1) {
                MonoInst *this;
                MONO_INST_NEW (cfg, this, OP_SETREG);
                this->type = this_type;
                this->sreg1 = this_reg;
                this->dreg = mono_regstate_next_int (cfg->rs);
                mono_bblock_add_inst (cfg->cbb, this);

                regpair = (((guint64)out_reg) << 32) + this->dreg;
                call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
        }
}

MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
        MonoInst *ins = NULL;

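        /*
         * The opcodes created below are presumably implemented with x87
         * instructions (e.g. OP_SIN -> fsin), so they are disabled when fp
         * arithmetic is done with SSE2.
         */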
        if (use_sse2)
                return NULL;

        if (cmethod->klass == mono_defaults.math_class) {
                if (strcmp (cmethod->name, "Sin") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_SIN);
                        ins->inst_i0 = args [0];
                } else if (strcmp (cmethod->name, "Cos") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_COS);
                        ins->inst_i0 = args [0];
                } else if (strcmp (cmethod->name, "Tan") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_TAN);
                        ins->inst_i0 = args [0];
                } else if (strcmp (cmethod->name, "Atan") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_ATAN);
                        ins->inst_i0 = args [0];
                } else if (strcmp (cmethod->name, "Sqrt") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_SQRT);
                        ins->inst_i0 = args [0];
                } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
                        MONO_INST_NEW (cfg, ins, OP_ABS);
                        ins->inst_i0 = args [0];
                }
#if 0
                /* OP_FREM is not IEEE compatible */
                else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
                        MONO_INST_NEW (cfg, ins, OP_FREM);
                        ins->inst_i0 = args [0];
                        ins->inst_i1 = args [1];
                }
#endif
        } else if (cmethod->klass->image == mono_defaults.corlib &&
                           (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
                           (strcmp (cmethod->klass->name, "Interlocked") == 0)) {

                if (strcmp (cmethod->name, "Increment") == 0) {
                        MonoInst *ins_iconst;
                        guint32 opcode;

                        if (fsig->params [0]->type == MONO_TYPE_I4)
                                opcode = OP_ATOMIC_ADD_NEW_I4;
                        else if (fsig->params [0]->type == MONO_TYPE_I8)
                                opcode = OP_ATOMIC_ADD_NEW_I8;
                        else
                                g_assert_not_reached ();
                        MONO_INST_NEW (cfg, ins, opcode);
                        MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
                        ins_iconst->inst_c0 = 1;

                        ins->inst_i0 = args [0];
                        ins->inst_i1 = ins_iconst;
                } else if (strcmp (cmethod->name, "Decrement") == 0) {
                        MonoInst *ins_iconst;
                        guint32 opcode;

                        if (fsig->params [0]->type == MONO_TYPE_I4)
                                opcode = OP_ATOMIC_ADD_NEW_I4;
                        else if (fsig->params [0]->type == MONO_TYPE_I8)
                                opcode = OP_ATOMIC_ADD_NEW_I8;
                        else
                                g_assert_not_reached ();
                        MONO_INST_NEW (cfg, ins, opcode);
                        MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
                        ins_iconst->inst_c0 = -1;

                        ins->inst_i0 = args [0];
                        ins->inst_i1 = ins_iconst;
                } else if (strcmp (cmethod->name, "Add") == 0) {
                        guint32 opcode;

                        if (fsig->params [0]->type == MONO_TYPE_I4)
                                opcode = OP_ATOMIC_ADD_I4;
                        else if (fsig->params [0]->type == MONO_TYPE_I8)
                                opcode = OP_ATOMIC_ADD_I8;
                        else
                                g_assert_not_reached ();

                        MONO_INST_NEW (cfg, ins, opcode);

                        ins->inst_i0 = args [0];
                        ins->inst_i1 = args [1];
                } else if (strcmp (cmethod->name, "Exchange") == 0) {
                        guint32 opcode;

                        if (fsig->params [0]->type == MONO_TYPE_I4)
                                opcode = OP_ATOMIC_EXCHANGE_I4;
                        else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
                                         (fsig->params [0]->type == MONO_TYPE_I) ||
                                         (fsig->params [0]->type == MONO_TYPE_OBJECT))
                                opcode = OP_ATOMIC_EXCHANGE_I8;
                        else
                                return NULL;

                        MONO_INST_NEW (cfg, ins, opcode);

                        ins->inst_i0 = args [0];
                        ins->inst_i1 = args [1];
                } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
                        /* 64 bit reads are already atomic */
                        MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
                        ins->inst_i0 = args [0];
                }

                /*
                 * Can't implement CompareExchange methods this way since they have
                 * three arguments.
                 */
        }

        return ins;
}

gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
        return 0;
}

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
        MonoInst* ins;

        if (appdomain_tls_offset == -1)
                return NULL;

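        /* OP_TLS_GET reads the variable directly from the TLS segment at the
           given offset, avoiding a call to mono_domain_get () */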
        MONO_INST_NEW (cfg, ins, OP_TLS_GET);
        ins->inst_offset = appdomain_tls_offset;
        return ins;
}

MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
        MonoInst* ins;

        if (thread_tls_offset == -1)
                return NULL;

        MONO_INST_NEW (cfg, ins, OP_TLS_GET);
        ins->inst_offset = thread_tls_offset;
        return ins;
}