/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Based on mini-x86.c.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Patrik Torstensson
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <sys/mman.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-amd64.h"
#include "inssel.h"
#include "cpu-amd64.h"

static gint lmf_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;

/* Use SSE2 instructions for fp arithmetic */
static gboolean use_sse2 = TRUE;

/* xmm15 is reserved for use by some opcodes */
#define AMD64_CALLEE_FREGS 0xef

#define FPSTACK_SIZE 6

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
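/*
 * Illustrative examples (informational, not part of the build): ALIGN_TO
 * rounds a value up to the next multiple of a power-of-two alignment, and
 * IS_IMM32 tests whether a value fits in the low 32 bits:
 *
 *   ALIGN_TO (13, 8)          -> 16
 *   ALIGN_TO (16, 8)          -> 16
 *   IS_IMM32 (0x7fffffff)     -> TRUE
 *   IS_IMM32 (0x100000000LL)  -> FALSE
 */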

#ifdef PLATFORM_WIN32
/* Under Windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif

#define SIGNAL_STACK_SIZE (64 * 1024)

#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */

/*
 * FIXME:
 * - Use xmm registers instead of the x87 stack
 * - Allocate arguments to global registers
 * - implement emulated opcodes
 * - (all archs) do not store trampoline addresses in method->info since they
 *   are domain specific.
 */

/*
 * Floating point comparison results:
 *                  ZF PF CF
 * A > B            0  0  0
 * A < B            0  0  1
 * A = B            1  0  0
 * UNORDERED        1  1  1
 */
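/*
 * Example (informational): after an fcomi/fucomi style compare these flags
 * map onto the unsigned branch conditions, e.g. ZF=0 && CF=0 ("A > B") is
 * taken by "ja" and CF=1 ("A < B") by "jb"; the unordered case can be
 * filtered out first with "jp", since PF is only set for NaN operands.
 */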

#define NOT_IMPLEMENTED g_assert_not_reached ()

const char*
mono_arch_regname (int reg) {
	switch (reg) {
	case AMD64_RAX: return "%rax";
	case AMD64_RBX: return "%rbx";
	case AMD64_RCX: return "%rcx";
	case AMD64_RDX: return "%rdx";
	case AMD64_RSP: return "%rsp";
	case AMD64_RBP: return "%rbp";
	case AMD64_RDI: return "%rdi";
	case AMD64_RSI: return "%rsi";
	case AMD64_R8: return "%r8";
	case AMD64_R9: return "%r9";
	case AMD64_R10: return "%r10";
	case AMD64_R11: return "%r11";
	case AMD64_R12: return "%r12";
	case AMD64_R13: return "%r13";
	case AMD64_R14: return "%r14";
	case AMD64_R15: return "%r15";
	}
	return "unknown";
}

static const char * xmmregs [] = {
	"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
	"xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};

static const char*
mono_arch_fregname (int reg)
{
	if (reg < AMD64_XMM_NREG)
		return xmmregs [reg];
	else
		return "unknown";
}

static const char*
mono_amd64_regname (int reg, gboolean fp)
{
	if (fp)
		return mono_arch_fregname (reg);
	else
		return mono_arch_regname (reg);
}

static inline void
amd64_patch (unsigned char* code, gpointer target)
{
	/* Skip REX */
	if ((code [0] >= 0x40) && (code [0] <= 0x4f))
		code += 1;

	if ((code [0] & 0xf8) == 0xb8) {
		/* amd64_set_reg_template */
		*(guint64*)(code + 1) = (guint64)target;
	}
	else if (code [0] == 0x8b) {
		/* mov 0(%rip), %dreg */
		*(guint32*)(code + 2) = (guint32)(guint64)target - 7;
	}
	else if ((code [0] == 0xff) && (code [1] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		*(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
	}
	else
		x86_patch (code, (unsigned char*)target);
}
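/*
 * Example (informational): a 64 bit constant load emitted by
 * amd64_set_reg_template looks like
 *
 *   48 b8 xx xx xx xx xx xx xx xx    movabs $imm64, %rax
 *
 * so after skipping the REX prefix (0x48) the opcode byte is 0xb8 + reg and
 * the 8 byte immediate starting at code + 1 is overwritten with the target.
 */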

typedef enum {
	ArgInIReg,
	ArgInFloatSSEReg,
	ArgInDoubleSSEReg,
	ArgOnStack,
	ArgValuetypeInReg,
	ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
	gint16 offset;
	gint8  reg;
	ArgStorage storage;

	/* Only if storage == ArgValuetypeInReg */
	ArgStorage pair_storage [2];
	gint8 pair_regs [2];
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define NEW_ICONST(cfg,dest,val) do {	\
		(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
		(dest)->opcode = OP_ICONST;	\
		(dest)->inst_c0 = (val);	\
		(dest)->type = STACK_I4;	\
	} while (0)

#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };

static inline void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = param_regs [*gr];
		(*gr) ++;
	}
}

#define FLOAT_PARAM_REGS 8

static inline void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= FLOAT_PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		/* A double register */
		if (is_double)
			ainfo->storage = ArgInDoubleSSEReg;
		else
			ainfo->storage = ArgInFloatSSEReg;
		ainfo->reg = *gr;
		(*gr) += 1;
	}
}
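/*
 * Example (informational): for a call such as f (int a, double b, int c)
 * these helpers hand out registers in System V AMD64 order, so "a" goes to
 * %rdi, "c" to %rsi (the second integer slot) and "b" to xmm0; once the six
 * integer or eight SSE slots are exhausted, further arguments fall back to
 * ArgOnStack.
 */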

typedef enum ArgumentClass {
	ARG_CLASS_NO_CLASS,
	ARG_CLASS_MEMORY,
	ARG_CLASS_INTEGER,
	ARG_CLASS_SSE
} ArgumentClass;

static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
	ArgumentClass class2;
	MonoType *ptype;

	ptype = mono_type_get_underlying_type (type);
	switch (ptype->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		class2 = ARG_CLASS_INTEGER;
		break;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		class2 = ARG_CLASS_SSE;
		break;

	case MONO_TYPE_TYPEDBYREF:
		g_assert_not_reached ();

	case MONO_TYPE_VALUETYPE: {
		MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
		int i;

		for (i = 0; i < info->num_fields; ++i) {
			class2 = class1;
			class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	/* Merge */
	if (class1 == class2)
		;
	else if (class1 == ARG_CLASS_NO_CLASS)
		class1 = class2;
	else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
		class1 = ARG_CLASS_MEMORY;
	else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
		class1 = ARG_CLASS_INTEGER;
	else
		class1 = ARG_CLASS_SSE;

	return class1;
}
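/*
 * Worked example (informational): for a struct { int a; float b; } both
 * fields land in the same eightbyte, so the merge sees INTEGER and SSE and
 * resolves to INTEGER per the rules above, i.e. the struct travels in a
 * general purpose register; struct { float a; float b; } merges SSE with
 * SSE and stays in an xmm register.
 */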

static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, quad, nquads, i;
	ArgumentClass args [2];
	MonoMarshalType *info;
	MonoClass *klass;

	klass = mono_class_from_mono_type (type);
	if (sig->pinvoke)
		size = mono_type_native_stack_size (&klass->byval_arg, NULL);
	else
		size = mono_type_stack_size (&klass->byval_arg, NULL);

	if (!sig->pinvoke || (size == 0) || (size > 16)) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	/* FIXME: Handle structs smaller than 8 bytes */
	//if ((size % 8) != 0)
	//	NOT_IMPLEMENTED;

	if (size > 8)
		nquads = 2;
	else
		nquads = 1;

	/*
	 * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
	 * The X87 and SSEUP stuff is left out since there are no such types in
	 * the CLR.
	 */
	info = mono_marshal_load_type_info (klass);
	g_assert (info);
	if (info->native_size > 16) {
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (info->native_size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	for (quad = 0; quad < nquads; ++quad) {
		int size, align;
		ArgumentClass class1;

		class1 = ARG_CLASS_NO_CLASS;
		for (i = 0; i < info->num_fields; ++i) {
			size = mono_marshal_type_size (info->fields [i].field->type,
						       info->fields [i].mspec,
						       &align, TRUE, klass->unicode);
			if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
				/* Unaligned field */
				NOT_IMPLEMENTED;
			}

			/* Skip fields in other quad */
			if ((quad == 0) && (info->fields [i].offset >= 8))
				continue;
			if ((quad == 1) && (info->fields [i].offset < 8))
				continue;

			class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
		}
		g_assert (class1 != ARG_CLASS_NO_CLASS);
		args [quad] = class1;
	}

	/* Post merger cleanup */
	if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
		args [0] = args [1] = ARG_CLASS_MEMORY;

	/* Allocate registers */
	{
		int orig_gr = *gr;
		int orig_fr = *fr;

		ainfo->storage = ArgValuetypeInReg;
		ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
		for (quad = 0; quad < nquads; ++quad) {
			switch (args [quad]) {
			case ARG_CLASS_INTEGER:
				if (*gr >= PARAM_REGS)
					args [quad] = ARG_CLASS_MEMORY;
				else {
					ainfo->pair_storage [quad] = ArgInIReg;
					if (is_return)
						ainfo->pair_regs [quad] = return_regs [*gr];
					else
						ainfo->pair_regs [quad] = param_regs [*gr];
					(*gr) ++;
				}
				break;
			case ARG_CLASS_SSE:
				if (*fr >= FLOAT_PARAM_REGS)
					args [quad] = ARG_CLASS_MEMORY;
				else {
					ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
					ainfo->pair_regs [quad] = *fr;
					(*fr) ++;
				}
				break;
			case ARG_CLASS_MEMORY:
				break;
			default:
				g_assert_not_reached ();
			}
		}

		if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
			/* Revert possible register assignments */
			*gr = orig_gr;
			*fr = orig_fr;

			ainfo->offset = *stack_size;
			*stack_size += ALIGN_TO (info->native_size, 8);
			ainfo->storage = ArgOnStack;
		}
	}
}
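/*
 * Worked example (informational): a 16 byte struct { double x; double y; }
 * classifies both eightbytes as SSE, so it is passed as ArgValuetypeInReg
 * with pair_regs naming two consecutive xmm registers; struct { long a;
 * long b; } gets two INTEGER quads and uses two general purpose parameter
 * registers (or %rax/%rdx for returns) instead.
 */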

/*
 * get_call_info:
 *
 *  Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;

	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;

	/* return value */
	{
		ret_type = mono_type_get_underlying_type (sig->ret);
		switch (ret_type->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = AMD64_RAX;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = AMD64_RAX;
			break;
		case MONO_TYPE_R4:
			cinfo->ret.storage = ArgInFloatSSEReg;
			cinfo->ret.reg = AMD64_XMM0;
			break;
		case MONO_TYPE_R8:
			cinfo->ret.storage = ArgInDoubleSSEReg;
			cinfo->ret.reg = AMD64_XMM0;
			break;
		case MONO_TYPE_VALUETYPE: {
			guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

			add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
			if (cinfo->ret.storage == ArgOnStack)
				/* The caller passes the address where the value is stored */
				add_general (&gr, &stack_size, &cinfo->ret);
			break;
		}
		case MONO_TYPE_TYPEDBYREF:
			/* Same as a valuetype with size 24 */
			add_general (&gr, &stack_size, &cinfo->ret);
			break;
		case MONO_TYPE_VOID:
			break;
		default:
			g_error ("Can't handle 0x%x as a return value", sig->ret->type);
		}
	}

	/* this */
	if (sig->hasthis)
		add_general (&gr, &stack_size, cinfo->args + 0);

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */
			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		if (sig->params [i]->byref) {
			add_general (&gr, &stack_size, ainfo);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_VALUETYPE:
			add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_TYPEDBYREF:
			stack_size += sizeof (MonoTypedRef);
			ainfo->storage = ArgOnStack;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	if (stack_size & 0x8) {
		/* The AMD64 ABI requires each stack frame to be 16 byte aligned */
		cinfo->need_stack_align = TRUE;
		stack_size += 8;
	}
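	/*
	 * Example (informational): if the arguments above used 24 bytes of
	 * stack, the test against 0x8 pads stack_size to 32 so the frame
	 * keeps the 16 byte alignment required at call sites.
	 */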

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (csig, FALSE);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}

static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
	/* FIXME: stubbed out on AMD64, so no feature information is reported */
	return 0;
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	guint16 fpcw;

	/* spec compliance requires running with double precision */
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
	fpcw &= ~X86_FPCW_PRECC_MASK;
	fpcw |= X86_FPCW_PREC_DOUBLE;
	__asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
}
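/*
 * Example (informational): the x87 control word typically defaults to
 * extended precision (0x037f on most systems); after the masking above it
 * reads back as 0x027f, i.e. the precision control field is switched to
 * 53 bit (double) rounding as the spec comment requires.
 */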

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	int eax, ebx, ecx, edx;
	guint32 opts = 0;

	/* FIXME: AMD64 */

	*exclude_mask = 0;
	/* Feature Flags function, flags returned in EDX. */
	if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
		if (edx & (1 << 15)) {
			opts |= MONO_OPT_CMOV;
			if (edx & 1)
				opts |= MONO_OPT_FCMOV;
			else
				*exclude_mask |= MONO_OPT_FCMOV;
		} else
			*exclude_mask |= MONO_OPT_CMOV;
	}
	return opts;
}

gboolean
mono_amd64_is_sse2 (void)
{
	return use_sse2;
}

static gboolean
is_regsize_var (MonoType *t) {
	if (t->byref)
		return TRUE;
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return TRUE;
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return TRUE;
	case MONO_TYPE_VALUETYPE:
		return FALSE;
	}
	return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		/* we don't allocate I1 to registers because there is no simple way to sign extend
		 * 8-bit quantities in caller saved registers on x86 */
		if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
		    (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2) ||
		    (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	/* We use the callee saved registers for global allocation */
	regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
	regs = g_list_prepend (regs, (gpointer)AMD64_R12);
	regs = g_list_prepend (regs, (gpointer)AMD64_R13);
	regs = g_list_prepend (regs, (gpointer)AMD64_R14);
	regs = g_list_prepend (regs, (gpointer)AMD64_R15);

	return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	MonoInst *ins = cfg->varinfo [vmv->idx];

	if (cfg->method->save_lmf)
		/* The register is already saved */
		/* subtract 1 for the invisible store in the prolog */
		return (ins->opcode == OP_ARG) ? 0 : 1;
	else
		/* push+pop */
		return (ins->opcode == OP_ARG) ? 1 : 2;
}
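/*
 * Example (informational): a plain local without an LMF frame costs 2
 * memory references (the prolog push and epilog pop of the callee saved
 * register) while an argument costs 1; with save_lmf the registers are
 * stored anyway, so the marginal cost drops by one in both cases.
 */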

void
mono_arch_allocate_vars (MonoCompile *m)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = mono_method_get_header (m->method);

	sig = mono_method_signature (m->method);

	cinfo = get_call_info (sig, FALSE);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never passed or returned in registers.
	 */

	/* Locals are allocated backwards from %fp */
	m->frame_reg = AMD64_RBP;
	offset = 0;

	/* Reserve space for caller saved registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
			offset += sizeof (gpointer);
		}

	if (m->method->save_lmf) {
		/* Reserve stack space for saving LMF + argument regs */
		offset += sizeof (MonoLMF);
		if (lmf_tls_offset == -1)
			/* Need to save argument regs too */
			offset += (AMD64_NREG * 8) + (8 * 8);
		m->arch.lmf_offset = offset;
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
				/* The register is volatile */
				m->ret->opcode = OP_REGOFFSET;
				m->ret->inst_basereg = AMD64_RBP;
				offset += 8;
				m->ret->inst_offset = - offset;
			}
			else {
				m->ret->opcode = OP_REGVAR;
				m->ret->inst_c0 = cinfo->ret.reg;
			}
			break;
		case ArgValuetypeInReg:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			offset += 16;
			m->ret->opcode = OP_REGOFFSET;
			m->ret->inst_basereg = AMD64_RBP;
			m->ret->inst_offset = - offset;
			break;
		default:
			g_assert_not_reached ();
		}
		m->ret->dreg = m->ret->inst_c0;
	}

	/* Allocate locals */
	offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align) {
		offset += (locals_stack_align - 1);
		offset &= ~(locals_stack_align - 1);
	}
	for (i = m->locals_start; i < m->num_varinfo; i++) {
		if (offsets [i] != -1) {
			MonoInst *inst = m->varinfo [i];
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = AMD64_RBP;
			inst->inst_offset = - (offset + offsets [i]);
			//printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
		}
	}
	g_free (offsets);
	offset += locals_stack_size;

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = m->varinfo [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			/* FIXME: Allocate volatile arguments to registers */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			/*
			 * Under AMD64, all registers used to pass arguments to functions
			 * are volatile across calls.
			 * FIXME: Optimize this.
			 */
			if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatSSEReg:
			case ArgInDoubleSSEReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = ainfo->reg;
				break;
			case ArgOnStack:
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = AMD64_RBP;
				inst->inst_offset = ainfo->offset + ARGS_OFFSET;
				break;
			case ArgValuetypeInReg:
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack)) {
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = AMD64_RBP;
				/* These arguments are saved to the stack in the prolog */
				if (ainfo->storage == ArgValuetypeInReg)
					offset += 2 * sizeof (gpointer);
				else
					offset += sizeof (gpointer);
				inst->inst_offset = - offset;
			}
		}
	}

	m->stack_offset = offset;

	g_free (cinfo);
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (sig, FALSE);

	if (cinfo->ret.storage == ArgValuetypeInReg)
		cfg->ret_var_is_local = TRUE;

	g_free (cinfo);
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
{
	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_OUTARG_REG;
		arg->inst_left = tree;
		arg->inst_right = (MonoInst*)call;
		arg->unused = reg;
		call->used_iregs |= 1 << reg;
		break;
	case ArgInFloatSSEReg:
		arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
		arg->inst_left = tree;
		arg->inst_right = (MonoInst*)call;
		arg->unused = reg;
		call->used_fregs |= 1 << reg;
		break;
	case ArgInDoubleSSEReg:
		arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
		arg->inst_left = tree;
		arg->inst_right = (MonoInst*)call;
		arg->unused = reg;
		call->used_fregs |= 1 << reg;
		break;
	default:
		g_assert_not_reached ();
	}
}

/* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode,
 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
 */

static int
arg_storage_to_ldind (ArgStorage storage)
{
	switch (storage) {
	case ArgInIReg:
		return CEE_LDIND_I;
	case ArgInDoubleSSEReg:
		return CEE_LDIND_R8;
	case ArgInFloatSSEReg:
		return CEE_LDIND_R4;
	default:
		g_assert_not_reached ();
	}

	return -1;
}

/*
 * take the arguments and generate the arch-specific
 * instructions to properly call the function in call.
 * This includes pushing, moving arguments to the right register
 * etc.
 * Issue: who does the spilling if needed, and when?
 */
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
	MonoInst *arg, *in;
	MonoMethodSignature *sig;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (sig, sig->pinvoke);

	for (i = 0; i < n; ++i) {
		ainfo = cinfo->args + i;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			MonoMethodSignature *tmp_sig;

			/* Emit the signature cookie just before the implicit arguments */
			MonoInst *sig_arg;
			/* FIXME: Add support for signature tokens to AOT */
			cfg->disable_aot = TRUE;

			g_assert (cinfo->sig_cookie.storage == ArgOnStack);

			/*
			 * mono_ArgIterator_Setup assumes the signature cookie is
			 * passed first and all the arguments which were before it are
			 * passed on the stack after the signature. So compensate by
			 * passing a different signature.
			 */
			tmp_sig = mono_metadata_signature_dup (call->signature);
			tmp_sig->param_count -= call->signature->sentinelpos;
			tmp_sig->sentinelpos = 0;
			memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
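			/*
			 * Example (informational): for a vararg call with
			 * sentinelpos == 2 and four parameters, tmp_sig keeps
			 * only the two trailing vararg parameters, which is
			 * the shape mono_ArgIterator_Setup expects to walk.
			 */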

			MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
			sig_arg->inst_p0 = tmp_sig;

			MONO_INST_NEW (cfg, arg, OP_OUTARG);
			arg->inst_left = sig_arg;
			arg->type = STACK_PTR;

			/* prepend, so they get reversed */
			arg->next = call->out_args;
			call->out_args = arg;
		}

		if (is_virtual && i == 0) {
			/* the argument will be attached to the call instruction */
			in = call->args [i];
		} else {
			MONO_INST_NEW (cfg, arg, OP_OUTARG);
			in = call->args [i];
			arg->cil_code = in->cil_code;
			arg->inst_left = in;
			arg->type = in->type;
			/* prepend, so they get reversed */
			arg->next = call->out_args;
			call->out_args = arg;

			if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT (sig->params [i - sig->hasthis]))) {
				gint align;
				guint32 size;

				if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
					size = sizeof (MonoTypedRef);
					align = sizeof (gpointer);
				}
				else if (sig->pinvoke)
					size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
				else
					size = mono_type_stack_size (&in->klass->byval_arg, &align);
				if (ainfo->storage == ArgValuetypeInReg) {
					if (ainfo->pair_storage [1] == ArgNone) {
						MonoInst *load;

						/* Simpler case */

						MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
						load->inst_left = in;

						add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
					}
					else {
						/* Trees can't be shared so make a copy */
						MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
						MonoInst *load, *load2, *offset_ins;

						/* Reg1 */
						MONO_INST_NEW (cfg, load, CEE_LDIND_I);
						load->ssa_op = MONO_SSA_LOAD;
						load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

						NEW_ICONST (cfg, offset_ins, 0);
						MONO_INST_NEW (cfg, load2, CEE_ADD);
						load2->inst_left = load;
						load2->inst_right = offset_ins;

						MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
						load->inst_left = load2;

						add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);

						/* Reg2 */
						MONO_INST_NEW (cfg, load, CEE_LDIND_I);
						load->ssa_op = MONO_SSA_LOAD;
						load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

						NEW_ICONST (cfg, offset_ins, 8);
						MONO_INST_NEW (cfg, load2, CEE_ADD);
						load2->inst_left = load;
						load2->inst_right = offset_ins;

						MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
						load->inst_left = load2;

						MONO_INST_NEW (cfg, arg, OP_OUTARG);
						arg->cil_code = in->cil_code;
						arg->type = in->type;
						/* prepend, so they get reversed */
						arg->next = call->out_args;
						call->out_args = arg;

						add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);

						/* Prepend a copy inst */
						MONO_INST_NEW (cfg, arg, CEE_STIND_I);
						arg->cil_code = in->cil_code;
						arg->ssa_op = MONO_SSA_STORE;
						arg->inst_left = vtaddr;
						arg->inst_right = in;
						arg->type = in->type;

						/* prepend, so they get reversed */
						arg->next = call->out_args;
						call->out_args = arg;
					}
				}
				else {
					arg->opcode = OP_OUTARG_VT;
					arg->klass = in->klass;
					arg->unused = sig->pinvoke;
					arg->inst_imm = size;
				}
			}
			else {
				switch (ainfo->storage) {
				case ArgInIReg:
					add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
					break;
				case ArgInFloatSSEReg:
				case ArgInDoubleSSEReg:
					add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
					break;
				case ArgOnStack:
					arg->opcode = OP_OUTARG;
					if (!sig->params [i - sig->hasthis]->byref) {
						if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
							arg->opcode = OP_OUTARG_R4;
						else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
							arg->opcode = OP_OUTARG_R8;
					}
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
	}

	if (cinfo->need_stack_align) {
		MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
		/* prepend, so they get reversed */
		arg->next = call->out_args;
		call->out_args = arg;
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	g_free (cinfo);

	return call;
}

#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
	if (ins->inst_i0->inst_c0) { \
		x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
		if ((cfg->opt & MONO_OPT_BRANCH) && \
		    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
			x86_branch8 (code, cond, 0, sign); \
		else \
			x86_branch32 (code, cond, 0, sign); \
	} \
} else { \
	if (ins->inst_true_bb->native_offset) { \
		x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
		if ((cfg->opt & MONO_OPT_BRANCH) && \
		    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
			x86_branch8 (code, cond, 0, sign); \
		else \
			x86_branch32 (code, cond, 0, sign); \
	} \
}
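/*
 * Note (informational): a branch whose target is already bound jumps
 * straight to the known native offset; an unbound forward branch records a
 * patch entry and emits either an 8 bit or a 32 bit displacement, with the
 * short form only chosen when MONO_OPT_BRANCH can prove the target is
 * within imm8 range.
 */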

/* emit an exception if the condition fails */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
	do {                                                        \
		mono_add_patch_info (cfg, code - cfg->native_code,   \
				    MONO_PATCH_INFO_EXC, exc_name);  \
		x86_branch32 (code, cond, 0, signed);               \
	} while (0)

#define EMIT_FPCOMPARE(code) do { \
	amd64_fcompp (code); \
	amd64_fnstsw (code); \
} while (0)

#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
	amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
	amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
	amd64_ ##op (code); \
	amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
	amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
} while (0)
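/*
 * Note (informational): SSE2 has no instructions for the transcendental
 * operations, so EMIT_SSE2_FPFUNC bounces the value through the x87 stack:
 * e.g. EMIT_SSE2_FPFUNC (code, fsin, dreg, sreg) spills the xmm source just
 * below the stack pointer, runs x87 fsin on it and reloads the result into
 * the destination xmm register.
 */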

static guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
	mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);

	if (mono_compile_aot) {
		amd64_call_membase (code, AMD64_RIP, 0);
	}
	else {
		gboolean near_call = FALSE;

		/*
		 * Indirect calls are expensive so try to make a near call if possible.
		 * The caller memory is allocated by the code manager so it is
		 * guaranteed to be at a 32 bit offset.
		 */

		if (patch_type != MONO_PATCH_INFO_ABS) {
			/* The target is in memory allocated using the code manager */
			near_call = TRUE;

			if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
				if (((MonoMethod*)data)->klass->image->assembly->aot_module)
					/* The callee might be an AOT method */
					near_call = FALSE;
			}
		}
		else {
			if (mono_find_class_init_trampoline_by_addr (data))
				near_call = TRUE;
			else {
				MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
				if (info) {
					if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
						strstr (cfg->method->name, info->name)) {
						/* A call to the wrapped function */
						if ((((guint64)data) >> 32) == 0)
							near_call = TRUE;
					}
					else
						near_call = TRUE;
				}
				else if ((((guint64)data) >> 32) == 0)
					near_call = TRUE;
			}
		}

		if (near_call) {
			amd64_call_code (code, 0);
		}
		else {
			amd64_set_reg_template (code, GP_SCRATCH_REG);
			amd64_call_reg (code, GP_SCRATCH_REG);
		}
	}

	return code;
}
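/*
 * Example (informational): the near form is a 5 byte "call rel32" (e8 xx xx
 * xx xx) whose displacement is patched in later, while the far form
 * materializes the 64 bit target into the scratch register first, roughly
 *
 *   49 bb xx xx xx xx xx xx xx xx    movabs $target, %r11
 *   41 ff d3                         call   *%r11
 */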
1329
1330 /* FIXME: Add more instructions */
1331 #define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_SETREG) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))
1332
1333 static void
1334 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1335 {
1336         MonoInst *ins, *last_ins = NULL;
1337         ins = bb->code;
1338
1339         while (ins) {
1340
1341                 switch (ins->opcode) {
1342                 case OP_ICONST:
1343                 case OP_I8CONST:
1344                         /* reg = 0 -> XOR (reg, reg) */
1345                         /* XOR sets cflags on x86, so we cant do it always */
1346                         if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
1347                                 ins->opcode = CEE_XOR;
1348                                 ins->sreg1 = ins->dreg;
1349                                 ins->sreg2 = ins->dreg;
1350                         }
1351                         break;
1352                 case OP_MUL_IMM: 
1353                         /* remove unnecessary multiplication with 1 */
1354                         if (ins->inst_imm == 1) {
1355                                 if (ins->dreg != ins->sreg1) {
1356                                         ins->opcode = OP_MOVE;
1357                                 } else if (last_ins) {
1358                                         last_ins->next = ins->next;
1359                                         ins = ins->next;
1360                                         continue;
1361                                 }
1362                         }
1363                         break;
1364                 case OP_COMPARE_IMM:
1365                         /* OP_COMPARE_IMM (reg, 0) 
1366                          * --> 
1367                          * OP_AMD64_TEST_NULL (reg) 
1368                          */
1369                         if (!ins->inst_imm)
1370                                 ins->opcode = OP_AMD64_TEST_NULL;
1371                         break;
1372                 case OP_ICOMPARE_IMM:
1373                         if (!ins->inst_imm)
1374                                 ins->opcode = OP_X86_TEST_NULL;
1375                         break;
1376                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
1377                         /* 
1378                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1379                          * OP_AMD64_ICOMPARE_MEMBASE_IMM offset(basereg), imm
1380                          * -->
1381                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1382                          * OP_ICOMPARE_IMM reg, imm
1383                          *
1384                          * Note: if imm = 0 then OP_ICOMPARE_IMM is replaced with OP_X86_TEST_NULL
1385                          */
1386                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1387                             ins->inst_basereg == last_ins->inst_destbasereg &&
1388                             ins->inst_offset == last_ins->inst_offset) {
1389                                         ins->opcode = OP_ICOMPARE_IMM;
1390                                         ins->sreg1 = last_ins->sreg1;
1391
1392                                         /* check if we can remove cmp reg,0 with test null */
1393                                         if (!ins->inst_imm)
1394                                                 ins->opcode = OP_X86_TEST_NULL;
1395                                 }
1396
1397                         break;
1398                 case OP_LOAD_MEMBASE:
1399                 case OP_LOADI4_MEMBASE:
1400                         /* 
1401                          * Note: if reg1 = reg2 the load op is removed
1402                          *
1403                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1404                          * OP_LOAD_MEMBASE offset(basereg), reg2
1405                          * -->
1406                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1407                          * OP_MOVE reg1, reg2
1408                          */
1409                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1410                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1411                             ins->inst_basereg == last_ins->inst_destbasereg &&
1412                             ins->inst_offset == last_ins->inst_offset) {
1413                                 if (ins->dreg == last_ins->sreg1) {
1414                                         last_ins->next = ins->next;                             
1415                                         ins = ins->next;                                
1416                                         continue;
1417                                 } else {
1418                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1419                                         ins->opcode = OP_MOVE;
1420                                         ins->sreg1 = last_ins->sreg1;
1421                                 }
1422
1423                         /* 
1424                          * Note: reg1 must be different from the basereg in the second load
1425                          * Note: if reg1 = reg2 the second load is removed
1426                          *
1427                          * OP_LOAD_MEMBASE offset(basereg), reg1
1428                          * OP_LOAD_MEMBASE offset(basereg), reg2
1429                          * -->
1430                          * OP_LOAD_MEMBASE offset(basereg), reg1
1431                          * OP_MOVE reg1, reg2
1432                          */
1433                         } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1434                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1435                               ins->inst_basereg != last_ins->dreg &&
1436                               ins->inst_basereg == last_ins->inst_basereg &&
1437                               ins->inst_offset == last_ins->inst_offset) {
1438
1439                                 if (ins->dreg == last_ins->dreg) {
1440                                         last_ins->next = ins->next;                             
1441                                         ins = ins->next;                                
1442                                         continue;
1443                                 } else {
1444                                         ins->opcode = OP_MOVE;
1445                                         ins->sreg1 = last_ins->dreg;
1446                                 }
1447
1448                                 //g_assert_not_reached ();
1449
1450 #if 0
1451                         /* 
1452                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1453                          * OP_LOAD_MEMBASE offset(basereg), reg
1454                          * -->
1455                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1456                          * OP_ICONST reg, imm
1457                          */
1458                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1459                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1460                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1461                                    ins->inst_offset == last_ins->inst_offset) {
1462                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1463                                 ins->opcode = OP_ICONST;
1464                                 ins->inst_c0 = last_ins->inst_imm;
1465                                 g_assert_not_reached (); // check this rule
1466 #endif
1467                         }
1468                         break;
1469                 case OP_LOADU1_MEMBASE:
1470                 case OP_LOADI1_MEMBASE:
1471                         /* 
1472                          * Note: if reg1 = reg2 the load op is removed
1473                          *
1474                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1475                          * OP_LOAD_MEMBASE offset(basereg), reg2
1476                          * -->
1477                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1478                          * OP_MOVE reg1, reg2
1479                          */
1480                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1481                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1482                                         ins->inst_offset == last_ins->inst_offset) {
1483                                 if (ins->dreg == last_ins->sreg1) {
1484                                         last_ins->next = ins->next;                             
1485                                         ins = ins->next;                                
1486                                         continue;
1487                                 } else {
1488                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1489                                         ins->opcode = OP_MOVE;
1490                                         ins->sreg1 = last_ins->sreg1;
1491                                 }
1492                         }
1493                         break;
1494                 case OP_LOADU2_MEMBASE:
1495                 case OP_LOADI2_MEMBASE:
1496                         /* 
1497                          * Note: if reg1 = reg2 the load op is removed
1498                          *
1499                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1500                          * OP_LOAD_MEMBASE offset(basereg), reg2
1501                          * -->
1502                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1503                          * OP_MOVE reg1, reg2
1504                          */
1505                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1506                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1507                                         ins->inst_offset == last_ins->inst_offset) {
1508                                 if (ins->dreg == last_ins->sreg1) {
1509                                         last_ins->next = ins->next;                             
1510                                         ins = ins->next;                                
1511                                         continue;
1512                                 } else {
1513                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1514                                         ins->opcode = OP_MOVE;
1515                                         ins->sreg1 = last_ins->sreg1;
1516                                 }
1517                         }
1518                         break;
1519                 case CEE_CONV_I4:
1520                 case CEE_CONV_U4:
1521                 case OP_MOVE:
1522                         /*
1523                          * Removes:
1524                          *
1525                          * OP_MOVE reg, reg 
1526                          */
1527                         if (ins->dreg == ins->sreg1) {
1528                                 if (last_ins)
1529                                         last_ins->next = ins->next;                             
1530                                 ins = ins->next;
1531                                 continue;
1532                         }
1533                         /* 
1534                          * Removes:
1535                          *
1536                          * OP_MOVE sreg, dreg 
1537                          * OP_MOVE dreg, sreg
1538                          */
1539                         if (last_ins && last_ins->opcode == OP_MOVE &&
1540                             ins->sreg1 == last_ins->dreg &&
1541                             ins->dreg == last_ins->sreg1) {
1542                                 last_ins->next = ins->next;                             
1543                                 ins = ins->next;                                
1544                                 continue;
1545                         }
1546                         break;
1547                 }
1548                 last_ins = ins;
1549                 ins = ins->next;
1550         }
1551         bb->last_ins = last_ins;
1552 }
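
/*
 * Illustrative transformation performed by the pass above (R10/R11 are
 * example symbolic registers):
 *
 *   OP_STOREI4_MEMBASE_REG R10, 8(basereg)
 *   OP_LOADI4_MEMBASE      8(basereg) -> R11
 * becomes
 *   OP_STOREI4_MEMBASE_REG R10, 8(basereg)
 *   OP_MOVE                R10 -> R11
 *
 * and the load disappears entirely when R10 == R11.
 */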
1553
1554 static const int 
1555 branch_cc_table [] = {
1556         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1557         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1558         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1559 };
1560
1561 static int
1562 opcode_to_x86_cond (int opcode)
1563 {
1564         switch (opcode) {
1565         case OP_IBEQ:
1566                 return X86_CC_EQ;
1567         case OP_IBNE_UN:
1568                 return X86_CC_NE;
1569         case OP_IBLT:
1570                 return X86_CC_LT;
1571         case OP_IBLT_UN:
1572                 return X86_CC_LT;
1573         case OP_IBGT:
1574                 return X86_CC_GT;
1575         case OP_IBGT_UN:
1576                 return X86_CC_GT;
1577         case OP_IBGE:
1578                 return X86_CC_GE;
1579         case OP_IBGE_UN:
1580                 return X86_CC_GE;
1581         case OP_IBLE:
1582                 return X86_CC_LE;
1583         case OP_IBLE_UN:
1584                 return X86_CC_LE;
1585         case OP_COND_EXC_IOV:
1586                 return X86_CC_O;
1587         case OP_COND_EXC_IC:
1588                 return X86_CC_C;
1589         default:
1590                 g_assert_not_reached ();
1591         }
1592
1593         return -1;
1594 }
1595
1596 /*
1597  * returns the offset used by spillvar. It allocates a new
1598  * spill variable if necessary. 
1599  */
1600 static int
1601 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1602 {
1603         MonoSpillInfo **si, *info;
1604         int i = 0;
1605
1606         si = &cfg->spill_info; 
1607         
1608         while (i <= spillvar) {
1609
1610                 if (!*si) {
1611                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1612                         info->next = NULL;
1613                         cfg->stack_offset += sizeof (gpointer);
1614                         info->offset = - cfg->stack_offset;
1615                 }
1616
1617                 if (i == spillvar)
1618                         return (*si)->offset;
1619
1620                 i++;
1621                 si = &(*si)->next;
1622         }
1623
1624         g_assert_not_reached ();
1625         return 0;
1626 }
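
/*
 * Example (assumed frame state): with cfg->stack_offset == 16 on entry, the
 * first mono_spillvar_offset (cfg, 0) grows the offset to 24 (8-byte
 * gpointer) and returns -24; calling it again with the same index returns
 * the cached -24 without growing the frame.
 */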
1627
1628 /*
1629  * returns the offset used by spillvar. It allocates a new
1630  * spill float variable if necessary. 
1631  * (same as mono_spillvar_offset but for float)
1632  */
1633 static int
1634 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1635 {
1636         MonoSpillInfo **si, *info;
1637         int i = 0;
1638
1639         si = &cfg->spill_info_float; 
1640         
1641         while (i <= spillvar) {
1642
1643                 if (!*si) {
1644                         *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1645                         info->next = NULL;
1646                         cfg->stack_offset += sizeof (double);
1647                         info->offset = - cfg->stack_offset;
1648                 }
1649
1650                 if (i == spillvar)
1651                         return (*si)->offset;
1652
1653                 i++;
1654                 si = &(*si)->next;
1655         }
1656
1657         g_assert_not_reached ();
1658         return 0;
1659 }
1660
1661 /*
1662  * Creates a store for spilled floating point items
1663  */
1664 static MonoInst*
1665 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1666 {
1667         MonoInst *store;
1668         MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1669         store->sreg1 = reg;
1670         store->inst_destbasereg = AMD64_RBP;
1671         store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1672
1673         DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)store->inst_offset, reg));
1674         return store;
1675 }
1676
1677 /*
1678  * Creates a load for spilled floating point items 
1679  */
1680 static MonoInst*
1681 create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
1682 {
1683         MonoInst *load;
1684         MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
1685         load->dreg = reg;
1686         load->inst_basereg = AMD64_RBP;
1687         load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1688
1689         DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)load->inst_offset, reg));
1690         return load;
1691 }
1692
1693 #define is_global_ireg(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_SAVED_REG ((r)))
1694 #define ireg_is_freeable(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_REG ((r)))
1695 #define freg_is_freeable(r) ((r) >= 0 && (r) < AMD64_XMM_NREG)
1696
1697 #define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
1698 #define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
1699 #define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
1700 #define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
1701 #define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
1702 #define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
1703 #define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
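
/*
 * For example, reg_is_soft (AMD64_RAX, FALSE) is FALSE since RAX is a hard
 * register, while any index >= MONO_MAX_IREGS (a symbolic register such as
 * R40) is soft and only maps to a hard register through rassign () once the
 * allocator below has assigned it.
 */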
1704
1705 typedef struct {
1706         int born_in;
1707         int killed_in;
1708         int last_use;
1709         int prev_use;
1710         int flags;              /* used to track fp spill/load */
1711 } RegTrack;
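
/*
 * The fields above hold the 1-based instruction positions assigned by the
 * forward pass in mono_arch_local_regalloc (), so born_in == 0 means the
 * register was never seen; 'flags' holds the MONO_X86_* hint bits defined
 * further down.
 */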
1712
1713 static const char*const * ins_spec = amd64_desc;
1714
1715 static void
1716 print_ins (int i, MonoInst *ins)
1717 {
1718         const char *spec = ins_spec [ins->opcode];
1719         g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1720         if (!spec)
1721                 g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
1722         if (spec [MONO_INST_DEST]) {
1723                 gboolean fp = (spec [MONO_INST_DEST] == 'f');
1724                 if (reg_is_soft (ins->dreg, fp))
1725                         g_print (" R%d <-", ins->dreg);
1726                 else
1727                         g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
1728         }
1729         if (spec [MONO_INST_SRC1]) {
1730                 gboolean fp = (spec [MONO_INST_SRC1] == 'f');
1731                 if (reg_is_soft (ins->sreg1, fp))
1732                         g_print (" R%d", ins->sreg1);
1733                 else
1734                         g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
1735         }
1736         if (spec [MONO_INST_SRC2]) {
1737                 gboolean fp = (spec [MONO_INST_SRC2] == 'f');
1738                 if (reg_is_soft (ins->sreg2, fp))
1739                         g_print (" R%d", ins->sreg2);
1740                 else
1741                         g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
1742         }
1743         if (spec [MONO_INST_CLOB])
1744                 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1745         g_print ("\n");
1746 }
1747
1748 static void
1749 print_regtrack (RegTrack *t, int num)
1750 {
1751         int i;
1752         char buf [32];
1753         const char *r;
1754         
1755         for (i = 0; i < num; ++i) {
1756                 if (!t [i].born_in)
1757                         continue;
1758                 if (i >= MONO_MAX_IREGS) {
1759                         g_snprintf (buf, sizeof(buf), "R%d", i);
1760                         r = buf;
1761                 } else
1762                         r = mono_arch_regname (i);
1763                 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1764         }
1765 }
1766
1767 typedef struct InstList InstList;
1768
1769 struct InstList {
1770         InstList *prev;
1771         InstList *next;
1772         MonoInst *data;
1773 };
1774
1775 static inline InstList*
1776 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1777 {
1778         InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1779         item->data = data;
1780         item->prev = NULL;
1781         item->next = list;
1782         if (list)
1783                 list->prev = item;
1784         return item;
1785 }
1786
1787 /*
1788  * Force the spilling of the variable in the symbolic register 'reg'.
1789  */
1790 static int
1791 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
1792 {
1793         MonoInst *load;
1794         int i, sel, spill;
1795         int *assign, *symbolic;
1796
1797         if (fp) {
1798                 assign = cfg->rs->fassign;
1799                 symbolic = cfg->rs->fsymbolic;
1800         }
1801         else {
1802                 assign = cfg->rs->iassign;
1803                 symbolic = cfg->rs->isymbolic;
1804         }       
1805         
1806         sel = assign [reg];
1807         /*i = cfg->rs->isymbolic [sel];
1808         g_assert (i == reg);*/
1809         i = reg;
1810         spill = ++cfg->spill_count;
1811         assign [i] = -spill - 1;
1812         if (fp)
1813                 mono_regstate_free_float (cfg->rs, sel);
1814         else
1815                 mono_regstate_free_int (cfg->rs, sel);
1816         /* we need to create a spill var and insert a load to sel after the current instruction */
1817         if (fp)
1818                 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1819         else
1820                 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1821         load->dreg = sel;
1822         load->inst_basereg = AMD64_RBP;
1823         load->inst_offset = mono_spillvar_offset (cfg, spill);
1824         if (item->prev) {
1825                 while (ins->next != item->prev->data)
1826                         ins = ins->next;
1827         }
1828         load->next = ins->next;
1829         ins->next = load;
1830         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1831         if (fp)
1832                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1833         else
1834                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1835         g_assert (i == sel);
1836
1837         return sel;
1838 }
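
/*
 * Spill encoding used above and decoded by the allocator: a spilled
 * symbolic register has assign [reg] == -spill - 1 (e.g. spill slot 3 is
 * stored as -4 and recovered with spill = -val - 1), while plain -1 means
 * "not assigned yet".
 */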
1839
1840 static int
1841 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
1842 {
1843         MonoInst *load;
1844         int i, sel, spill;
1845         int *assign, *symbolic;
1846
1847         if (fp) {
1848                 assign = cfg->rs->fassign;
1849                 symbolic = cfg->rs->fsymbolic;
1850         }
1851         else {
1852                 assign = cfg->rs->iassign;
1853                 symbolic = cfg->rs->isymbolic;
1854         }
1855
1856         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1857         /* exclude the registers in the current instruction */
1858         if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
1859                 if (reg_is_soft (ins->sreg1, fp))
1860                         regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
1861                 else
1862                         regmask &= ~ (1 << ins->sreg1);
1863                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
1864         }
1865         if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
1866                 if (reg_is_soft (ins->sreg2, fp))
1867                         regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
1868                 else
1869                         regmask &= ~ (1 << ins->sreg2);
1870                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
1871         }
1872         if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
1873                 regmask &= ~ (1 << ins->dreg);
1874                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
1875         }
1876
1877         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1878         g_assert (regmask); /* need at least a register we can free */
1879         sel = -1;
1880         /* we should track prev_use and spill the register whose next use is farthest away */
1881         if (fp) {
1882                 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1883                         if (regmask & (1 << i)) {
1884                                 sel = i;
1885                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
1886                                 break;
1887                         }
1888                 }
1889
1890                 i = cfg->rs->fsymbolic [sel];
1891                 spill = ++cfg->spill_count;
1892                 cfg->rs->fassign [i] = -spill - 1;
1893                 mono_regstate_free_float (cfg->rs, sel);
1894         }
1895         else {
1896                 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1897                         if (regmask & (1 << i)) {
1898                                 sel = i;
1899                                 DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1900                                 break;
1901                         }
1902                 }
1903
1904                 i = cfg->rs->isymbolic [sel];
1905                 spill = ++cfg->spill_count;
1906                 cfg->rs->iassign [i] = -spill - 1;
1907                 mono_regstate_free_int (cfg->rs, sel);
1908         }
1909
1910         /* we need to create a spill var and insert a load to sel after the current instruction */
1911         MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
1912         load->dreg = sel;
1913         load->inst_basereg = AMD64_RBP;
1914         load->inst_offset = mono_spillvar_offset (cfg, spill);
1915         if (item->prev) {
1916                 while (ins->next != item->prev->data)
1917                         ins = ins->next;
1918         }
1919         load->next = ins->next;
1920         ins->next = load;
1921         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
1922         if (fp)
1923                 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1924         else
1925                 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1926         g_assert (i == sel);
1927         
1928         return sel;
1929 }
1930
1931 static MonoInst*
1932 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
1933 {
1934         MonoInst *copy;
1935
1936         if (fp)
1937                 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1938         else
1939                 MONO_INST_NEW (cfg, copy, OP_MOVE);
1940
1941         copy->dreg = dest;
1942         copy->sreg1 = src;
1943         if (ins) {
1944                 copy->next = ins->next;
1945                 ins->next = copy;
1946         }
1947         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_amd64_regname (src, fp), mono_amd64_regname (dest, fp)));
1948         return copy;
1949 }
1950
1951 static MonoInst*
1952 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
1953 {
1954         MonoInst *store;
1955         MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
1956         store->sreg1 = reg;
1957         store->inst_destbasereg = AMD64_RBP;
1958         store->inst_offset = mono_spillvar_offset (cfg, spill);
1959         if (ins) {
1960                 store->next = ins->next;
1961                 ins->next = store;
1962         }
1963         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%rbp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
1964         return store;
1965 }
1966
1967 static void
1968 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1969 {
1970         MonoInst *prev;
1971         if (item->next) {
1972                 prev = item->next->data;
1973
1974                 while (prev->next != ins)
1975                         prev = prev->next;
1976                 to_insert->next = ins;
1977                 prev->next = to_insert;
1978         } else {
1979                 to_insert->next = ins;
1980         }
1981         /* 
1982          * Needed, otherwise an ins appended to the end while processing the
1983          * next instruction would land after this one.
1984          */
1985         item->data = to_insert; 
1986 }
1987
1988 /* flags used in reginfo->flags */
1989 enum {
1990         MONO_X86_FP_NEEDS_LOAD_SPILL = 1 << 0,
1991         MONO_X86_FP_NEEDS_SPILL      = 1 << 1,
1992         MONO_X86_FP_NEEDS_LOAD       = 1 << 2,
1993         MONO_X86_REG_NOT_ECX         = 1 << 3,
1994         MONO_X86_REG_EAX             = 1 << 4,
1995         MONO_X86_REG_EDX             = 1 << 5,
1996         MONO_X86_REG_ECX             = 1 << 6
1997 };
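
/*
 * The names are inherited from the x86 port: on amd64 these flags simply
 * pin a symbolic register to RAX/RDX/RCX (e.g. MONO_X86_REG_ECX forces a
 * shift count into RCX, and the EAX/EDX pair marks the two halves of a
 * long result); see mono_amd64_alloc_int_reg () below.
 */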
1998
1999 static int
2000 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
2001 {
2002         int val;
2003         int test_mask = dest_mask;
2004
2005         if (flags & MONO_X86_REG_EAX)
2006                 test_mask &= (1 << AMD64_RAX);
2007         else if (flags & MONO_X86_REG_EDX)
2008                 test_mask &= (1 << AMD64_RDX);
2009         else if (flags & MONO_X86_REG_ECX)
2010                 test_mask &= (1 << AMD64_RCX);
2011         else if (flags & MONO_X86_REG_NOT_ECX)
2012                 test_mask &= ~ (1 << AMD64_RCX);
2013
2014         val = mono_regstate_alloc_int (cfg->rs, test_mask);
2015         if (val >= 0 && test_mask != dest_mask)
2016                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
2017
2018         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
2019                 DEBUG(g_print ("\tFailed to allocate from flag-suggested mask (%u), retrying excluding ECX\n", test_mask));
2020                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & ~(1 << AMD64_RCX)));
2021         }
2022
2023         if (val < 0) {
2024                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
2025                 if (val < 0)
2026                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
2027         }
2028
2029         return val;
2030 }
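
/*
 * Typical fallback chain (illustrative): a register flagged with
 * MONO_X86_REG_ECX first tries test_mask == (1 << AMD64_RCX); if RCX is
 * busy, the unrestricted dest_mask is tried, and as a last resort
 * get_register_spilling () evicts a register by spilling its current
 * symbolic owner.
 */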
2031
2032 static int
2033 mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
2034 {
2035         int val;
2036
2037         val = mono_regstate_alloc_float (cfg->rs, dest_mask);
2038
2039         if (val < 0) {
2040                 val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
2041         }
2042
2043         return val;
2044 }
2045
2046 static inline void
2047 assign_ireg (MonoRegState *rs, int reg, int hreg)
2048 {
2049         g_assert (reg >= MONO_MAX_IREGS);
2050         g_assert (hreg < MONO_MAX_IREGS);
2051         g_assert (! is_global_ireg (hreg));
2052
2053         rs->iassign [reg] = hreg;
2054         rs->isymbolic [hreg] = reg;
2055         rs->ifree_mask &= ~ (1 << hreg);
2056 }
2057
2058 /*#include "cprop.c"*/
2059
2060 /*
2061  * Local register allocation.
2062  * We first scan the list of instructions and we save the liveness info of
2063  * each register (when the register is first used, when its value is set etc.).
2064  * We also reverse the list of instructions (in the InstList list) because assigning
2065  * registers backwards allows for more tricks to be used.
2066  */
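/*
 * Walking the reversed list means a symbolic register is first encountered
 * at its last use, so the allocator can bind it there to whatever hard
 * register the instruction prefers (e.g. the fixed RAX/RDX destinations of
 * division/remainder) and keep that binding while moving back up towards
 * the definition.
 */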
2067 void
2068 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2069 {
2070         MonoInst *ins;
2071         MonoRegState *rs = cfg->rs;
2072         int i, val, fpcount;
2073         RegTrack *reginfo, *reginfof;
2074         RegTrack *reginfo1, *reginfo2, *reginfod;
2075         InstList *tmp, *reversed = NULL;
2076         const char *spec;
2077         guint32 src1_mask, src2_mask, dest_mask;
2078         GList *fspill_list = NULL;
2079         int fspill = 0;
2080
2081         if (!bb->code)
2082                 return;
2083         rs->next_vireg = bb->max_ireg;
2084         rs->next_vfreg = bb->max_freg;
2085         mono_regstate_assign (rs);
2086         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
2087         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
2088         rs->ifree_mask = AMD64_CALLEE_REGS;
2089         rs->ffree_mask = AMD64_CALLEE_FREGS;
2090
2091         if (!use_sse2)
2092                 /* The fp stack is 6 entries deep */
2093                 rs->ffree_mask = 0x3f;
2094
2095         ins = bb->code;
2096
2097         /*if (cfg->opt & MONO_OPT_COPYPROP)
2098                 local_copy_prop (cfg, ins);*/
2099
2100         i = 1;
2101         fpcount = 0;
2102         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2103         /* forward pass on the instructions to collect register liveness info */
2104         while (ins) {
2105                 spec = ins_spec [ins->opcode];
2106                 
2107                 DEBUG (print_ins (i, ins));
2108
2109                 if (spec [MONO_INST_SRC1]) {
2110                         if (spec [MONO_INST_SRC1] == 'f') {
2111                                 reginfo1 = reginfof;
2112
2113                                 if (!use_sse2) {
2114                                         GList *spill;
2115
2116                                         spill = g_list_first (fspill_list);
2117                                         if (spill && fpcount < FPSTACK_SIZE) {
2118                                                 reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
2119                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2120                                         } else
2121                                                 fpcount--;
2122                                 }
2123                         }
2124                         else
2125                                 reginfo1 = reginfo;
2126                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2127                         reginfo1 [ins->sreg1].last_use = i;
2128                         if (spec [MONO_INST_SRC1] == 'L') {
2129                                 /* The virtual register is allocated sequentially */
2130                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
2131                                 reginfo1 [ins->sreg1 + 1].last_use = i;
2132                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
2133                                         reginfo1 [ins->sreg1 + 1].born_in = i;
2134
2135                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
2136                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
2137                         }
2138                 } else {
2139                         ins->sreg1 = -1;
2140                 }
2141                 if (spec [MONO_INST_SRC2]) {
2142                         if (spec [MONO_INST_SRC2] == 'f') {
2143                                 reginfo2 = reginfof;
2144
2145                                 if (!use_sse2) {
2146                                         GList *spill;
2147
2148                                         spill = g_list_first (fspill_list);
2149                                         if (spill) {
2150                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
2151                                                 fspill_list = g_list_remove (fspill_list, spill->data);
2152                                                 if (fpcount >= FPSTACK_SIZE) {
2153                                                         fspill++;
2154                                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2155                                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
2156                                                 }
2157                                         } else
2158                                                 fpcount--;
2159                                 }
2160                         }
2161                         else
2162                                 reginfo2 = reginfo;
2163                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2164                         reginfo2 [ins->sreg2].last_use = i;
2165                         if (spec [MONO_INST_SRC2] == 'L') {
2166                                 /* The virtual register is allocated sequentially */
2167                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
2168                                 reginfo2 [ins->sreg2 + 1].last_use = i;
2169                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
2170                                         reginfo2 [ins->sreg2 + 1].born_in = i;
2171                         }
2172                         if (spec [MONO_INST_CLOB] == 's') {
2173                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
2174                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
2175                         }
2176                 } else {
2177                         ins->sreg2 = -1;
2178                 }
2179                 if (spec [MONO_INST_DEST]) {
2180                         if (spec [MONO_INST_DEST] == 'f') {
2181                                 reginfod = reginfof;
2182                                 if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
2183                                         if (fpcount >= FPSTACK_SIZE) {
2184                                                 reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
2185                                                 fspill++;
2186                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2187                                                 fpcount--;
2188                                         }
2189                                         fpcount++;
2190                                 }
2191                         }
2192                         else
2193                                 reginfod = reginfo;
2194                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2195                                 reginfod [ins->dreg].killed_in = i;
2196                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2197                         reginfod [ins->dreg].last_use = i;
2198                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2199                                 reginfod [ins->dreg].born_in = i;
2200                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
2201                                 /* The virtual register is allocated sequentially */
2202                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2203                                 reginfod [ins->dreg + 1].last_use = i;
2204                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2205                                         reginfod [ins->dreg + 1].born_in = i;
2206
2207                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
2208                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
2209                         }
2210                 } else {
2211                         ins->dreg = -1;
2212                 }
2213
2214                 if (spec [MONO_INST_CLOB] == 'c') {
2215                         /* A call instruction implicitly uses all registers in call->out_ireg_args */
2216
2217                         MonoCallInst *call = (MonoCallInst*)ins;
2218                         GSList *list;
2219
2220                         list = call->out_ireg_args;
2221                         if (list) {
2222                                 while (list) {
2223                                         guint64 regpair;
2224                                         int reg, hreg;
2225
2226                                         regpair = (guint64) (list->data);
2227                                         hreg = regpair >> 32;
2228                                         reg = regpair & 0xffffffff;
2229
2230                                         reginfo [reg].prev_use = reginfo [reg].last_use;
2231                                         reginfo [reg].last_use = i;
2232
2233                                         list = g_slist_next (list);
2234                                 }
2235                         }
2236
2237                         list = call->out_freg_args;
2238                         if (use_sse2 && list) {
2239                                 while (list) {
2240                                         guint64 regpair;
2241                                         int reg, hreg;
2242
2243                                         regpair = (guint64) (list->data);
2244                                         hreg = regpair >> 32;
2245                                         reg = regpair & 0xffffffff;
2246
2247                                         reginfof [reg].prev_use = reginfof [reg].last_use;
2248                                         reginfof [reg].last_use = i;
2249
2250                                         list = g_slist_next (list);
2251                                 }
2252                         }
2253                 }
2254
2255                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2256                 ++i;
2257                 ins = ins->next;
2258         }
2259
2260         // todo: check if we have anything left on fp stack, in verify mode?
2261         fspill = 0;
2262
2263         DEBUG (print_regtrack (reginfo, rs->next_vireg));
2264         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2265         tmp = reversed;
2266         while (tmp) {
2267                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
2268                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
2269                 --i;
2270                 ins = tmp->data;
2271                 spec = ins_spec [ins->opcode];
2272                 prev_dreg = -1;
2273                 clob_dreg = -1;
2274                 DEBUG (g_print ("processing:"));
2275                 DEBUG (print_ins (i, ins));
2276                 if (spec [MONO_INST_CLOB] == 's') {
2277                         /*
2278                          * Shift opcodes, SREG2 must be RCX
2279                          */
2280                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
2281                                 if (ins->sreg2 < MONO_MAX_IREGS) {
2282                                         /* Argument already in hard reg, need to copy */
2283                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2284                                         insert_before_ins (ins, tmp, copy);
2285                                 }
2286                                 else {
2287                                         DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
2288                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2289                                 }
2290                         } else {
2291                                 int need_ecx_spill = TRUE;
2292                                 /* 
2293                                  * we first check if src1/dreg is already assigned a register
2294                                  * and then we force a spill of the var assigned to ECX.
2295                                  */
2296                                 /* the destination register can't be ECX */
2297                                 dest_mask &= ~ (1 << AMD64_RCX);
2298                                 src1_mask &= ~ (1 << AMD64_RCX);
2299                                 val = rs->iassign [ins->dreg];
2300                                 /* 
2301                                  * the destination register is already assigned to ECX:
2302                                  * we need to allocate another register for it and then
2303                                  * copy from this to ECX.
2304                                  */
2305                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
2306                                         int new_dest;
2307                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2308                                         g_assert (new_dest >= 0);
2309                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
2310
2311                                         rs->isymbolic [new_dest] = ins->dreg;
2312                                         rs->iassign [ins->dreg] = new_dest;
2313                                         clob_dreg = ins->dreg;
2314                                         ins->dreg = new_dest;
2315                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
2316                                         need_ecx_spill = FALSE;
2317                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
2318                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
2319                                         rs->iassign [ins->dreg] = val;
2320                                         rs->isymbolic [val] = prev_dreg;
2321                                         ins->dreg = val;*/
2322                                 }
2323                                 if (is_global_ireg (ins->sreg2)) {
2324                                         MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
2325                                         insert_before_ins (ins, tmp, copy);
2326                                 }
2327                                 else {
2328                                         val = rs->iassign [ins->sreg2];
2329                                         if (val >= 0 && val != AMD64_RCX) {
2330                                                 MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
2331                                                 DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
2332                                                 move->next = ins;
2333                                                 g_assert_not_reached ();
2334                                                 /* FIXME: where is move connected to the instruction list? */
2335                                                 //tmp->prev->data->next = move;
2336                                         }
2337                                         else {
2338                                                 if (val == AMD64_RCX)
2339                                                         need_ecx_spill = FALSE;
2340                                         }
2341                                 }
2342                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
2343                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
2344                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
2345                                         mono_regstate_free_int (rs, AMD64_RCX);
2346                                 }
2347                                 if (!is_global_ireg (ins->sreg2))
2348                                         /* force-set sreg2 */
2349                                         assign_ireg (rs, ins->sreg2, AMD64_RCX);
2350                         }
2351                         ins->sreg2 = AMD64_RCX;
2352                 } else if (spec [MONO_INST_CLOB] == 'd') { 
2353                         /*
2354                          * DIVISION/REMAINDER
2355                          */
2356                         int dest_reg = AMD64_RAX;
2357                         int clob_reg = AMD64_RDX;
2358                         if (spec [MONO_INST_DEST] == 'd') {
2359                                 dest_reg = AMD64_RDX; /* remainder */
2360                                 clob_reg = AMD64_RAX;
2361                         }
2362                         if (is_global_ireg (ins->dreg))
2363                                 val = ins->dreg;
2364                         else
2365                                 val = rs->iassign [ins->dreg];
2366                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
2367                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2368                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2369                                 mono_regstate_free_int (rs, dest_reg);
2370                         }
2371                         if (val < 0) {
2372                                 if (val < -1) {
2373                                         /* the register gets spilled after this inst */
2374                                         int spill = -val -1;
2375                                         dest_mask = 1 << clob_reg;
2376                                         prev_dreg = ins->dreg;
2377                                         val = mono_regstate_alloc_int (rs, dest_mask);
2378                                         if (val < 0)
2379                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
2380                                         rs->iassign [ins->dreg] = val;
2381                                         if (spill)
2382                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2383                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2384                                         rs->isymbolic [val] = prev_dreg;
2385                                         ins->dreg = val;
2386                                 } else {
2387                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
2388                                         prev_dreg = ins->dreg;
2389                                         assign_ireg (rs, ins->dreg, dest_reg);
2390                                         ins->dreg = dest_reg;
2391                                         val = dest_reg;
2392                                 }
2393                         }
2394
2395                         //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
2396                         if (val != dest_reg) { /* force a copy */
2397                                 create_copy_ins (cfg, val, dest_reg, ins, FALSE);
2398                                 if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
2399                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
2400                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
2401                                         mono_regstate_free_int (rs, dest_reg);
2402                                 }
2403                         }
2404                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= MONO_MAX_IREGS)) {
2405                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
2406                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
2407                                 mono_regstate_free_int (rs, clob_reg);
2408                         }
2409                         src1_mask = 1 << AMD64_RAX;
2410                         src2_mask = 1 << AMD64_RCX;
2411                 }
2412                 if (spec [MONO_INST_DEST] == 'l') {
2413                         int hreg;
2414                         val = rs->iassign [ins->dreg];
2415                         /* check special case when dreg has been moved from ecx (clob shift) */
2416                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2417                                 hreg = clob_dreg + 1;
2418                         else
2419                                 hreg = ins->dreg + 1;
2420
2421                         /* base prev_dreg on fixed hreg, handle clob case */
2422                         val = hreg - 1;
2423
2424                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
2425                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2426                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2427                                 mono_regstate_free_int (rs, AMD64_RAX);
2428                         }
2429                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
2430                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
2431                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
2432                                 mono_regstate_free_int (rs, AMD64_RDX);
2433                         }
2434                 }
2435
2436                 /*
2437                  * TRACK DREG
2438                  */
2439                 if (spec [MONO_INST_DEST] == 'f') {
2440                         if (use_sse2) {
2441                                 /* Allocate an XMM reg the same way as an int reg */
2442                                 if (reg_is_soft (ins->dreg, TRUE)) {
2443                                         val = rs->fassign [ins->dreg];
2444                                         prev_dreg = ins->dreg;
2445                                         
2446                                         if (val < 0) {
2447                                                 int spill = 0;
2448                                                 if (val < -1) {
2449                                                         /* the register gets spilled after this inst */
2450                                                         spill = -val -1;
2451                                                 }
2452                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
2453                                                 rs->fassign [ins->dreg] = val;
2454                                                 if (spill)
2455                                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
2456                                         }
2457                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
2458                                         rs->fsymbolic [val] = prev_dreg;
2459                                         ins->dreg = val;
2460                                 }
2461                         }
2462                         else if (spec [MONO_INST_CLOB] != 'm') {
2463                                 if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
2464                                         GList *spill_node;
2465                                         MonoInst *store;
2466                                         spill_node = g_list_first (fspill_list);
2467                                         g_assert (spill_node);
2468
2469                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
2470                                         insert_before_ins (ins, tmp, store);
2471                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2472                                         fspill--;
2473                                 }
2474                         }
2475                 } else if (spec [MONO_INST_DEST] == 'L') {
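                        /*
                         * An 'L' destination occupies a register pair: the low half is
                         * allocated to ins->dreg and the high half (dreg + 1) is
                         * allocated below and remembered in ins->unused.
                         */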
2476                         int hreg;
2477                         val = rs->iassign [ins->dreg];
2478                         /* check special case when dreg has been moved from rcx (clob shift) */
2479                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2480                                 hreg = clob_dreg + 1;
2481                         else
2482                                 hreg = ins->dreg + 1;
2483
2484                         /* base prev_dreg on fixed hreg, handle clob case */
2485                         prev_dreg = hreg - 1;
2486
2487                         if (val < 0) {
2488                                 int spill = 0;
2489                                 if (val < -1) {
2490                                         /* the register gets spilled after this inst */
2491                                         spill = -val -1;
2492                                 }
2493                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2494                                 rs->iassign [ins->dreg] = val;
2495                                 if (spill)
2496                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2497                         }
2498
2499                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
2500  
2501                         rs->isymbolic [val] = hreg - 1;
2502                         ins->dreg = val;
2503                         
2504                         val = rs->iassign [hreg];
2505                         if (val < 0) {
2506                                 int spill = 0;
2507                                 if (val < -1) {
2508                                         /* the register gets spilled after this inst */
2509                                         spill = -val -1;
2510                                 }
2511                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2512                                 rs->iassign [hreg] = val;
2513                                 if (spill)
2514                                         create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2515                         }
2516
2517                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
2518                         rs->isymbolic [val] = hreg;
2519                         /* save the allocated high reg into ins->unused */
2520                         ins->unused = val;
2521
2522                         /* check if we can free our long reg */
2523                         if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2524                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
2525                                 mono_regstate_free_int (rs, val);
2526                         }
2527                 }
2528                 else if (ins->dreg >= MONO_MAX_IREGS) {
2529                         int hreg;
2530                         val = rs->iassign [ins->dreg];
2531                         if (spec [MONO_INST_DEST] == 'l') {
2532                                 /* check special case when dreg has been moved from rcx (clob shift) */
2533                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2534                                         hreg = clob_dreg + 1;
2535                                 else
2536                                         hreg = ins->dreg + 1;
2537
2538                                 /* base prev_dreg on fixed hreg, handle clob case */
2539                                 prev_dreg = hreg - 1;
2540                         } else
2541                                 prev_dreg = ins->dreg;
2542
2543                         if (val < 0) {
2544                                 int spill = 0;
2545                                 if (val < -1) {
2546                                         /* the register gets spilled after this inst */
2547                                         spill = -val -1;
2548                                 }
2549                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2550                                 rs->iassign [ins->dreg] = val;
2551                                 if (spill)
2552                                         create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
2553                         }
2554                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2555                         rs->isymbolic [val] = prev_dreg;
2556                         ins->dreg = val;
2557                         /* handle cases where lreg needs to be rax:rdx */
2558                         if (spec [MONO_INST_DEST] == 'l') {
2559                                 /* check special case when dreg has been moved from rcx (clob shift) */
2560                                 int hreg = prev_dreg + 1;
2561                                 val = rs->iassign [hreg];
2562                                 if (val < 0) {
2563                                         int spill = 0;
2564                                         if (val < -1) {
2565                                                 /* the register gets spilled after this inst */
2566                                                 spill = -val -1;
2567                                         }
2568                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2569                                         rs->iassign [hreg] = val;
2570                                         if (spill)
2571                                                 create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
2572                                 }
2573                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2574                                 rs->isymbolic [val] = hreg;
2575                                 if (ins->dreg == AMD64_RAX) {
2576                                         if (val != AMD64_RDX)
2577                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2578                                 } else if (ins->dreg == AMD64_RDX) {
2579                                         if (val == AMD64_RAX) {
2580                                                 /* swap */
2581                                                 g_assert_not_reached ();
2582                                         } else {
2583                                                 /* two forced copies */
2584                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2585                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2586                                         }
2587                                 } else {
2588                                         if (val == AMD64_RDX) {
2589                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2590                                         } else {
2591                                                 /* two forced copies */
2592                                                 create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
2593                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2594                                         }
2595                                 }
2596                                 if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2597                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2598                                         mono_regstate_free_int (rs, val);
2599                                 }
2600                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2601                                 /* this instruction only outputs to RAX, need to copy */
2602                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
2603                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2604                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
2605                         }
2606                 }
2607
2608                 if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
2609                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
2610                         mono_regstate_free_float (rs, ins->dreg);
2611                 }
2612                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2613                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2614                         mono_regstate_free_int (rs, ins->dreg);
2615                 }
2616
2617                 /* put src1 in EAX if it needs to be */
2618                 if (spec [MONO_INST_SRC1] == 'a') {
2619                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2620                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2621                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
2622                                 mono_regstate_free_int (rs, AMD64_RAX);
2623                         }
2624                         if (ins->sreg1 < MONO_MAX_IREGS) {
2625                                 /* The argument is already in a hard reg, need to copy */
2626                                 MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
2627                                 insert_before_ins (ins, tmp, copy);
2628                         }
2629                         else
2630                                 /* force-set sreg1 */
2631                                 assign_ireg (rs, ins->sreg1, AMD64_RAX);
2632                         ins->sreg1 = AMD64_RAX;
2633                 }
2634
2635                 /*
2636                  * TRACK SREG1
2637                  */
2638                 if (spec [MONO_INST_SRC1] == 'f') {
2639                         if (use_sse2) {
2640                                 if (reg_is_soft (ins->sreg1, TRUE)) {
2641                                         val = rs->fassign [ins->sreg1];
2642                                         prev_sreg1 = ins->sreg1;
2643                                         if (val < 0) {
2644                                                 int spill = 0;
2645                                                 if (val < -1) {
2646                                                         /* the register gets spilled after this inst */
2647                                                         spill = -val -1;
2648                                                 }
2649                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
2650                                                 rs->fassign [ins->sreg1] = val;
2651                                                 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
2652                                                 if (spill) {
2653                                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
2654                                                         insert_before_ins (ins, tmp, store);
2655                                                 }
2656                                         }
2657                                         rs->fsymbolic [val] = prev_sreg1;
2658                                         ins->sreg1 = val;
2659                                 } else {
2660                                         prev_sreg1 = -1;
2661                                 }
2662                         }
2663                         else if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2665                                 MonoInst *load;
2666                                 MonoInst *store = NULL;
2667
2668                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2669                                         GList *spill_node;
2670                                         spill_node = g_list_first (fspill_list);
2671                                         g_assert (spill_node);
2672
2673                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2674                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2675                                 }
2676
2677                                 fspill++;
2678                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2679                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2680                                 insert_before_ins (ins, tmp, load);
2681                                 if (store) 
2682                                         insert_before_ins (load, tmp, store);
2683                         }
2684                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2685                         /* force source to be same as dest */
2686                         rs->iassign [ins->sreg1] = ins->dreg;
2687                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2688
2689                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2690                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2691
2692                         ins->sreg1 = ins->dreg;
2693                         /* 
2694                          * No need for saving the reg, we know that src1=dest in these cases
2695                          * ins->inst_c0 = ins->unused;
2696                          */
2697
2698                         /* make sure that we remove them from free mask */
2699                         rs->ifree_mask &= ~ (1 << ins->dreg);
2700                         rs->ifree_mask &= ~ (1 << ins->unused);
2701                 }
2702                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2703                         val = rs->iassign [ins->sreg1];
2704                         prev_sreg1 = ins->sreg1;
2705                         if (val < 0) {
2706                                 int spill = 0;
2707                                 if (val < -1) {
2708                                         /* the register gets spilled after this inst */
2709                                         spill = -val -1;
2710                                 }
2711                                 if (0 && (ins->opcode == OP_MOVE)) {
2712                                         /* 
2713                                          * small optimization: the dest register is already allocated
2714                                          * but the src one is not: we can simply assign the same register
2715                                          * here and peephole will get rid of the instruction later.
2716                                          * This optimization may interfere with the clobbering handling:
2717                                          * it removes a mov operation that will be added again to handle clobbering.
2718                                          * There are also some other issues that show up with make testjit.
2719                                          */
2720                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2721                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2722                                         //g_assert (val >= 0);
2723                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2724                                 } else {
2725                                         //g_assert (val == -1); /* source cannot be spilled */
2726                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2727                                         rs->iassign [ins->sreg1] = val;
2728                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2729                                 }
2730                                 if (spill) {
2731                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
2732                                         insert_before_ins (ins, tmp, store);
2733                                 }
2734                         }
2735                         rs->isymbolic [val] = prev_sreg1;
2736                         ins->sreg1 = val;
2737                 } else {
2738                         prev_sreg1 = -1;
2739                 }
2740
2741                 /* handle clobbering of sreg1 */
2742                 if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
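                        /*
                         * Two-address case: the result register must also hold the
                         * first operand, so copy sreg1 into dreg before the
                         * instruction. If dreg currently holds sreg2, sreg2 is moved
                         * to a freshly allocated register first.
                         */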
2743                         MonoInst *sreg2_copy = NULL;
2744                         MonoInst *copy;
2745                         gboolean fp = (spec [MONO_INST_SRC1] == 'f');
2746
2747                         if (ins->dreg == ins->sreg2) {
2748                                 /* 
2749                                  * copying sreg1 to dreg could clobber sreg2, so allocate a new
2750                                  * register for it.
2751                                  */
2752                                 int reg2 = 0;
2753
2754                                 if (fp)
2755                                         reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2756                                 else
2757                                         reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
2758
2759                                 DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
2760                                 sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
2761                                 prev_sreg2 = ins->sreg2 = reg2;
2762
2763                                 if (fp)
2764                                         mono_regstate_free_float (rs, reg2);
2765                                 else
2766                                         mono_regstate_free_int (rs, reg2);
2767                         }
2768
2769                         copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
2770                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
2771                         insert_before_ins (ins, tmp, copy);
2772
2773                         if (sreg2_copy)
2774                                 insert_before_ins (copy, tmp, sreg2_copy);
2775
2776                         /*
2777                          * Need to prevent sreg2 from being allocated to sreg1, since that
2778                          * would screw up the previous copy.
2779                          */
2780                         src2_mask &= ~ (1 << ins->sreg1);
2781                         /* we set sreg1 to dest as well */
2782                         prev_sreg1 = ins->sreg1 = ins->dreg;
2783                         src2_mask &= ~ (1 << ins->dreg);
2784                 }
2785
2786                 /*
2787                  * TRACK SREG2
2788                  */
2789                 if (spec [MONO_INST_SRC2] == 'f') {
2790                         if (use_sse2) {
2791                                 if (reg_is_soft (ins->sreg2, TRUE)) {
2792                                         val = rs->fassign [ins->sreg2];
2793                                         prev_sreg2 = ins->sreg2;
2794                                         if (val < 0) {
2795                                                 int spill = 0;
2796                                                 if (val < -1) {
2797                                                         /* the register gets spilled after this inst */
2798                                                         spill = -val -1;
2799                                                 }
2800                                                 val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
2801                                                 rs->fassign [ins->sreg2] = val;
2802                                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
2803                                                 if (spill)
2804                                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
2805                                         }
2806                                         rs->fsymbolic [val] = prev_sreg2;
2807                                         ins->sreg2 = val;
2808                                 } else {
2809                                         prev_sreg2 = -1;
2810                                 }
2811                         }
2812                         else if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2814                                 MonoInst *load;
2815                                 MonoInst *store = NULL;
2816
2817                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2818                                         GList *spill_node;
2819
2820                                         spill_node = g_list_first (fspill_list);
2821                                         g_assert (spill_node);
2822                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2823                                                 spill_node = g_list_next (spill_node);
2824         
2825                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2826                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2827                                 } 
2828                                 
2829                                 fspill++;
2830                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2831                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2832                                 insert_before_ins (ins, tmp, load);
2833                                 if (store) 
2834                                         insert_before_ins (load, tmp, store);
2835                         }
2836                 } 
2837                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2838                         val = rs->iassign [ins->sreg2];
2839                         prev_sreg2 = ins->sreg2;
2840                         if (val < 0) {
2841                                 int spill = 0;
2842                                 if (val < -1) {
2843                                         /* the register gets spilled after this inst */
2844                                         spill = -val -1;
2845                                 }
2846                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2847                                 rs->iassign [ins->sreg2] = val;
2848                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2849                                 if (spill)
2850                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
2851                         }
2852                         rs->isymbolic [val] = prev_sreg2;
2853                         ins->sreg2 = val;
2854                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2855                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but RCX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2856                         }
2857                 } else {
2858                         prev_sreg2 = -1;
2859                 }
2860
2861                 if (spec [MONO_INST_CLOB] == 'c') {
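                        /*
                         * The instruction is a call: force-spill every live value
                         * held in a register clobbered by the call, then bind the
                         * outgoing argument registers recorded on the call.
                         */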
2862                         int j, s;
2863                         MonoCallInst *call = (MonoCallInst*)ins;
2864                         GSList *list;
2865                         guint32 clob_mask = AMD64_CALLEE_REGS;
2866
2867                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2868                                 s = 1 << j;
2869                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2870                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
2871                                         mono_regstate_free_int (rs, j);
2872                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2873                                 }
2874                         }
2875
2876                         if (use_sse2) {
2877                                 clob_mask = AMD64_CALLEE_FREGS;
2878
2879                                 for (j = 0; j < MONO_MAX_FREGS; ++j) {
2880                                         s = 1 << j;
2881                                         if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
2882                                                 get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
2883                                                 mono_regstate_free_float (rs, j);
2884                                                 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2885                                         }
2886                                 }
2887                         }
2888
2889                         /* 
2890                          * Assign all registers in call->out_ireg_args and
2891                          * call->out_freg_args to the proper argument registers.
2892                          */
2893
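                        /* each list entry packs the hard reg into the upper 32 bits
                         * and the soft (symbolic) reg into the lower 32 bits */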
2894                         list = call->out_ireg_args;
2895                         if (list) {
2896                                 while (list) {
2897                                         guint64 regpair;
2898                                         int reg, hreg;
2899
2900                                         regpair = (guint64) (list->data);
2901                                         hreg = regpair >> 32;
2902                                         reg = regpair & 0xffffffff;
2903
2904                                         assign_ireg (rs, reg, hreg);
2905
2906                                         DEBUG (g_print ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg));
2907
2908                                         list = g_slist_next (list);
2909                                 }
2910                                 g_slist_free (call->out_ireg_args);
2911                         }
2912
2913                         list = call->out_freg_args;
2914                         if (list && use_sse2) {
2915                                 while (list) {
2916                                         guint64 regpair;
2917                                         int reg, hreg;
2918
2919                                         regpair = (guint64) (list->data);
2920                                         hreg = regpair >> 32;
2921                                         reg = regpair & 0xffffffff;
2922
2923                                         rs->fassign [reg] = hreg;
2924                                         rs->fsymbolic [hreg] = reg;
2925                                         rs->ffree_mask &= ~ (1 << hreg);
2926
2927                                         list = g_slist_next (list);
2928                                 }
2929                         }
2930                         if (call->out_freg_args)
2931                                 g_slist_free (call->out_freg_args);
2932                 }
2933
2934                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2935                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2936                         mono_regstate_free_int (rs, ins->sreg1);
2937                 }
2938                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2939                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2940                         mono_regstate_free_int (rs, ins->sreg2);
2941                 }*/
2942         
2943                 DEBUG (print_ins (i, ins));
2944                 /* this may result from an insert_before call */
2945                 if (!tmp->next)
2946                         bb->code = tmp->data;
2947                 tmp = tmp->next;
2948         }
2949
2950         g_free (reginfo);
2951         g_free (reginfof);
2952         g_list_free (fspill_list);
2953 }
2954
2955 static unsigned char*
2956 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2957 {
2958         if (use_sse2) {
2959                 amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
2960         }
2961         else {
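                /*
                 * x87 path: save the FPU control word, set the rounding mode
                 * to truncate (RC bits 10-11 = 11b), store the value with
                 * fistp, then restore the original control word.
                 */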
2962                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2963                 x86_fnstcw_membase(code, AMD64_RSP, 0);
2964                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2965                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2966                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2967                 amd64_fldcw_membase (code, AMD64_RSP, 2);
2968                 amd64_push_reg (code, AMD64_RAX); /* SP = SP - 8 */
2969                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2970                 amd64_pop_reg (code, dreg);
2971                 amd64_fldcw_membase (code, AMD64_RSP, 0);
2972                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2973         }
2974
2975         if (size == 1)
2976                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2977         else if (size == 2)
2978                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2979         return code;
2980 }
2981
2982 static unsigned char*
2983 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2984 {
2985         int sreg = tree->sreg1;
2986         int need_touch = FALSE;
2987
2988 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
2989         if (!(tree->flags & MONO_INST_INIT))
2990                 need_touch = TRUE;
2991 #endif
2992
2993         if (need_touch) {
2994                 guint8* br[5];
2995
2996                 /*
2997                  * Generate stack probe code.
2998                  * Under Windows, if the requested stack size is larger than one
2999                  * page it is necessary to allocate one page at a time, "touching"
3000                  * the stack after each successful sub-allocation. This is because
3001                  * of the way stack growth is implemented - there is a guard page
3002                  * before the lowest stack page that is currently committed.
3003                  * Stack normally grows sequentially so the OS traps access to the
3004                  * guard page and commits more pages when needed.
3005                  */
3010                 amd64_test_reg_imm (code, sreg, ~0xFFF);
3011                 br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
3012
3013                 br[2] = code; /* loop */
3014                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
3015                 amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
3016                 amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
3017                 amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
3018                 br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
3019                 amd64_patch (br[3], br[2]);
3020                 amd64_test_reg_reg (code, sreg, sreg);
3021                 br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
3022                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
3023
3024                 br[1] = code; x86_jump8 (code, 0);
3025
3026                 amd64_patch (br[0], code);
3027                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
3028                 amd64_patch (br[1], code);
3029                 amd64_patch (br[4], code);
3030         }
3031         else
3032                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
3033
3034         if (tree->flags & MONO_INST_INIT) {
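                /*
                 * Zero the freshly allocated area with rep stos. The loop
                 * clobbers RAX, RCX and RDI, so save whichever of them are not
                 * this instruction's dreg/sreg around it.
                 */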
3035                 int offset = 0;
3036                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
3037                         amd64_push_reg (code, AMD64_RAX);
3038                         offset += 8;
3039                 }
3040                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
3041                         amd64_push_reg (code, AMD64_RCX);
3042                         offset += 8;
3043                 }
3044                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
3045                         amd64_push_reg (code, AMD64_RDI);
3046                         offset += 8;
3047                 }
3048                 
3049                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
3050                 if (sreg != AMD64_RCX)
3051                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
3052                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3053                                 
3054                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
3055                 amd64_cld (code);
3056                 amd64_prefix (code, X86_REP_PREFIX);
3057                 amd64_stosl (code);
3058                 
3059                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
3060                         amd64_pop_reg (code, AMD64_RDI);
3061                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
3062                         amd64_pop_reg (code, AMD64_RCX);
3063                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
3064                         amd64_pop_reg (code, AMD64_RAX);
3065         }
3066         return code;
3067 }
3068
3069 static guint8*
3070 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
3071 {
3072         CallInfo *cinfo;
3073         guint32 offset, quad;
3074
3075         /* Move return value to the target register */
3076         /* FIXME: do this in the local reg allocator */
3077         switch (ins->opcode) {
3078         case CEE_CALL:
3079         case OP_CALL_REG:
3080         case OP_CALL_MEMBASE:
3081         case OP_LCALL:
3082         case OP_LCALL_REG:
3083         case OP_LCALL_MEMBASE:
3084                 if (ins->dreg != AMD64_RAX)
3085                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
3086                 break;
3087         case OP_FCALL:
3088         case OP_FCALL_REG:
3089         case OP_FCALL_MEMBASE:
3090                 /* FIXME: optimize this */
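                        /*
                         * Float results arrive in XMM0. An R4 result is widened
                         * to R8; without SSE2 the value is bounced through a
                         * stack slot onto the x87 stack.
                         */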
3091                 offset = mono_spillvar_offset_float (cfg, 0);
3092                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
3093                         if (use_sse2)
3094                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
3095                         else {
3096                                 amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3097                                 amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
3098                         }
3099                 }
3100                 else {
3101                         if (use_sse2) {
3102                                 if (ins->dreg != AMD64_XMM0)
3103                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
3104                         }
3105                         else {
3106                                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
3107                                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
3108                         }
3109                 }
3110                 break;
3111         case OP_VCALL:
3112         case OP_VCALL_REG:
3113         case OP_VCALL_MEMBASE:
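                        /*
                         * Small valuetypes may be returned in registers; store
                         * each quad back to the destination address, which the
                         * call sequence left on the stack.
                         */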
3114                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
3115                 if (cinfo->ret.storage == ArgValuetypeInReg) {
3116                         /* Pop the destination address from the stack */
3117                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3118                         amd64_pop_reg (code, AMD64_RCX);
3119                         
3120                         for (quad = 0; quad < 2; quad ++) {
3121                                 switch (cinfo->ret.pair_storage [quad]) {
3122                                 case ArgInIReg:
3123                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
3124                                         break;
3125                                 case ArgInFloatSSEReg:
3126                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3127                                         break;
3128                                 case ArgInDoubleSSEReg:
3129                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
3130                                         break;
3131                                 case ArgNone:
3132                                         break;
3133                                 default:
3134                                         NOT_IMPLEMENTED;
3135                                 }
3136                         }
3137                 }
3138                 g_free (cinfo);
3139                 break;
3140         }
3141
3142         return code;
3143 }
3144
3145 /*
3146  * emit_load_volatile_arguments:
3147  *
3148  *  Load volatile arguments from the stack to the original input registers.
3149  * Required before a tail call.
3150  */
3151 static guint8*
3152 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3153 {
3154         MonoMethod *method = cfg->method;
3155         MonoMethodSignature *sig;
3156         MonoInst *inst;
3157         CallInfo *cinfo;
3158         guint32 i;
3159
3160         /* FIXME: Generate intermediate code instead */
3161
3162         sig = mono_method_signature (method);
3163
3164         cinfo = get_call_info (sig, FALSE);
3165         
3166         /* This is the opposite of the code in emit_prolog */
3167
3168         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3169                 ArgInfo *ainfo = cinfo->args + i;
3170                 MonoType *arg_type;
3171                 inst = cfg->varinfo [i];
3172
3173                 if (sig->hasthis && (i == 0))
3174                         arg_type = &mono_defaults.object_class->byval_arg;
3175                 else
3176                         arg_type = sig->params [i - sig->hasthis];
3177
3178                 if (inst->opcode != OP_REGVAR) {
3179                         switch (ainfo->storage) {
3180                         case ArgInIReg: {
3181                                 guint32 size = 8;
3182
3183                                 /* FIXME: I1 etc */
3184                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
3185                                 break;
3186                         }
3187                         case ArgInFloatSSEReg:
3188                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3189                                 break;
3190                         case ArgInDoubleSSEReg:
3191                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3192                                 break;
3193                         default:
3194                                 break;
3195                         }
3196                 }
3197         }
3198
3199         g_free (cinfo);
3200
3201         return code;
3202 }
3203
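/*
 * Debug helper inherited from the x86 backend: calls printf from generated
 * code to print a register number and value. Note the arguments are still
 * passed on the stack, x86-style.
 */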
3204 #define REAL_PRINT_REG(text,reg) \
3205 mono_assert (reg >= 0); \
3206 amd64_push_reg (code, AMD64_RAX); \
3207 amd64_push_reg (code, AMD64_RDX); \
3208 amd64_push_reg (code, AMD64_RCX); \
3209 amd64_push_reg (code, reg); \
3210 amd64_push_imm (code, reg); \
3211 amd64_push_imm (code, text " %d %p\n"); \
3212 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
3213 amd64_call_reg (code, AMD64_RAX); \
3214 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*8); /* three 8-byte pushes */ \
3215 amd64_pop_reg (code, AMD64_RCX); \
3216 amd64_pop_reg (code, AMD64_RDX); \
3217 amd64_pop_reg (code, AMD64_RAX);
3218
3219 /* benchmark and set based on cpu */
3220 #define LOOP_ALIGNMENT 8
3221 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
3222
3223 void
3224 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3225 {
3226         MonoInst *ins;
3227         MonoCallInst *call;
3228         guint offset;
3229         guint8 *code = cfg->native_code + cfg->code_len;
3230         MonoInst *last_ins = NULL;
3231         guint last_offset = 0;
3232         int max_len, cpos;
3233
3234         if (cfg->opt & MONO_OPT_PEEPHOLE)
3235                 peephole_pass (cfg, bb);
3236
3237         if (cfg->opt & MONO_OPT_LOOP) {
3238                 int pad, align = LOOP_ALIGNMENT;
3239                 /* set alignment depending on cpu */
3240                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
3241                         pad = align - pad;
3242                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3243                         amd64_padding (code, pad);
3244                         cfg->code_len += pad;
3245                         bb->native_offset = cfg->code_len;
3246                 }
3247         }
3248
3249         if (cfg->verbose_level > 2)
3250                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3251
3252         cpos = bb->max_offset;
3253
3254         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3255                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
3256                 g_assert (!mono_compile_aot);
3257                 cpos += 6;
3258
3259                 cov->data [bb->dfn].cil_code = bb->cil_code;
3260                 /* this is not thread safe, but good enough */
3261                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
3262         }
3263
3264         offset = code - cfg->native_code;
3265
3266         ins = bb->code;
3267         while (ins) {
3268                 offset = code - cfg->native_code;
3269
3270                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3271
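                /* make sure the code buffer has room for the worst-case
                 * encoding of this opcode, doubling the buffer if needed */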
3272                 if (offset > (cfg->code_size - max_len - 16)) {
3273                         cfg->code_size *= 2;
3274                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3275                         code = cfg->native_code + offset;
3276                         mono_jit_stats.code_reallocs++;
3277                 }
3278
3279                 mono_debug_record_line_number (cfg, ins, offset);
3280
3281                 switch (ins->opcode) {
3282                 case OP_BIGMUL:
3283                         amd64_mul_reg (code, ins->sreg2, TRUE);
3284                         break;
3285                 case OP_BIGMUL_UN:
3286                         amd64_mul_reg (code, ins->sreg2, FALSE);
3287                         break;
3288                 case OP_X86_SETEQ_MEMBASE:
3289                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
3290                         break;
3291                 case OP_STOREI1_MEMBASE_IMM:
3292                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
3293                         break;
3294                 case OP_STOREI2_MEMBASE_IMM:
3295                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
3296                         break;
3297                 case OP_STOREI4_MEMBASE_IMM:
3298                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
3299                         break;
3300                 case OP_STOREI1_MEMBASE_REG:
3301                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
3302                         break;
3303                 case OP_STOREI2_MEMBASE_REG:
3304                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
3305                         break;
3306                 case OP_STORE_MEMBASE_REG:
3307                 case OP_STOREI8_MEMBASE_REG:
3308                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
3309                         break;
3310                 case OP_STOREI4_MEMBASE_REG:
3311                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
3312                         break;
3313                 case OP_STORE_MEMBASE_IMM:
3314                 case OP_STOREI8_MEMBASE_IMM:
3315                         if (amd64_is_imm32 (ins->inst_imm))
3316                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
3317                         else {
3318                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
3319                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
3320                         }
3321                         break;
3322                 case CEE_LDIND_I:
3323                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
3324                         break;
3325                 case CEE_LDIND_I4:
3326                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3327                         break;
3328                 case CEE_LDIND_U4:
3329                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
3330                         break;
3331                 case OP_LOADU4_MEM:
3332                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
3333                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
3334                         break;
3335                 case OP_LOAD_MEMBASE:
3336                 case OP_LOADI8_MEMBASE:
3337                         if (amd64_is_imm32 (ins->inst_offset)) {
3338                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
3339                         }
3340                         else {
3341                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
3342                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
3343                         }
3344                         break;
3345                 case OP_LOADI4_MEMBASE:
3346                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3347                         break;
3348                 case OP_LOADU4_MEMBASE:
3349                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
3350                         break;
3351                 case OP_LOADU1_MEMBASE:
3352                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
3353                         break;
3354                 case OP_LOADI1_MEMBASE:
3355                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
3356                         break;
3357                 case OP_LOADU2_MEMBASE:
3358                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
3359                         break;
3360                 case OP_LOADI2_MEMBASE:
3361                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
3362                         break;
3363                 case CEE_CONV_I1:
3364                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3365                         break;
3366                 case CEE_CONV_I2:
3367                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3368                         break;
3369                 case CEE_CONV_U1:
3370                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
3371                         break;
3372                 case CEE_CONV_U2:
3373                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
3374                         break;
3375                 case CEE_CONV_U8:
3376                 case CEE_CONV_U:
3377                         /* Clean out the upper 32 bits (a 32-bit mov zero-extends) */
3378                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3379                         break;
3380                 case CEE_CONV_I8:
3381                 case CEE_CONV_I:
3382                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
3383                         break;                  
3384                 case OP_COMPARE:
3385                 case OP_LCOMPARE:
3386                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
3387                         break;
3388                 case OP_COMPARE_IMM:
3389                         if (!amd64_is_imm32 (ins->inst_imm)) {
3390                                 amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
3391                                 amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
3392                         } else {
3393                                 amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
3394                         }
3395                         break;
3396                 case OP_X86_COMPARE_REG_MEMBASE:
3397                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
3398                         break;
3399                 case OP_X86_TEST_NULL:
3400                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
3401                         break;
3402                 case OP_AMD64_TEST_NULL:
3403                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3404                         break;
3405                 case OP_X86_ADD_MEMBASE_IMM:
3406                         /* FIXME: Make a 64-bit version too */
3407                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3408                         break;
3409                 case OP_X86_ADD_MEMBASE:
3410                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3411                         break;
3412                 case OP_X86_SUB_MEMBASE_IMM:
3413                         g_assert (amd64_is_imm32 (ins->inst_imm));
3414                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3415                         break;
3416                 case OP_X86_SUB_MEMBASE:
3417                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3418                         break;
3419                 case OP_X86_INC_MEMBASE:
3420                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3421                         break;
3422                 case OP_X86_INC_REG:
3423                         amd64_inc_reg_size (code, ins->dreg, 4);
3424                         break;
3425                 case OP_X86_DEC_MEMBASE:
3426                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
3427                         break;
3428                 case OP_X86_DEC_REG:
3429                         amd64_dec_reg_size (code, ins->dreg, 4);
3430                         break;
3431                 case OP_X86_MUL_MEMBASE:
3432                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3433                         break;
3434                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
3435                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
3436                         break;
3437                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
3438                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
3439                         break;
3440                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
3441                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
3442                         break;
3443                 case CEE_BREAK:
3444                         amd64_breakpoint (code);
3445                         break;
3446                 case OP_ADDCC:
3447                 case CEE_ADD:
3448                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
3449                         break;
3450                 case OP_ADC:
3451                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
3452                         break;
3453                 case OP_ADD_IMM:
3454                         g_assert (amd64_is_imm32 (ins->inst_imm));
3455                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
3456                         break;
3457                 case OP_ADC_IMM:
3458                         g_assert (amd64_is_imm32 (ins->inst_imm));
3459                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
3460                         break;
3461                 case OP_SUBCC:
3462                 case CEE_SUB:
3463                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
3464                         break;
3465                 case OP_SBB:
3466                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
3467                         break;
3468                 case OP_SUB_IMM:
3469                         g_assert (amd64_is_imm32 (ins->inst_imm));
3470                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
3471                         break;
3472                 case OP_SBB_IMM:
3473                         g_assert (amd64_is_imm32 (ins->inst_imm));
3474                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
3475                         break;
3476                 case CEE_AND:
3477                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
3478                         break;
3479                 case OP_AND_IMM:
3480                         g_assert (amd64_is_imm32 (ins->inst_imm));
3481                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
3482                         break;
3483                 case CEE_MUL:
3484                 case OP_LMUL:
3485                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3486                         break;
3487                 case OP_MUL_IMM:
3488                 case OP_LMUL_IMM:
3489                         amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
3490                         break;
3491                 case CEE_DIV:
3492                 case OP_LDIV:
3493                         amd64_cdq (code);
3494                         amd64_div_reg (code, ins->sreg2, TRUE);
3495                         break;
3496                 case CEE_DIV_UN:
3497                 case OP_LDIV_UN:
3498                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3499                         amd64_div_reg (code, ins->sreg2, FALSE);
3500                         break;
3501                 case OP_DIV_IMM:
3502                         g_assert (amd64_is_imm32 (ins->inst_imm));
3503                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3504                         amd64_cdq (code);
3505                         amd64_div_reg (code, ins->sreg2, TRUE);
3506                         break;
3507                 case CEE_REM:
3508                 case OP_LREM:
3509                         amd64_cdq (code);
3510                         amd64_div_reg (code, ins->sreg2, TRUE);
3511                         break;
3512                 case CEE_REM_UN:
3513                 case OP_LREM_UN:
3514                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3515                         amd64_div_reg (code, ins->sreg2, FALSE);
3516                         break;
3517                 case OP_REM_IMM:
3518                         g_assert (amd64_is_imm32 (ins->inst_imm));
3519                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3520                         amd64_cdq (code);
3521                         amd64_div_reg (code, ins->sreg2, TRUE);
3522                         break;
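                /*
                 * Editorial sketch of the idiv idiom used by the DIV/REM cases above,
                 * assuming sreg2 == %rcx: the register allocator pins the dividend to
                 * %rax and the result to %rax (quotient) or %rdx (remainder), so the
                 * emitted code is just
                 *     cqo                ; amd64_cdq: sign-extend %rax into %rdx:%rax
                 *     idiv %rcx          ; quotient -> %rax, remainder -> %rdx
                 * while the unsigned cases zero %rdx with xor instead of sign-extending.
                 */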
3523                 case OP_LMUL_OVF:
3524                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
3525                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3526                         break;
3527                 case CEE_OR:
3528                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
3529                         break;
                case OP_OR_IMM:
                        g_assert (amd64_is_imm32 (ins->inst_imm));
3532                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
3533                         break;
3534                 case CEE_XOR:
3535                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
3536                         break;
3537                 case OP_XOR_IMM:
3538                         g_assert (amd64_is_imm32 (ins->inst_imm));
3539                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
3540                         break;
3541                 case CEE_SHL:
3542                 case OP_LSHL:
3543                         g_assert (ins->sreg2 == AMD64_RCX);
3544                         amd64_shift_reg (code, X86_SHL, ins->dreg);
3545                         break;
3546                 case CEE_SHR:
3547                 case OP_LSHR:
3548                         g_assert (ins->sreg2 == AMD64_RCX);
3549                         amd64_shift_reg (code, X86_SAR, ins->dreg);
3550                         break;
3551                 case OP_SHR_IMM:
3552                         g_assert (amd64_is_imm32 (ins->inst_imm));
3553                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3554                         break;
3555                 case OP_LSHR_IMM:
3556                         g_assert (amd64_is_imm32 (ins->inst_imm));
3557                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
3558                         break;
3559                 case OP_SHR_UN_IMM:
3560                         g_assert (amd64_is_imm32 (ins->inst_imm));
3561                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3562                         break;
3563                 case OP_LSHR_UN_IMM:
3564                         g_assert (amd64_is_imm32 (ins->inst_imm));
3565                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
3566                         break;
3567                 case CEE_SHR_UN:
3568                         g_assert (ins->sreg2 == AMD64_RCX);
3569                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3570                         break;
3571                 case OP_LSHR_UN:
3572                         g_assert (ins->sreg2 == AMD64_RCX);
3573                         amd64_shift_reg (code, X86_SHR, ins->dreg);
3574                         break;
3575                 case OP_SHL_IMM:
3576                         g_assert (amd64_is_imm32 (ins->inst_imm));
3577                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3578                         break;
3579                 case OP_LSHL_IMM:
3580                         g_assert (amd64_is_imm32 (ins->inst_imm));
3581                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
3582                         break;
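                /*
                 * Note on the shift cases above: variable shift counts must live in
                 * %cl, hence the sreg2 == AMD64_RCX asserts; e.g. OP_LSHL with
                 * dreg == %rax emits shl %cl, %rax. The *_size (..., 4) variants
                 * drop REX.W and emit the 32 bit form (shl %cl, %eax), which also
                 * zeroes the upper 32 bits of the destination on amd64.
                 */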
3583
3584                 case OP_IADDCC:
3585                 case OP_IADD:
3586                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
3587                         break;
3588                 case OP_IADC:
3589                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
3590                         break;
3591                 case OP_IADD_IMM:
3592                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
3593                         break;
3594                 case OP_IADC_IMM:
3595                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
3596                         break;
3597                 case OP_ISUBCC:
3598                 case OP_ISUB:
3599                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
3600                         break;
3601                 case OP_ISBB:
3602                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
3603                         break;
3604                 case OP_ISUB_IMM:
3605                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
3606                         break;
3607                 case OP_ISBB_IMM:
3608                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
3609                         break;
3610                 case OP_IAND:
3611                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
3612                         break;
3613                 case OP_IAND_IMM:
3614                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
3615                         break;
3616                 case OP_IOR:
3617                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
3618                         break;
3619                 case OP_IOR_IMM:
3620                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
3621                         break;
3622                 case OP_IXOR:
3623                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
3624                         break;
3625                 case OP_IXOR_IMM:
3626                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
3627                         break;
3628                 case OP_INEG:
3629                         amd64_neg_reg_size (code, ins->sreg1, 4);
3630                         break;
3631                 case OP_INOT:
3632                         amd64_not_reg_size (code, ins->sreg1, 4);
3633                         break;
3634                 case OP_ISHL:
3635                         g_assert (ins->sreg2 == AMD64_RCX);
3636                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
3637                         break;
3638                 case OP_ISHR:
3639                         g_assert (ins->sreg2 == AMD64_RCX);
3640                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
3641                         break;
3642                 case OP_ISHR_IMM:
3643                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
3644                         break;
3645                 case OP_ISHR_UN_IMM:
3646                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
3647                         break;
3648                 case OP_ISHR_UN:
3649                         g_assert (ins->sreg2 == AMD64_RCX);
3650                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
3651                         break;
3652                 case OP_ISHL_IMM:
3653                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
3654                         break;
3655                 case OP_IMUL:
3656                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3657                         break;
3658                 case OP_IMUL_IMM:
3659                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
3660                         break;
3661                 case OP_IMUL_OVF:
3662                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
3663                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3664                         break;
3665                 case OP_IMUL_OVF_UN:
3666                 case OP_LMUL_OVF_UN: {
3667                         /* the mul operation and the exception check should most likely be split */
3668                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
3669                         int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
3670                         /*g_assert (ins->sreg2 == X86_EAX);
3671                         g_assert (ins->dreg == X86_EAX);*/
3672                         if (ins->sreg2 == X86_EAX) {
3673                                 non_eax_reg = ins->sreg1;
3674                         } else if (ins->sreg1 == X86_EAX) {
3675                                 non_eax_reg = ins->sreg2;
3676                         } else {
3677                                 /* no need to save since we're going to store to it anyway */
3678                                 if (ins->dreg != X86_EAX) {
3679                                         saved_eax = TRUE;
3680                                         amd64_push_reg (code, X86_EAX);
3681                                 }
3682                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
3683                                 non_eax_reg = ins->sreg2;
3684                         }
3685                         if (ins->dreg == X86_EDX) {
3686                                 if (!saved_eax) {
3687                                         saved_eax = TRUE;
3688                                         amd64_push_reg (code, X86_EAX);
3689                                 }
3690                         } else if (ins->dreg != X86_EAX) {
3691                                 saved_edx = TRUE;
3692                                 amd64_push_reg (code, X86_EDX);
3693                         }
3694                         amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
3695                         /* save before the check since pop and mov don't change the flags */
3696                         if (ins->dreg != X86_EAX)
3697                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
3698                         if (saved_edx)
3699                                 amd64_pop_reg (code, X86_EDX);
3700                         if (saved_eax)
3701                                 amd64_pop_reg (code, X86_EAX);
3702                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
3703                         break;
3704                 }
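                /*
                 * Illustrative expansion of the block above for an assumed
                 * allocation dreg == sreg1 == %rcx, sreg2 == %rbx:
                 *     push %rax                ; mul clobbers %rax/%rdx
                 *     push %rdx
                 *     mov  %rcx, %rax
                 *     mul  %rbx                ; %rdx:%rax = %rax * %rbx, OF/CF set
                 *                              ; iff the high half is non-zero
                 *     mov  %rax, %rcx          ; moved before the pops: neither
                 *     pop  %rdx                ; pop nor mov touches OF
                 *     pop  %rax
                 *     jo   <OverflowException>
                 */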
3705                 case OP_IDIV:
3706                         amd64_cdq_size (code, 4);
3707                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
3708                         break;
3709                 case OP_IDIV_UN:
3710                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3711                         amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
3712                         break;
3713                 case OP_IDIV_IMM:
3714                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3715                         amd64_cdq_size (code, 4);
3716                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
3717                         break;
3718                 case OP_IREM:
3719                         amd64_cdq_size (code, 4);
3720                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
3721                         break;
3722                 case OP_IREM_UN:
3723                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
3724                         amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
3725                         break;
3726                 case OP_IREM_IMM:
3727                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
3728                         amd64_cdq_size (code, 4);
3729                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
3730                         break;
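                /*
                 * The I* (32 bit) divide cases above use amd64_cdq_size (code, 4),
                 * i.e. a plain cdq (%eax -> %edx:%eax) rather than cqo, plus the
                 * 32 bit idiv/div forms, so they keep the exact x86 semantics.
                 */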
3731
3732                 case OP_ICOMPARE:
3733                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
3734                         break;
3735                 case OP_ICOMPARE_IMM:
3736                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
3737                         break;
3738
3739                 case OP_IBEQ:
3740                 case OP_IBLT:
3741                 case OP_IBGT:
3742                 case OP_IBGE:
3743                 case OP_IBLE:
3744                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
3745                         break;
3746                 case OP_IBNE_UN:
3747                 case OP_IBLT_UN:
3748                 case OP_IBGT_UN:
3749                 case OP_IBGE_UN:
3750                 case OP_IBLE_UN:
3751                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
3752                         break;
3753                 case OP_COND_EXC_IOV:
3754                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3755                                                                                 TRUE, ins->inst_p1);
3756                         break;
3757                 case OP_COND_EXC_IC:
3758                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
3759                                                                                 FALSE, ins->inst_p1);
3760                         break;
3761                 case CEE_NOT:
3762                         amd64_not_reg (code, ins->sreg1);
3763                         break;
3764                 case CEE_NEG:
3765                         amd64_neg_reg (code, ins->sreg1);
3766                         break;
3767                 case OP_SEXT_I1:
3768                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
3769                         break;
3770                 case OP_SEXT_I2:
3771                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
3772                         break;
3773                 case OP_ICONST:
3774                 case OP_I8CONST:
3775                         if ((((guint64)ins->inst_c0) >> 32) == 0)
3776                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
3777                         else
3778                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
3779                         break;
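                /*
                 * Size optimization sketched: a constant whose top 32 bits are zero
                 * fits the short form (e.g. the 5 byte mov $imm32, %eax, which
                 * implicitly zero-extends into %rax) instead of the 10 byte
                 * movabs $imm64, %rax encoding.
                 */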
3780                 case OP_AOTCONST:
3781                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3782                         amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
3783                         break;
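                /*
                 * The %rip-relative mov above is emitted with a 0 displacement as a
                 * placeholder; the MONO_PATCH_INFO entry recorded just before it
                 * lets the patching code fill in the real target later, keeping the
                 * code position independent for AOT.
                 */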
3784                 case CEE_CONV_I4:
3785                 case CEE_CONV_U4:
3786                 case OP_MOVE:
3787                 case OP_SETREG:
3788                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
3789                         break;
3790                 case OP_AMD64_SET_XMMREG_R4: {
3791                         if (use_sse2) {
3792                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
3793                         }
3794                         else {
3795                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
3796                                 /* ins->dreg is set to -1 by the reg allocator */
3797                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
3798                         }
3799                         break;
3800                 }
3801                 case OP_AMD64_SET_XMMREG_R8: {
3802                         if (use_sse2) {
3803                                 if (ins->dreg != ins->sreg1)
3804                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3805                         }
3806                         else {
3807                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
3808                                 /* ins->dreg is set to -1 by the reg allocator */
3809                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
3810                         }
3811                         break;
3812                 }
3813                 case CEE_JMP: {
3814                         /*
3815                          * Note: this 'frame destruction' logic is useful for tail calls, too.
3816                          * Keep in sync with the code in emit_epilog.
3817                          */
3818                         int pos = 0, i;
3819
3820                         /* FIXME: no tracing support... */
3821                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3822                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
3823
3824                         g_assert (!cfg->method->save_lmf);
3825
3826                         code = emit_load_volatile_arguments (cfg, code);
3827
3828                         for (i = 0; i < AMD64_NREG; ++i)
3829                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
3830                                         pos -= sizeof (gpointer);
3831                         
3832                         if (pos)
3833                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
3834
3835                         /* Pop registers in reverse order */
3836                         for (i = AMD64_NREG - 1; i > 0; --i)
3837                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3838                                         amd64_pop_reg (code, i);
3839                                 }
3840
3841                         amd64_leave (code);
3842                         offset = code - cfg->native_code;
3843                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3844                         if (mono_compile_aot)
3845                                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
3846                         else
3847                                 amd64_set_reg_template (code, AMD64_R11);
3848                         amd64_jump_reg (code, AMD64_R11);
3849                         break;
3850                 }
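                /*
                 * Editorial sketch of the tail-call epilog above, assuming a frame
                 * that saved %rbx and %r12:
                 *     lea  -16(%rbp), %rsp     ; point %rsp at the saved regs
                 *     pop  %r12                ; restore in reverse push order
                 *     pop  %rbx
                 *     leave                    ; mov %rbp, %rsp; pop %rbp
                 *     mov  $<target>, %r11     ; patched via METHOD_JUMP info
                 *     jmp  *%r11
                 */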
3851                 case OP_CHECK_THIS:
3852                         /* ensure ins->sreg1 is not NULL */
3853                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
3854                         break;
3855                 case OP_ARGLIST: {
3856                         amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
3857                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
3858                         break;
3859                 }
3860                 case OP_FCALL:
3861                 case OP_LCALL:
3862                 case OP_VCALL:
3863                 case OP_VOIDCALL:
3864                 case CEE_CALL:
3865                         call = (MonoCallInst*)ins;
3866                         /*
3867                          * The AMD64 ABI forces callers to know about varargs.
3868                          */
3869                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
3870                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3871                         else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
3872                                 /* 
3873                                  * Since the unmanaged calling convention doesn't contain a 
3874                                  * 'vararg' entry, we have to treat every pinvoke call as a
3875                                  * potential vararg call.
3876                                  */
3877                                 guint32 nregs, i;
3878                                 nregs = 0;
3879                                 for (i = 0; i < AMD64_XMM_NREG; ++i)
3880                                         if (call->used_fregs & (1 << i))
3881                                                 nregs ++;
3882                                 if (!nregs)
3883                                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3884                                 else
3885                                         amd64_mov_reg_imm (code, AMD64_RAX, nregs);
3886                         }
3887
3888                         if (ins->flags & MONO_INST_HAS_METHOD)
3889                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3890                         else
3891                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3892                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3893                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3894                         code = emit_move_return_value (cfg, ins, code);
3895                         break;
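                /*
                 * The SysV amd64 varargs rule illustrated: %al must carry an upper
                 * bound on the number of SSE registers used by the call, e.g.
                 *     xor %rax, %rax           ; no xmm arguments
                 *     call <varargs target>
                 * The managed-to-native wrapper case counts the used xmm regs
                 * instead, since it cannot know whether the callee really is
                 * varargs.
                 */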
3896                 case OP_FCALL_REG:
3897                 case OP_LCALL_REG:
3898                 case OP_VCALL_REG:
3899                 case OP_VOIDCALL_REG:
3900                 case OP_CALL_REG:
3901                         call = (MonoCallInst*)ins;
3902
3903                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3904                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
3905                                 ins->sreg1 = AMD64_R11;
3906                         }
3907
3908                         /*
3909                          * The AMD64 ABI forces callers to know about varargs.
3910                          */
3911                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
3912                                 if (ins->sreg1 == AMD64_RAX) {
3913                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
3914                                         ins->sreg1 = AMD64_R11;
3915                                 }
3916                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
3917                         }
3918                         amd64_call_reg (code, ins->sreg1);
3919                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3920                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3921                         code = emit_move_return_value (cfg, ins, code);
3922                         break;
3923                 case OP_FCALL_MEMBASE:
3924                 case OP_LCALL_MEMBASE:
3925                 case OP_VCALL_MEMBASE:
3926                 case OP_VOIDCALL_MEMBASE:
3927                 case OP_CALL_MEMBASE:
3928                         call = (MonoCallInst*)ins;
3929
3930                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
3931                                 /* 
3932                                  * Can't use R11 because it is clobbered by the trampoline 
3933                                  * code, and the reg value is needed by get_vcall_slot_addr.
3934                                  */
3935                                 amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
3936                                 ins->sreg1 = AMD64_RAX;
3937                         }
3938
3939                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3940                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3941                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3942                         code = emit_move_return_value (cfg, ins, code);
3943                         break;
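                /*
                 * Typical shape of the membase call above for a virtual call,
                 * assuming the vtable pointer ends up in %rax:
                 *     call *0x28(%rax)
                 * get_vcall_slot_addr decodes this instruction at patch time, which
                 * is why the base register must not be clobbered beforehand.
                 */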
3944                 case OP_OUTARG:
3945                 case OP_X86_PUSH:
3946                         amd64_push_reg (code, ins->sreg1);
3947                         break;
3948                 case OP_X86_PUSH_IMM:
3949                         g_assert (amd64_is_imm32 (ins->inst_imm));
3950                         amd64_push_imm (code, ins->inst_imm);
3951                         break;
3952                 case OP_X86_PUSH_MEMBASE:
3953                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3954                         break;
3955                 case OP_X86_PUSH_OBJ: 
3956                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3957                         amd64_push_reg (code, AMD64_RDI);
3958                         amd64_push_reg (code, AMD64_RSI);
3959                         amd64_push_reg (code, AMD64_RCX);
3960                         if (ins->inst_offset)
3961                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3962                         else
3963                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3964                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
3965                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
3966                         amd64_cld (code);
3967                         amd64_prefix (code, X86_REP_PREFIX);
3968                         amd64_movsd (code);
3969                         amd64_pop_reg (code, AMD64_RCX);
3970                         amd64_pop_reg (code, AMD64_RSI);
3971                         amd64_pop_reg (code, AMD64_RDI);
3972                         break;
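                /*
                 * The PUSH_OBJ sequence copies a valuetype onto the stack; sketch
                 * for an assumed 32 byte object at 16(%rbp):
                 *     sub  $32, %rsp           ; reserve the argument space
                 *     push %rdi; push %rsi; push %rcx
                 *     lea  16(%rbp), %rsi
                 *     lea  24(%rsp), %rdi      ; skip the three saved registers
                 *     mov  $4, %rcx            ; inst_imm >> 3: count in quadwords
                 *     cld; rep movs            ; 8 byte units
                 *     pop  %rcx; pop %rsi; pop %rdi
                 */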
3973                 case OP_X86_LEA:
3974                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3975                         break;
3976                 case OP_X86_LEA_MEMBASE:
3977                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3978                         break;
3979                 case OP_X86_XCHG:
3980                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3981                         break;
3982                 case OP_LOCALLOC:
3983                         /* keep alignment */
3984                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3985                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3986                         code = mono_emit_stack_alloc (code, ins);
3987                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3988                         break;
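                /*
                 * Round-up-to-alignment idiom: size = (size + A - 1) & ~(A - 1).
                 * E.g. with a 16 byte MONO_ARCH_FRAME_ALIGNMENT, a 20 byte request
                 * becomes (20 + 15) & ~15 = 32, so %rsp stays 16 byte aligned after
                 * the allocation.
                 */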
3989                 case CEE_RET:
3990                         amd64_ret (code);
3991                         break;
3992                 case CEE_THROW: {
3993                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
3994                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3995                                              (gpointer)"mono_arch_throw_exception");
3996                         break;
3997                 }
3998                 case OP_RETHROW: {
3999                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
4000                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
4001                                              (gpointer)"mono_arch_rethrow_exception");
4002                         break;
4003                 }
4004                 case OP_CALL_HANDLER: 
4005                         /* Align stack */
4006                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4007                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4008                         amd64_call_imm (code, 0);
4009                         /* Restore stack alignment */
4010                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4011                         break;
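                /*
                 * The call emitted above pushes an 8 byte return address, so the
                 * surrounding sub/add pair keeps %rsp 16 byte aligned while the
                 * finally/filter handler runs.
                 */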
4012                 case OP_LABEL:
4013                         ins->inst_c0 = code - cfg->native_code;
4014                         break;
4015                 case CEE_BR:
4016                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
4017                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
4018                         //break;
4019                         if (ins->flags & MONO_INST_BRLABEL) {
4020                                 if (ins->inst_i0->inst_c0) {
4021                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
4022                                 } else {
4023                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
4024                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
4025                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
4026                                                 x86_jump8 (code, 0);
4027                                         else 
4028                                                 x86_jump32 (code, 0);
4029                                 }
4030                         } else {
4031                                 if (ins->inst_target_bb->native_offset) {
4032                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
4033                                 } else {
4034                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4035                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
4036                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
4037                                                 x86_jump8 (code, 0);
4038                                         else 
4039                                                 x86_jump32 (code, 0);
4040                                 } 
4041                         }
4042                         break;
4043                 case OP_BR_REG:
4044                         amd64_jump_reg (code, ins->sreg1);
4045                         break;
4046                 case OP_CEQ:
4047                 case OP_ICEQ:
4048                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4049                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4050                         break;
4051                 case OP_CLT:
4052                 case OP_ICLT:
4053                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
4054                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4055                         break;
4056                 case OP_CLT_UN:
4057                 case OP_ICLT_UN:
4058                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4059                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4060                         break;
4061                 case OP_CGT:
4062                 case OP_ICGT:
4063                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
4064                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4065                         break;
4066                 case OP_CGT_UN:
4067                 case OP_ICGT_UN:
4068                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4069                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4070                         break;
4071                 case OP_COND_EXC_EQ:
4072                 case OP_COND_EXC_NE_UN:
4073                 case OP_COND_EXC_LT:
4074                 case OP_COND_EXC_LT_UN:
4075                 case OP_COND_EXC_GT:
4076                 case OP_COND_EXC_GT_UN:
4077                 case OP_COND_EXC_GE:
4078                 case OP_COND_EXC_GE_UN:
4079                 case OP_COND_EXC_LE:
4080                 case OP_COND_EXC_LE_UN:
4081                 case OP_COND_EXC_OV:
4082                 case OP_COND_EXC_NO:
4083                 case OP_COND_EXC_C:
4084                 case OP_COND_EXC_NC:
4085                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
4086                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
4087                         break;
4088                 case CEE_BEQ:
4089                 case CEE_BNE_UN:
4090                 case CEE_BLT:
4091                 case CEE_BLT_UN:
4092                 case CEE_BGT:
4093                 case CEE_BGT_UN:
4094                 case CEE_BGE:
4095                 case CEE_BGE_UN:
4096                 case CEE_BLE:
4097                 case CEE_BLE_UN:
4098                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
4099                         break;
4100
4101                 /* floating point opcodes */
4102                 case OP_R8CONST: {
4103                         double d = *(double *)ins->inst_p0;
4104
4105                         if (use_sse2) {
4106                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
4107                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4108                                 }
4109                                 else {
4110                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4111                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4112                                 }
4113                         }
4114                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
4115                                 amd64_fldz (code);
4116                         } else if (d == 1.0) {
4117                                 x86_fld1 (code);
4118                         } else {
4119                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
4120                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
4121                         }
4122                         break;
4123                 }
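                /*
                 * The signbit check above distinguishes +0.0 from -0.0, which
                 * compare equal: only +0.0 may be materialized with
                 * xorpd %xmmN, %xmmN (or fldz); -0.0 still takes the %rip-relative
                 * constant load.
                 */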
4124                 case OP_R4CONST: {
4125                         float f = *(float *)ins->inst_p0;
4126
4127                         if (use_sse2) {
4128                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
4129                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
4130                                 }
4131                                 else {
4132                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4133                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
4134                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4135                                 }
4136                         }
4137                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
4138                                 amd64_fldz (code);
4139                         } else if (f == 1.0) {
4140                                 x86_fld1 (code);
4141                         } else {
4142                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
4143                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
4144                         }
4145                         break;
4146                 }
4147                 case OP_STORER8_MEMBASE_REG:
4148                         if (use_sse2)
4149                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
4150                         else
4151                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
4152                         break;
4153                 case OP_LOADR8_SPILL_MEMBASE:
4154                         if (use_sse2)
4155                                 g_assert_not_reached ();
4156                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4157                         amd64_fxch (code, 1);
4158                         break;
4159                 case OP_LOADR8_MEMBASE:
4160                         if (use_sse2)
4161                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4162                         else
4163                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4164                         break;
4165                 case OP_STORER4_MEMBASE_REG:
4166                         if (use_sse2) {
4167                                 /* This requires a double->single conversion */
4168                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
4169                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
4170                         }
4171                         else
4172                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
4173                         break;
4174                 case OP_LOADR4_MEMBASE:
4175                         if (use_sse2) {
4176                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4177                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
4178                         }
4179                         else
4180                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4181                         break;
4182                 case CEE_CONV_R4: /* FIXME: change precision */
4183                 case CEE_CONV_R8:
4184                         if (use_sse2)
4185                                 amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
4186                         else {
4187                                 amd64_push_reg (code, ins->sreg1);
4188                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
4189                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4190                         }
4191                         break;
4192                 case CEE_CONV_R_UN:
4193                         /* Emulated */
4194                         g_assert_not_reached ();
4195                         break;
4196                 case OP_LCONV_TO_R4: /* FIXME: change precision */
4197                 case OP_LCONV_TO_R8:
4198                         if (use_sse2)
4199                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
4200                         else {
4201                                 amd64_push_reg (code, ins->sreg1);
4202                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4203                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4204                         }
4205                         break;
4206                 case OP_X86_FP_LOAD_I8:
4207                         if (use_sse2)
4208                                 g_assert_not_reached ();
4209                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
4210                         break;
4211                 case OP_X86_FP_LOAD_I4:
4212                         if (use_sse2)
4213                                 g_assert_not_reached ();
4214                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
4215                         break;
4216                 case OP_FCONV_TO_I1:
4217                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4218                         break;
4219                 case OP_FCONV_TO_U1:
4220                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4221                         break;
4222                 case OP_FCONV_TO_I2:
4223                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4224                         break;
4225                 case OP_FCONV_TO_U2:
4226                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4227                         break;
4228                 case OP_FCONV_TO_I4:
4229                 case OP_FCONV_TO_I:
4230                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4231                         break;
4232                 case OP_FCONV_TO_I8:
4233                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
4234                         break;
4235                 case OP_LCONV_TO_R_UN: { 
4236                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
4237                         guint8 *br;
4238
4239                         if (use_sse2)
4240                                 g_assert_not_reached ();
4241
4242                         /* load 64bit integer to FP stack */
4243                         amd64_push_imm (code, 0);
4244                         amd64_push_reg (code, ins->sreg2);
4245                         amd64_push_reg (code, ins->sreg1);
4246                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
4247                         /* store as 80bit FP value */
4248                         x86_fst80_membase (code, AMD64_RSP, 0);
4249                         
4250                         /* test if lreg is negative */
4251                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4252                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
4253         
4254                         /* add correction constant mn */
4255                         x86_fld80_mem (code, mn);
4256                         x86_fld80_membase (code, AMD64_RSP, 0);
4257                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4258                         x86_fst80_membase (code, AMD64_RSP, 0);
4259
4260                         amd64_patch (br, code);
4261
4262                         x86_fld80_membase (code, AMD64_RSP, 0);
                        /* three 8-byte pushes above, not 12 bytes as on x86 */
                        amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 24);
4264
4265                         break;
4266                 }
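                /*
                 * mn encodes 2^64 as an 80 bit extended double (significand
                 * 0x8000000000000000, biased exponent 0x403f): adding it corrects
                 * the signed fild result when the unsigned source had its top bit
                 * set. This path appears to retain the x86 sreg1/sreg2 word-pair
                 * layout and is unreachable while use_sse2 is enabled.
                 */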
4267                 case OP_LCONV_TO_OVF_I: {
4268                         guint8 *br [3], *label [1];
4269
4270                         if (use_sse2)
4271                                 g_assert_not_reached ();
4272
4273                         /* 
                         * Valid ints (top word:low word): 0xffffffff:80000000 to 0x00000000:7fffffff
4275                          */
4276                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
4277
4278                         /* If the low word top bit is set, see if we are negative */
4279                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
                        /* We are not negative (no top bit set), so check that our top word is zero */
4281                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
4282                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
4283                         label [0] = code;
4284
4285                         /* throw exception */
4286                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
4287                         x86_jump32 (code, 0);
4288         
4289                         amd64_patch (br [0], code);
                        /* our top bit is set, check that the top word is 0xffffffff */
4291                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
4292                 
4293                         amd64_patch (br [1], code);
4294                         /* nope, emit exception */
4295                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
4296                         amd64_patch (br [2], label [0]);
4297
4298                         if (ins->dreg != ins->sreg1)
4299                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
4300                         break;
4301                 }
4302                 case CEE_CONV_OVF_U4:
4303                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
4304                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
4305                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4306                         break;
4307                 case CEE_CONV_OVF_I4_UN:
4308                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
4309                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
4310                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
4311                         break;
4312                 case OP_FMOVE:
4313                         if (use_sse2 && (ins->dreg != ins->sreg1))
4314                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
4315                         break;
4316                 case OP_FADD:
4317                         if (use_sse2)
4318                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
4319                         else
4320                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4321                         break;
4322                 case OP_FSUB:
4323                         if (use_sse2)
4324                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
4325                         else
4326                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
4327                         break;          
4328                 case OP_FMUL:
4329                         if (use_sse2)
4330                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
4331                         else
4332                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
4333                         break;          
4334                 case OP_FDIV:
4335                         if (use_sse2)
4336                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
4337                         else
4338                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
4339                         break;          
4340                 case OP_FNEG:
4341                         if (use_sse2) {
4342                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
4343                                 amd64_push_reg (code, AMD64_R11);
4344                                 amd64_push_reg (code, AMD64_R11);
                                amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
                                /* rebalance the stack: pop the 16 byte mask pushed above */
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
                        }
4347                         else
4348                                 amd64_fchs (code);
4349                         break;          
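                /*
                 * Sign-flip trick used above: xorpd with an operand whose quadwords
                 * are 0x8000000000000000 toggles only the IEEE sign bit; the two
                 * pushes build the 16 byte memory operand xorpd reads, and only the
                 * low quadword matters for the scalar double in dreg.
                 */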
4350                 case OP_SIN:
4351                         if (use_sse2) {
4352                                 EMIT_SSE2_FPFUNC (code, fsin, ins->dreg, ins->sreg1);
4353                         }
4354                         else {
4355                                 amd64_fsin (code);
4356                                 amd64_fldz (code);
4357                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4358                         }
4359                         break;          
4360                 case OP_COS:
4361                         if (use_sse2) {
4362                                 EMIT_SSE2_FPFUNC (code, fcos, ins->dreg, ins->sreg1);
4363                         }
4364                         else {
4365                                 amd64_fcos (code);
4366                                 amd64_fldz (code);
4367                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4368                         }
4369                         break;          
4370                 case OP_ABS:
4371                         if (use_sse2) {
4372                                 EMIT_SSE2_FPFUNC (code, fabs, ins->dreg, ins->sreg1);
4373                         }
4374                         else
4375                                 amd64_fabs (code);
4376                         break;          
4377                 case OP_TAN: {
4378                         /* 
4379                          * it really doesn't make sense to inline all this code,
4380                          * it's here just to show that things may not be as simple 
4381                          * as they appear.
4382                          */
4383                         guchar *check_pos, *end_tan, *pop_jump;
4384                         if (use_sse2)
4385                                 g_assert_not_reached ();
4386                         amd64_push_reg (code, AMD64_RAX);
4387                         amd64_fptan (code);
4388                         amd64_fnstsw (code);
4389                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4390                         check_pos = code;
4391                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4392                         amd64_fstp (code, 0); /* pop the 1.0 */
4393                         end_tan = code;
4394                         x86_jump8 (code, 0);
4395                         amd64_fldpi (code);
4396                         amd64_fp_op (code, X86_FADD, 0);
4397                         amd64_fxch (code, 1);
4398                         x86_fprem1 (code);
4399                         amd64_fstsw (code);
4400                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
4401                         pop_jump = code;
4402                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
4403                         amd64_fstp (code, 1);
4404                         amd64_fptan (code);
4405                         amd64_patch (pop_jump, code);
4406                         amd64_fstp (code, 0); /* pop the 1.0 */
4407                         amd64_patch (check_pos, code);
4408                         amd64_patch (end_tan, code);
4409                         amd64_fldz (code);
4410                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4411                         amd64_pop_reg (code, AMD64_RAX);
4412                         break;
4413                 }
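                /*
                 * fptan sets C2 in the FPU status word when the operand is out of
                 * range (|x| >= 2^63); the branch above then reduces the argument
                 * modulo 2*pi with fprem1 (the fldpi/fadd pair builds 2*pi) and
                 * retries, which is what makes this sequence so long.
                 */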
4414                 case OP_ATAN:
4415                         if (use_sse2)
4416                                 g_assert_not_reached ();
4417                         x86_fld1 (code);
4418                         amd64_fpatan (code);
4419                         amd64_fldz (code);
4420                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
4421                         break;          
4422                 case OP_SQRT:
4423                         if (use_sse2) {
4424                                 EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
4425                         }
4426                         else
4427                                 amd64_fsqrt (code);
4428                         break;          
4429                 case OP_X86_FPOP:
4430                         if (!use_sse2)
4431                                 amd64_fstp (code, 0);
4432                         break;          
4433                 case OP_FREM: {
4434                         guint8 *l1, *l2;
4435
4436                         if (use_sse2)
4437                                 g_assert_not_reached ();
4438                         amd64_push_reg (code, AMD64_RAX);
4439                         /* we need to exchange ST(0) with ST(1) */
4440                         amd64_fxch (code, 1);
4441
                        /* this requires a loop, because fprem sometimes 
                         * returns a partial remainder */
4444                         l1 = code;
4445                         /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
4446                         /* x86_fprem1 (code); */
4447                         amd64_fprem (code);
4448                         amd64_fnstsw (code);
4449                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
4450                         l2 = code + 2;
4451                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
4452
4453                         /* pop result */
4454                         amd64_fstp (code, 1);
4455
4456                         amd64_pop_reg (code, AMD64_RAX);
4457                         break;
4458                 }
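                /*
                 * fprem leaves C2 set in the status word while the reduction is
                 * still partial; the and/branch pair above therefore loops until C2
                 * clears and a final remainder is left on the FP stack.
                 */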
4459                 case OP_FCOMPARE:
4460                         if (use_sse2) {
4461                                 /* 
4462                                  * The two arguments are swapped because the fbranch instructions
4463                                  * depend on this for the non-sse case to work.
4464                                  */
4465                                 amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4466                                 break;
4467                         }
4468                         if (cfg->opt & MONO_OPT_FCMOV) {
4469                                 amd64_fcomip (code, 1);
4470                                 amd64_fstp (code, 0);
4471                                 break;
4472                         }
                        /* this overwrites RAX */
4474                         EMIT_FPCOMPARE(code);
4475                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4476                         break;
4477                 case OP_FCEQ:
4478                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4479                                 /* zeroing the register at the start results in 
4480                                  * shorter and faster code (we can also remove the widening op)
4481                                  */
4482                                 guchar *unordered_check;
4483                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4484                                 
4485                                 if (use_sse2)
4486                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
4487                                 else {
4488                                         amd64_fcomip (code, 1);
4489                                         amd64_fstp (code, 0);
4490                                 }
4491                                 unordered_check = code;
4492                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4493                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
4494                                 amd64_patch (unordered_check, code);
4495                                 break;
4496                         }
4497                         if (ins->dreg != AMD64_RAX) 
4498                                 amd64_push_reg (code, AMD64_RAX);
4499
4500                         EMIT_FPCOMPARE(code);
4501                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4502                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4503                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4504                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4505
4506                         if (ins->dreg != AMD64_RAX) 
4507                                 amd64_pop_reg (code, AMD64_RAX);
4508                         break;
4509                 case OP_FCLT:
4510                 case OP_FCLT_UN:
4511                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4512                                 /* zeroing the register at the start results in 
4513                                  * shorter and faster code (we can also remove the widening op)
4514                                  */
4515                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4516                                 if (use_sse2)
4517                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4518                                 else {
4519                                         amd64_fcomip (code, 1);
4520                                         amd64_fstp (code, 0);
4521                                 }
4522                                 if (ins->opcode == OP_FCLT_UN) {
4523                                         guchar *unordered_check = code;
4524                                         guchar *jump_to_end;
4525                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4526                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4527                                         jump_to_end = code;
4528                                         x86_jump8 (code, 0);
4529                                         amd64_patch (unordered_check, code);
4530                                         amd64_inc_reg (code, ins->dreg);
4531                                         amd64_patch (jump_to_end, code);
4532                                 } else {
4533                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
4534                                 }
4535                                 break;
4536                         }
4537                         if (ins->dreg != AMD64_RAX) 
4538                                 amd64_push_reg (code, AMD64_RAX);
4539
4540                         EMIT_FPCOMPARE(code);
4541                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4542                         if (ins->opcode == OP_FCLT_UN) {
4543                                 guchar *is_not_zero_check, *end_jump;
4544                                 is_not_zero_check = code;
4545                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4546                                 end_jump = code;
4547                                 x86_jump8 (code, 0);
4548                                 amd64_patch (is_not_zero_check, code);
4549                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4550
4551                                 amd64_patch (end_jump, code);
4552                         }
4553                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4554                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4555
4556                         if (ins->dreg != AMD64_RAX) 
4557                                 amd64_pop_reg (code, AMD64_RAX);
4558                         break;
4559                 case OP_FCGT:
4560                 case OP_FCGT_UN:
4561                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4562                                 /* zeroing the register at the start results in 
4563                                  * shorter and faster code (we can also remove the widening op)
4564                                  */
4565                                 guchar *unordered_check;
4566                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4567                                 if (use_sse2)
4568                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
4569                                 else {
4570                                         amd64_fcomip (code, 1);
4571                                         amd64_fstp (code, 0);
4572                                 }
4573                                 if (ins->opcode == OP_FCGT) {
4574                                         unordered_check = code;
4575                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4576                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4577                                         amd64_patch (unordered_check, code);
4578                                 } else {
4579                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
4580                                 }
4581                                 break;
4582                         }
4583                         if (ins->dreg != AMD64_RAX) 
4584                                 amd64_push_reg (code, AMD64_RAX);
4585
4586                         EMIT_FPCOMPARE(code);
4587                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
4588                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4589                         if (ins->opcode == OP_FCGT_UN) {
4590                                 guchar *is_not_zero_check, *end_jump;
4591                                 is_not_zero_check = code;
4592                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4593                                 end_jump = code;
4594                                 x86_jump8 (code, 0);
4595                                 amd64_patch (is_not_zero_check, code);
4596                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4597
4598                                 amd64_patch (end_jump, code);
4599                         }
4600                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
4601                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
4602
4603                         if (ins->dreg != AMD64_RAX) 
4604                                 amd64_pop_reg (code, AMD64_RAX);
4605                         break;
4606                 case OP_FCLT_MEMBASE:
4607                 case OP_FCGT_MEMBASE:
4608                 case OP_FCLT_UN_MEMBASE:
4609                 case OP_FCGT_UN_MEMBASE:
4610                 case OP_FCEQ_MEMBASE: {
4611                         guchar *unordered_check, *jump_to_end;
4612                         int x86_cond;
4613                         g_assert (use_sse2);
4614
4615                         amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
4616                         amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
4617
4618                         switch (ins->opcode) {
4619                         case OP_FCEQ_MEMBASE:
4620                                 x86_cond = X86_CC_EQ;
4621                                 break;
4622                         case OP_FCLT_MEMBASE:
4623                         case OP_FCLT_UN_MEMBASE:
4624                                 x86_cond = X86_CC_LT;
4625                                 break;
4626                         case OP_FCGT_MEMBASE:
4627                         case OP_FCGT_UN_MEMBASE:
4628                                 x86_cond = X86_CC_GT;
4629                                 break;
4630                         default:
4631                                 g_assert_not_reached ();
4632                         }
4633
4634                         unordered_check = code;
4635                         x86_branch8 (code, X86_CC_P, 0, FALSE);
4636                         amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
4637
4638                         switch (ins->opcode) {
4639                         case OP_FCEQ_MEMBASE:
4640                         case OP_FCLT_MEMBASE:
4641                         case OP_FCGT_MEMBASE:
4642                                 amd64_patch (unordered_check, code);
4643                                 break;
4644                         case OP_FCLT_UN_MEMBASE:
4645                         case OP_FCGT_UN_MEMBASE:
4646                                 jump_to_end = code;
4647                                 x86_jump8 (code, 0);
4648                                 amd64_patch (unordered_check, code);
4649                                 amd64_inc_reg (code, ins->dreg);
4650                                 amd64_patch (jump_to_end, code);
4651                                 break;
4652                         default:
4653                                 break;
4654                         }
4655                         break;
4656                 }
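                     /*
                      * For the fbranch opcodes below: on the FCMOV/SSE paths the flags come
                      * straight from fcomip/comisd (PF is set on an unordered result), while
                      * on the plain x87 path EMIT_FPCOMPARE has left the FPU status word in
                      * %ax, so the C0/C2/C3 bits are tested with AND/CMP against masks such
                      * as X86_FP_C0 and X86_FP_C3.
                      */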
4657                 case OP_FBEQ:
4658                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4659                                 guchar *jump = code;
4660                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
4661                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4662                                 amd64_patch (jump, code);
4663                                 break;
4664                         }
4665                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
4666                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
4667                         break;
4668                 case OP_FBNE_UN:
4669                         /* Branch if C013 != 100 */
4670                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4671                                 /* branch if !ZF or (PF|CF) */
4672                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4673                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4674                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
4675                                 break;
4676                         }
4677                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4678                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4679                         break;
4680                 case OP_FBLT:
4681                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4682                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4683                                 break;
4684                         }
4685                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4686                         break;
4687                 case OP_FBLT_UN:
4688                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4689                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4690                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
4691                                 break;
4692                         }
4693                         if (ins->opcode == OP_FBLT_UN) {
4694                                 guchar *is_not_zero_check, *end_jump;
4695                                 is_not_zero_check = code;
4696                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4697                                 end_jump = code;
4698                                 x86_jump8 (code, 0);
4699                                 amd64_patch (is_not_zero_check, code);
4700                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4701
4702                                 amd64_patch (end_jump, code);
4703                         }
4704                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4705                         break;
4706                 case OP_FBGT:
4707                 case OP_FBGT_UN:
4708                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4709                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
4710                                 break;
4711                         }
4712                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4713                         if (ins->opcode == OP_FBGT_UN) {
4714                                 guchar *is_not_zero_check, *end_jump;
4715                                 is_not_zero_check = code;
4716                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
4717                                 end_jump = code;
4718                                 x86_jump8 (code, 0);
4719                                 amd64_patch (is_not_zero_check, code);
4720                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
4721
4722                                 amd64_patch (end_jump, code);
4723                         }
4724                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4725                         break;
4726                 case OP_FBGE:
4727                         /* Branch if C013 == 100 or 001 */
4728                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4729                                 guchar *br1;
4730
4731                                 /* skip branch if C1=1 */
4732                                 br1 = code;
4733                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4734                                 /* branch if (C0 | C3) = 1 */
4735                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
4736                                 amd64_patch (br1, code);
4737                                 break;
4738                         }
4739                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4740                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4741                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
4742                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4743                         break;
4744                 case OP_FBGE_UN:
4745                         /* Branch if C013 == 000 */
4746                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4747                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
4748                                 break;
4749                         }
4750                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4751                         break;
4752                 case OP_FBLE:
4753                         /* Branch if C013=000 or 100 */
4754                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4755                                 guchar *br1;
4756
4757                                 /* skip branch if C1=1 */
4758                                 br1 = code;
4759                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
4760                                 /* branch if C0=0 */
4761                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
4762                                 amd64_patch (br1, code);
4763                                 break;
4764                         }
4765                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
4766                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4767                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
4768                         break;
4769                 case OP_FBLE_UN:
4770                         /* Branch if C013 != 001 */
4771                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
4772                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
4773                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
4774                                 break;
4775                         }
4776                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4777                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
4778                         break;
4779                 case CEE_CKFINITE: {
4780                         if (use_sse2) {
4781                                 /* Transfer value to the fp stack */
4782                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4783                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
4784                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
4785                         }
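                             /*
                              * fxam classifies st(0) into the C3/C2/C0 bits of the status
                              * word; masking with 0x4100 keeps C3 (0x4000) and C0 (0x0100).
                              * NaN and infinity both leave C3=0, C0=1, so the masked value
                              * equals X86_FP_C0 exactly for the non-finite values that must
                              * throw.
                              */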
4786                         amd64_push_reg (code, AMD64_RAX);
4787                         amd64_fxam (code);
4788                         amd64_fnstsw (code);
4789                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
4790                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
4791                         amd64_pop_reg (code, AMD64_RAX);
4792                         if (use_sse2) {
4793                                 amd64_fstp (code, 0);
4794                         }                               
4795                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
4796                         break;
4797                 }
4798                 case OP_TLS_GET: {
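                             /*
                              * On amd64 Linux the thread-local block is addressed relative
                              * to the %fs segment, so a TLS load is a single segment-prefixed
                              * move, roughly: mov %fs:inst_offset, dreg.
                              */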
4799                         x86_prefix (code, X86_FS_PREFIX);
4800                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
4801                         break;
4802                 }
4803                 case OP_ATOMIC_ADD_I4:
4804                 case OP_ATOMIC_ADD_I8: {
4805                         int dreg = ins->dreg;
4806                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
4807
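                             /*
                              * lock xadd atomically performs: tmp = *mem; *mem = tmp + reg;
                              * reg = tmp; so after the instruction dreg holds the old value
                              * of the memory location, which is what OP_ATOMIC_ADD returns.
                              */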
4808                         if (dreg == ins->inst_basereg)
4809                                 dreg = AMD64_R11;
4810                         
4811                         if (dreg != ins->sreg2)
4812                                 amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4813
4814                         x86_prefix (code, X86_LOCK_PREFIX);
4815                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4816
4817                         if (dreg != ins->dreg)
4818                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4819
4820                         break;
4821                 }
4822                 case OP_ATOMIC_ADD_NEW_I4:
4823                 case OP_ATOMIC_ADD_NEW_I8: {
4824                         int dreg = ins->dreg;
4825                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
4826
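                             /*
                              * Same lock xadd as above, but the _NEW variant must return the
                              * new value, so sreg2 is added back to the old value that xadd
                              * leaves in dreg.
                              */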
4827                         if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
4828                                 dreg = AMD64_R11;
4829
4830                         amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
4831                         amd64_prefix (code, X86_LOCK_PREFIX);
4832                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
4833                         /* dreg contains the old value; add the sreg2 value to produce the new one */
4834                         amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
4835                         
4836                         if (ins->dreg != dreg)
4837                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
4838
4839                         break;
4840                 }
4841                 case OP_ATOMIC_EXCHANGE_I4:
4842                 case OP_ATOMIC_EXCHANGE_I8: {
4843                         guchar *br[2];
4844                         int sreg2 = ins->sreg2;
4845                         int breg = ins->inst_basereg;
4846                         guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
4847
4848                         /* 
4849                          * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
4850                          * an explanation of how this works.
4851                          */
4852
4853                         /* cmpxchg uses rax as the comparand, so we need to make sure we can use it;
4854                          * this is a hack to overcome limits in the x86 reg allocator
4855                          * (req: dreg == rax and sreg2 != rax and breg != rax)
4856                          */
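                             /*
                              * A sketch of the sequence emitted below:
                              *
                              *   rax = *addr;
                              *   retry:
                              *     lock cmpxchg [addr], sreg2  // if (*addr == rax) { *addr = sreg2; ZF = 1; }
                              *                                 // else { rax = *addr; ZF = 0; }
                              *     jne retry
                              *   dreg = rax;                   // the value that was swapped out
                              */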
4857                         if (ins->dreg != AMD64_RAX)
4858                                 amd64_push_reg (code, AMD64_RAX);
4859                         
4860                         /* We need the EAX reg for the cmpxchg */
4861                         if (ins->sreg2 == AMD64_RAX) {
4862                                 amd64_push_reg (code, AMD64_RDX);
4863                                 amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
4864                                 sreg2 = AMD64_RDX;
4865                         }
4866
4867                         if (breg == AMD64_RAX) {
4868                                 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, size);
4869                                 breg = AMD64_R11;
4870                         }
4871
4872                         amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
4873
4874                         br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
4875                         amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
4876                         br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
4877                         amd64_patch (br [1], br [0]);
4878
4879                         if (ins->dreg != AMD64_RAX) {
4880                                 amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
4881                                 amd64_pop_reg (code, AMD64_RAX);
4882                         }
4883
4884                         if (ins->sreg2 != sreg2)
4885                                 amd64_pop_reg (code, AMD64_RDX);
4886
4887                         break;
4888                 }
4889                 default:
4890                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4891                         g_assert_not_reached ();
4892                 }
4893
4894                 if ((code - cfg->native_code - offset) > max_len) {
4895                         g_warning ("wrong maximum instruction length for instruction %s (expected %d, got %ld)",
4896                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4897                         g_assert_not_reached ();
4898                 }
4899                
4900                 cpos += max_len;
4901
4902                 last_ins = ins;
4903                 last_offset = offset;
4904                 
4905                 ins = ins->next;
4906         }
4907
4908         cfg->code_len = code - cfg->native_code;
4909 }
4910
4911 void
4912 mono_arch_register_lowlevel_calls (void)
4913 {
4914 }
4915
4916 void
4917 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4918 {
4919         MonoJumpInfo *patch_info;
4920
4921         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4922                 unsigned char *ip = patch_info->ip.i + code;
4923                 const unsigned char *target;
4924
4925                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4926
4927                 if (mono_compile_aot) {
4928                         switch (patch_info->type) {
4929                         case MONO_PATCH_INFO_BB:
4930                         case MONO_PATCH_INFO_LABEL:
4931                                 break;
4932                         default: {
4933                                 /* Just to make code which runs at AOT time work */
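                                     /*
                                      * The real target is stored in a pointer-sized slot
                                      * reserved from the domain's code manager, and the patch
                                      * value becomes the displacement from ip to that slot,
                                      * which the RIP-relative forms being patched can reach.
                                      */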
4934                                 const unsigned char **tmp;
4935
4936                                 mono_domain_lock (domain);
4937                                 tmp = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer));
4938                                 mono_domain_unlock (domain);
4939
4940                                 *tmp = target;
4941                                 target = (const unsigned char*)(guint64)((guint8*)tmp - (guint8*)ip);
4942                                 break;
4943                         }
4944                         }
4945                 }
4946
4947                 switch (patch_info->type) {
4948                 case MONO_PATCH_INFO_NONE:
4949                         continue;
4950                 case MONO_PATCH_INFO_CLASS_INIT: {
4951                         /* Might already have been changed to a nop */
4952                         guint8* ip2 = ip;
4953                         if (mono_compile_aot)
4954                                 amd64_call_membase (ip2, AMD64_RIP, 0);
4955                         else {
4956                                 amd64_call_code (ip2, 0);
4957                         }
4958                         break;
4959                 }
4960                 case MONO_PATCH_INFO_METHOD_REL:
4961                 case MONO_PATCH_INFO_R8:
4962                 case MONO_PATCH_INFO_R4:
4963                         g_assert_not_reached ();
4964                         continue;
4965                 case MONO_PATCH_INFO_BB:
4966                         break;
4967                 default:
4968                         break;
4969                 }
4970                 amd64_patch (ip, (gpointer)target);
4971         }
4972 }
4973
4974 guint8 *
4975 mono_arch_emit_prolog (MonoCompile *cfg)
4976 {
4977         MonoMethod *method = cfg->method;
4978         MonoBasicBlock *bb;
4979         MonoMethodSignature *sig;
4980         MonoInst *inst;
4981         int alloc_size, pos, max_offset, i, quad;
4982         guint8 *code;
4983         CallInfo *cinfo;
4984
4985         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
4986         code = cfg->native_code = g_malloc (cfg->code_size);
4987
4988         amd64_push_reg (code, AMD64_RBP);
4989         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
4990
4991         /* Stack alignment check */
4992 #if 0
4993         {
4994                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4995                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4996                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4997                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4998                 amd64_breakpoint (code);
4999         }
5000 #endif
5001
5002         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
5003         pos = 0;
5004
5005         if (method->save_lmf) {
5006                 gint32 lmf_offset;
5007
5008                 pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
5009
5010                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
5011
5012                 lmf_offset = - cfg->arch.lmf_offset;
5013
5014                 /* Save ip */
5015                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
5016                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
5017                 /* Save fp */
5018                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
5019                 /* Save method */
5020                 /* FIXME: add a relocation for this */
5021                 if (IS_IMM32 (cfg->method))
5022                         amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
5023                 else {
5024                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
5025                         amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
5026                 }
5027                 /* Save callee saved regs */
5028                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
5029                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
5030                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
5031                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
5032                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
5033         } else {
5034
5035                 for (i = 0; i < AMD64_NREG; ++i)
5036                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5037                                 amd64_push_reg (code, i);
5038                                 pos += sizeof (gpointer);
5039                         }
5040         }
5041
5042         alloc_size -= pos;
5043
5044         if (alloc_size) {
5045                 /* See mono_emit_stack_alloc */
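                     /*
                      * Large frames have to be committed one page at a time: each 4K page
                      * is touched in order so that the OS guard page is hit and the stack
                      * grows, instead of %rsp jumping straight past the guard page.
                      */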
5046 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
5047                 guint32 remaining_size = alloc_size;
5048                 while (remaining_size >= 0x1000) {
5049                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
5050                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
5051                         remaining_size -= 0x1000;
5052                 }
5053                 if (remaining_size)
5054                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
5055 #else
5056                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
5057 #endif
5058         }
5059
5060         /* compute max_offset in order to use short forward jumps */
5061         max_offset = 0;
5062         if (cfg->opt & MONO_OPT_BRANCH) {
5063                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5064                         MonoInst *ins = bb->code;
5065                         bb->max_offset = max_offset;
5066
5067                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5068                                 max_offset += 6;
5069                         /* max alignment for loops */
5070                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
5071                                 max_offset += LOOP_ALIGNMENT;
5072
5073                         while (ins) {
5074                                 if (ins->opcode == OP_LABEL)
5075                                         ins->inst_c1 = max_offset;
5076                                 
5077                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
5078                                 ins = ins->next;
5079                         }
5080                 }
5081         }
5082
5083         sig = mono_method_signature (method);
5084         pos = 0;
5085
5086         cinfo = get_call_info (sig, FALSE);
5087
5088         if (sig->ret->type != MONO_TYPE_VOID) {
5089                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
5090                         /* The vtype return address arrives in a register; save it to the stack */
5091                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
5092                 }
5093         }
5094
5095         /* Keep this in sync with emit_load_volatile_arguments */
5096         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5097                 ArgInfo *ainfo = cinfo->args + i;
5098                 gint32 stack_offset;
5099                 MonoType *arg_type;
5100                 inst = cfg->varinfo [i];
5101
5102                 if (sig->hasthis && (i == 0))
5103                         arg_type = &mono_defaults.object_class->byval_arg;
5104                 else
5105                         arg_type = sig->params [i - sig->hasthis];
5106
5107                 stack_offset = ainfo->offset + ARGS_OFFSET;
5108
5109                 /* Save volatile arguments to the stack */
5110                 if (inst->opcode != OP_REGVAR) {
5111                         switch (ainfo->storage) {
5112                         case ArgInIReg: {
5113                                 guint32 size = 8;
5114
5115                                 /* FIXME: I1 etc */
5116                                 /*
5117                                 if (stack_offset & 0x1)
5118                                         size = 1;
5119                                 else if (stack_offset & 0x2)
5120                                         size = 2;
5121                                 else if (stack_offset & 0x4)
5122                                         size = 4;
5123                                 else
5124                                         size = 8;
5125                                 */
5126                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
5127                                 break;
5128                         }
5129                         case ArgInFloatSSEReg:
5130                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5131                                 break;
5132                         case ArgInDoubleSSEReg:
5133                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
5134                                 break;
5135                         case ArgValuetypeInReg:
5136                                 for (quad = 0; quad < 2; quad ++) {
5137                                         switch (ainfo->pair_storage [quad]) {
5138                                         case ArgInIReg:
5139                                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
5140                                                 break;
5141                                         case ArgInFloatSSEReg:
5142                                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5143                                                 break;
5144                                         case ArgInDoubleSSEReg:
5145                                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
5146                                                 break;
5147                                         case ArgNone:
5148                                                 break;
5149                                         default:
5150                                                 g_assert_not_reached ();
5151                                         }
5152                                 }
5153                                 break;
5154                         default:
5155                                 break;
5156                         }
5157                 }
5158
5159                 if (inst->opcode == OP_REGVAR) {
5160                         /* Argument allocated to (non-volatile) register */
5161                         switch (ainfo->storage) {
5162                         case ArgInIReg:
5163                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
5164                                 break;
5165                         case ArgOnStack:
5166                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
5167                                 break;
5168                         default:
5169                                 g_assert_not_reached ();
5170                         }
5171                 }
5172         }
5173
5174         if (method->save_lmf) {
5175                 gint32 lmf_offset;
5176
5177                 if (lmf_tls_offset != -1) {
5178                         /* Load lmf quickly using the FS register */
5179                         x86_prefix (code, X86_FS_PREFIX);
5180                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
5181                 }
5182                 else {
5183                         /* 
5184                          * The call might clobber argument registers, but they are already
5185                          * saved to the stack/global regs.
5186                          */
5187
5188                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
5189                                                                  (gpointer)"mono_get_lmf_addr");                
5190                 }
5191
5192                 lmf_offset = - cfg->arch.lmf_offset;
5193
5194                 /* Save lmf_addr */
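                     /*
                      * Link the new LMF (Last Managed Frame) onto the per-thread list;
                      * the three stores below amount to:
                      *
                      *   lmf->lmf_addr = lmf_addr;
                      *   lmf->previous_lmf = *lmf_addr;
                      *   *lmf_addr = lmf;
                      */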
5195                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
5196                 /* Save previous_lmf */
5197                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
5198                 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
5199                 /* Set new lmf */
5200                 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
5201                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
5202         }
5203
5204
5205         g_free (cinfo);
5206
5207         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5208                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5209
5210         cfg->code_len = code - cfg->native_code;
5211
5212         g_assert (cfg->code_len < cfg->code_size);
5213
5214         return code;
5215 }
5216
5217 void
5218 mono_arch_emit_epilog (MonoCompile *cfg)
5219 {
5220         MonoMethod *method = cfg->method;
5221         int quad, pos, i;
5222         guint8 *code;
5223         int max_epilog_size = 16;
5224         CallInfo *cinfo;
5225         
5226         if (cfg->method->save_lmf)
5227                 max_epilog_size += 256;
5228         
5229         if (mono_jit_trace_calls != NULL)
5230                 max_epilog_size += 50;
5231
5232         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5233                 max_epilog_size += 50;
5234
5235         max_epilog_size += (AMD64_NREG * 2);
5236
5237         while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5238                 cfg->code_size *= 2;
5239                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5240                 mono_jit_stats.code_reallocs++;
5241         }
5242
5243         code = cfg->native_code + cfg->code_len;
5244
5245         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5246                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5247
5248         /* the code restoring the registers must be kept in sync with CEE_JMP */
5249         pos = 0;
5250         
5251         if (method->save_lmf) {
5252                 gint32 lmf_offset = - cfg->arch.lmf_offset;
5253
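                     /*
                      * Unlink this frame's LMF from the per-thread list; the three moves
                      * below amount to: *lmf->lmf_addr = lmf->previous_lmf;
                      */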
5254                 /* Restore previous lmf */
5255                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
5256                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
5257                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
5258
5259                 /* Restore callee saved regs */
5260                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
5261                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
5262                 }
5263                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
5264                         amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
5265                 }
5266                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
5267                         amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
5268                 }
5269                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
5270                         amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
5271                 }
5272                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
5273                         amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
5274                 }
5275         } else {
5276
5277                 for (i = 0; i < AMD64_NREG; ++i)
5278                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
5279                                 pos -= sizeof (gpointer);
5280
5281                 if (pos) {
5282                         if (pos == - sizeof (gpointer)) {
5283                                 /* Only one register, so avoid lea */
5284                                 for (i = AMD64_NREG - 1; i > 0; --i)
5285                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5286                                                 amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
5287                                         }
5288                         }
5289                         else {
5290                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
5291
5292                                 /* Pop registers in reverse order */
5293                                 for (i = AMD64_NREG - 1; i > 0; --i)
5294                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
5295                                                 amd64_pop_reg (code, i);
5296                                         }
5297                         }
5298                 }
5299         }
5300
5301         /* Load returned vtypes into registers if needed */
5302         cinfo = get_call_info (mono_method_signature (method), FALSE);
5303         if (cinfo->ret.storage == ArgValuetypeInReg) {
5304                 ArgInfo *ainfo = &cinfo->ret;
5305                 MonoInst *inst = cfg->ret;
5306
5307                 for (quad = 0; quad < 2; quad ++) {
5308                         switch (ainfo->pair_storage [quad]) {
5309                         case ArgInIReg:
5310                                 amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
5311                                 break;
5312                         case ArgInFloatSSEReg:
5313                                 amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5314                                 break;
5315                         case ArgInDoubleSSEReg:
5316                                 amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
5317                                 break;
5318                         case ArgNone:
5319                                 break;
5320                         default:
5321                                 g_assert_not_reached ();
5322                         }
5323                 }
5324         }
5325         g_free (cinfo);
5326
5327         amd64_leave (code);
5328         amd64_ret (code);
5329
5330         cfg->code_len = code - cfg->native_code;
5331
5332         g_assert (cfg->code_len < cfg->code_size);
5333
5334 }
5335
5336 void
5337 mono_arch_emit_exceptions (MonoCompile *cfg)
5338 {
5339         MonoJumpInfo *patch_info;
5340         int nthrows, i;
5341         guint8 *code;
5342         MonoClass *exc_classes [16];
5343         guint8 *exc_throw_start [16], *exc_throw_end [16];
5344         guint32 code_size = 0;
5345
5346         /* Compute needed space */
5347         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5348                 if (patch_info->type == MONO_PATCH_INFO_EXC)
5349                         code_size += 40;
5350                 if (patch_info->type == MONO_PATCH_INFO_R8)
5351                         code_size += 8 + 7; /* sizeof (double) + alignment */
5352                 if (patch_info->type == MONO_PATCH_INFO_R4)
5353                         code_size += 4 + 7; /* sizeof (float) + alignment */
5354         }
5355
5356         while (cfg->code_len + code_size > (cfg->code_size - 16)) {
5357                 cfg->code_size *= 2;
5358                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5359                 mono_jit_stats.code_reallocs++;
5360         }
5361
5362         code = cfg->native_code + cfg->code_len;
5363
5364         /* add code to raise exceptions */
5365         nthrows = 0;
5366         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5367                 switch (patch_info->type) {
5368                 case MONO_PATCH_INFO_EXC: {
5369                         MonoClass *exc_class;
5370                         guint8 *buf, *buf2;
5371                         guint32 throw_ip;
5372
5373                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
5374
5375                         exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5376                         g_assert (exc_class);
5377                         throw_ip = patch_info->ip.i;
5378
5379                         //x86_breakpoint (code);
5380                         /* Find a throw sequence for the same exception class */
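                             /*
                              * Throw sequences are shared per exception class: %rsi is loaded
                              * with the distance from the throw site to the end of the shared
                              * call sequence, so the throw helper can recover the original
                              * throw IP from its return address; %rdi carries the exception
                              * type token.
                              */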
5381                         for (i = 0; i < nthrows; ++i)
5382                                 if (exc_classes [i] == exc_class)
5383                                         break;
5384                         if (i < nthrows) {
5385                                 amd64_mov_reg_imm (code, AMD64_RSI, (exc_throw_end [i] - cfg->native_code) - throw_ip);
5386                                 x86_jump_code (code, exc_throw_start [i]);
5387                                 patch_info->type = MONO_PATCH_INFO_NONE;
5388                         }
5389                         else {
5390                                 buf = code;
5391                                 amd64_mov_reg_imm_size (code, AMD64_RSI, 0xf0f0f0f0, 4);
5392                                 buf2 = code;
5393
5394                                 if (nthrows < 16) {
5395                                         exc_classes [nthrows] = exc_class;
5396                                         exc_throw_start [nthrows] = code;
5397                                 }
5398
5399                                 amd64_mov_reg_imm (code, AMD64_RDI, exc_class->type_token);
5400                                 patch_info->data.name = "mono_arch_throw_corlib_exception";
5401                                 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5402                                 patch_info->ip.i = code - cfg->native_code;
5403
5404                                 if (mono_compile_aot) {
5405                                         amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
5406                                         amd64_call_reg (code, GP_SCRATCH_REG);
5407                                 } else {
5408                                         /* The callee is in memory allocated using the code manager */
5409                                         amd64_call_code (code, 0);
5410                                 }
5411
5412                                 amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
5413                                 while (buf < buf2)
5414                                         x86_nop (buf);
5415
5416                                 if (nthrows < 16) {
5417                                         exc_throw_end [nthrows] = code;
5418                                         nthrows ++;
5419                                 }
5420                         }
5421                         break;
5422                 }
5423                 default:
5424                         /* do nothing */
5425                         break;
5426                 }
5427         }
5428
5429         /* Handle relocations with RIP relative addressing */
5430         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5431                 gboolean remove = FALSE;
5432
5433                 switch (patch_info->type) {
5434                 case MONO_PATCH_INFO_R8: {
5435                         guint8 *pos;
5436
5437                         code = (guint8*)ALIGN_TO (code, 8);
5438
5439                         pos = cfg->native_code + patch_info->ip.i;
5440
5441                         *(double*)code = *(double*)patch_info->data.target;
5442
5443                         if (use_sse2)
5444                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5445                         else
5446                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
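                             /*
                              * The 32-bit displacement being patched sits 4 bytes into the
                              * SSE load (REX + 0F 10 + ModRM) but only 3 bytes into the x87
                              * fld membase form, hence the two different offsets above.
                              */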
5447                         code += 8;
5448
5449                         remove = TRUE;
5450                         break;
5451                 }
5452                 case MONO_PATCH_INFO_R4: {
5453                         guint8 *pos;
5454
5455                         code = (guint8*)ALIGN_TO (code, 8);
5456
5457                         pos = cfg->native_code + patch_info->ip.i;
5458
5459                         *(float*)code = *(float*)patch_info->data.target;
5460
5461                         if (use_sse2)
5462                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
5463                         else
5464                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
5465                         code += 4;
5466
5467                         remove = TRUE;
5468                         break;
5469                 }
5470                 default:
5471                         break;
5472                 }
5473
5474                 if (remove) {
5475                         if (patch_info == cfg->patch_info)
5476                                 cfg->patch_info = patch_info->next;
5477                         else {
5478                                 MonoJumpInfo *tmp;
5479
5480                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
5481                                         ;
5482                                 tmp->next = patch_info->next;
5483                         }
5484                 }
5485         }
5486
5487         cfg->code_len = code - cfg->native_code;
5488
5489         g_assert (cfg->code_len < cfg->code_size);
5490
5491 }
5492
5493 void*
5494 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5495 {
5496         guchar *code = p;
5497         CallInfo *cinfo;
5498         MonoMethodSignature *sig;
5499         MonoInst *inst;
5500         int i, n, stack_area = 0;
5501
5502         /* Keep this in sync with mono_arch_get_argument_info */
5503
5504         if (enable_arguments) {
5505                 /* Allocate a new area on the stack and save arguments there */
5506                 sig = mono_method_signature (cfg->method);
5507
5508                 cinfo = get_call_info (sig, FALSE);
5509
5510                 n = sig->param_count + sig->hasthis;
5511
5512                 stack_area = ALIGN_TO (n * 8, 16);
5513
5514                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
5515
5516                 for (i = 0; i < n; ++i) {
5517                         inst = cfg->varinfo [i];
5518
5519                         if (inst->opcode == OP_REGVAR)
5520                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
5521                         else {
5522                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
5523                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
5524                         }
5525                 }
5526         }
5527
5528         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
5529         amd64_set_reg_template (code, AMD64_RDI);
5530         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
5531         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5532
5533         if (enable_arguments) {
5534                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
5535
5536                 g_free (cinfo);
5537         }
5538
5539         return code;
5540 }
5541
5542 enum {
5543         SAVE_NONE,
5544         SAVE_STRUCT,
5545         SAVE_EAX,
5546         SAVE_EAX_EDX,
5547         SAVE_XMM
5548 };
5549
5550 void*
5551 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
5552 {
5553         guchar *code = p;
5554         int save_mode = SAVE_NONE;
5555         MonoMethod *method = cfg->method;
5556         int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
5557         
5558         switch (rtype) {
5559         case MONO_TYPE_VOID:
5560                 /* special case string .ctor icall */
5561                 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
5562                         save_mode = SAVE_EAX;
5563                 else
5564                         save_mode = SAVE_NONE;
5565                 break;
5566         case MONO_TYPE_I8:
5567         case MONO_TYPE_U8:
5568                 save_mode = SAVE_EAX;
5569                 break;
5570         case MONO_TYPE_R4:
5571         case MONO_TYPE_R8:
5572                 save_mode = SAVE_XMM;
5573                 break;
5574         case MONO_TYPE_VALUETYPE:
5575                 save_mode = SAVE_STRUCT;
5576                 break;
5577         default:
5578                 save_mode = SAVE_EAX;
5579                 break;
5580         }
5581
5582         /* Save the result and copy it into the proper argument register */
5583         switch (save_mode) {
5584         case SAVE_EAX:
5585                 amd64_push_reg (code, AMD64_RAX);
5586                 /* Align stack */
5587                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5588                 if (enable_arguments)
5589                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
5590                 break;
5591         case SAVE_STRUCT:
5592                 /* FIXME: */
5593                 if (enable_arguments)
5594                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
5595                 break;
5596         case SAVE_XMM:
5597                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5598                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
5599                 /* Align stack */
5600                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
5601                 /* 
5602                  * The result is already in the proper argument register (%xmm0) so no
5603                  * copying is needed.
5604                  */
5605                 break;
5606         case SAVE_NONE:
5607                 break;
5608         default:
5609                 g_assert_not_reached ();
5610         }
5611
5612         /* Set %al to the number of SSE registers used, since this is a varargs call */
5613         if (save_mode == SAVE_XMM)
5614                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
5615         else
5616                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
5617
5618         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
5619         amd64_set_reg_template (code, AMD64_RDI);
5620         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
5621
5622         /* Restore result */
5623         switch (save_mode) {
5624         case SAVE_EAX:
5625                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5626                 amd64_pop_reg (code, AMD64_RAX);
5627                 break;
5628         case SAVE_STRUCT:
5629                 /* FIXME: */
5630                 break;
5631         case SAVE_XMM:
5632                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5633                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
5634                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
5635                 break;
5636         case SAVE_NONE:
5637                 break;
5638         default:
5639                 g_assert_not_reached ();
5640         }
5641
5642         return code;
5643 }
5644
5645 void
5646 mono_arch_flush_icache (guint8 *code, gint size)
5647 {
5648         /* Not needed on amd64: the instruction cache is kept coherent by the hardware */
5649 }
5650
5651 void
5652 mono_arch_flush_register_windows (void)
5653 {
5654 }
5655
5656 gboolean 
5657 mono_arch_is_inst_imm (gint64 imm)
5658 {
5659         return amd64_is_imm32 (imm);
5660 }
5661
5662 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
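/*
 * A REX prefix has the bit layout 0100WRXB: W selects 64 bit operand
 * size, while R, X and B extend the ModRM reg, SIB index and ModRM rm
 * fields to reach %r8-%r15. A minimal sketch of that layout (the real
 * accessors live in the codegen headers; the EXAMPLE_* names below are
 * only illustrative):
 */
#if 0
#define EXAMPLE_REX_W(rex) (((rex) >> 3) & 1)
#define EXAMPLE_REX_R(rex) (((rex) >> 2) & 1)
#define EXAMPLE_REX_X(rex) (((rex) >> 1) & 1)
#define EXAMPLE_REX_B(rex) ((rex) & 1)
/* e.g. 0x41 = 0100 0001 sets only REX.B, so "41 ff d3" is call *%r11 */
#endif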
5663
5664 static int reg_to_ucontext_reg [] = {
5665         REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
5666         REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
5667         REG_RIP
5668 };
5669
5670 /*
5671  * Determine whether the trap whose info is in SIGINFO was caused by
5672  * integer overflow.
5673  */
5674 gboolean
5675 mono_arch_is_int_overflow (void *sigctx, void *info)
5676 {
5677         ucontext_t *ctx = (ucontext_t*)sigctx;
5678         guint8* rip;
5679         int reg;
5680
5681         rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
5682
5683         if (IS_REX (rip [0])) {
5684                 reg = amd64_rex_b (rip [0]);
5685                 rip ++;
5686         }
5687         else
5688                 reg = 0;
5689
5690         if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
5691                 /* idiv REG */
5692                 reg += x86_modrm_rm (rip [1]);
5693
5694                 if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
5695                         return TRUE;
5696         }
5697
5698         return FALSE;
5699 }
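/*
 * Background for the check above: amd64 idiv raises the same #DE trap
 * (delivered as SIGFPE) for division by zero and for overflow, so the
 * two are told apart by the divisor: a trapping idiv whose divisor is
 * -1 can only be the overflow case. A minimal sketch of that rule:
 */
#if 0
static gboolean
example_is_overflow (gint64 dividend, gint64 divisor)
{
        /* the only 64 bit idiv that overflows is G_MININT64 / -1 */
        return (divisor == -1) && (dividend == G_MININT64);
}
#endif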
5700
5701 guint32
5702 mono_arch_get_patch_offset (guint8 *code)
5703 {
5704         return 3;
5705 }
5706
5707 gpointer*
5708 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
5709 {
5710         guint32 reg;
5711         guint32 disp;
5712         guint8 rex = 0;
5713
5714         /* go to the start of the call instruction
5715          *
5716          * address_byte = (m << 6) | (o << 3) | reg
5717          * call opcode: 0xff address_byte displacement
5718          * 0xff m=1,o=2 disp8
5719          * 0xff m=2,o=2 disp32
5720          */
5721         code -= 7;
5722
5723         /* 
5724          * A given byte sequence can match more than one case here, so we have to be
5725          * really careful about the ordering of the cases. Longer sequences
5726          * come first.
5727          */
5728         if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
5729                 /* call OFFSET(%rip) */
5730                 return NULL;
5731         }
5732         else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
5733                 /* call *[reg+disp32] */
5734                 if (IS_REX (code [0]))
5735                         rex = code [0];
5736                 reg = amd64_modrm_rm (code [2]);
5737                 disp = *(guint32*)(code + 3);
5738                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5739         }
5740         else if (code [2] == 0xe8) {
5741                 /* call <ADDR> */
5742                 return NULL;
5743         }
5744         else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
5745                 /* call *%reg */
5746                 return NULL;
5747         }
5748         else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
5749                 /* call *[reg+disp8] */
5750                 if (IS_REX (code [3]))
5751                         rex = code [3];
5752                 reg = amd64_modrm_rm (code [5]);
5753                 disp = *(guint8*)(code + 6);
5754                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
5755         }
5756         else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
5757                 /*
5758                  * This is an interface call; check that the cases above can't match it first:
5759                  * 8b 40 30   mov    0x30(%eax),%eax
5760                  * ff 10      call   *(%eax)
5761                  */
5762                 if (IS_REX (code [4]))
5763                         rex = code [4];
5764                 reg = amd64_modrm_rm (code [6]);
5765                 disp = 0;
5766         }
5767         else
5768                 g_assert_not_reached ();
5769
5770         reg += amd64_rex_b (rex);
5771
5772         /* R11 is clobbered by the trampoline code */
5773         g_assert (reg != AMD64_R11);
5774
5775         return (gpointer)(((guint64)(regs [reg])) + disp);
5776 }
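/*
 * A minimal sketch of the ModRM decoding relied on above, mirroring the
 * amd64_modrm_* accessors: mod picks the addressing form (0 = no disp,
 * 1 = disp8, 2 = disp32, 3 = register direct), reg carries the opcode
 * extension (/2 for indirect calls) and rm names the base register.
 */
#if 0
#define EXAMPLE_MODRM_MOD(b) (((b) >> 6) & 0x3)
#define EXAMPLE_MODRM_REG(b) (((b) >> 3) & 0x7)
#define EXAMPLE_MODRM_RM(b)  ((b) & 0x7)
/* e.g. "ff 50 30" is call *0x30(%rax): mod=1, reg=2, rm=0 (%rax) */
#endif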
5777
5778 gpointer*
5779 mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
5780 {
5781         guint32 reg;
5782         guint32 disp;
5783
5784         code -= 10;
5785
5786         if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
5787                 /* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
5788                 reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
5789                 disp = code [6];
5790
5791                 if (reg == AMD64_RAX)
5792                         return NULL;
5793                 else
5794                         return (gpointer*)(((guint64)(regs [reg])) + disp);
5795         }
5796
5797         return NULL;
5798 }
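/*
 * Annotated form of the 10 byte sequence matched above (REG is the
 * register encoded in the first ModRM byte, extended by REX.B):
 *
 *   4x 8b <modrm>      mov REG, %rax
 *   48 8b 40 <disp8>   mov <disp8>(%rax), %rax
 *   48 ff d0           call *%rax
 */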
5799
5800 /*
5801  * Support for fast access to the thread-local lmf structure using the GS
5802  * Support for fast access to the thread-local lmf structure using the FS
5803  */
5804
5805 static gboolean tls_offset_inited = FALSE;
5806
5807 /* The passed function should consist simply of: return <tls var>; */
5808 static int 
5809 read_tls_offset_from_method (void* method)
5810 {
5811         guint8 *code = (guint8*)method;
5812
5813         /* 
5814          * Determine the offset of the TLS variable (e.g. mono_lmf_addr) returned
5815          * by the passed function, by disassembling its code.
5816          */
5817         /* This is generated by gcc 3.3.2 */
5818         if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5819                 (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5820                 (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5821                 (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
5822                 (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
5823                 (code [15] == 0x80)) {
5824                 return *(gint32*)&(code [16]);
5825         } else if
5826                 /* This is generated by gcc-3.3.2 with -O2 */
5827                 /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
5828                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5829                  (code [3] == 0x04) && (code [4] == 0x25) &&
5830                  (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
5831                  (code [16] == 0xc3)) {
5832                         return *(gint32*)&(code [12]);
5833         } else if 
5834                 /* This is generated by gcc-3.4.1 */
5835                 ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
5836                  (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
5837                  (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
5838                  (code [13] == 0xc9) && (code [14] == 0xc3)) {
5839                         return *(gint32*)&(code [9]);
5840         } else if
5841                 /* This is generated by gcc-3.4.1 with -O2 */
5842                 ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
5843                  (code [3] == 0x04) && (code [4] == 0x25)) {
5844                 return *(gint32*)&(code [5]);
5845         }
5846
5847         return -1;
5848 }
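/*
 * Byte-by-byte form of the gcc-3.3.2 -O2 pattern above, as an example of
 * what is matched; the value returned is the disp32 of the second mov:
 *
 *   64 48 8b 04 25 00 00 00 00   mov %fs:0x0, %rax
 *   48 8b 80 <disp32>            mov <disp32>(%rax), %rax
 *   c3                           retq
 */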
5849
5850 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5851
5852 static void
5853 setup_stack (MonoJitTlsData *tls)
5854 {
5855         pthread_t self = pthread_self();
5856         pthread_attr_t attr;
5857         size_t stsize = 0;
5858         struct sigaltstack sa;
5859         guint8 *staddr = NULL;
5860         guint8 *current = (guint8*)&staddr;
5861
5862         if (mono_running_on_valgrind ())
5863                 return;
5864
5865         /* Determine stack boundaries */
5866 #ifdef HAVE_PTHREAD_GETATTR_NP
5867         pthread_getattr_np( self, &attr );
5868 #else
5869 #ifdef HAVE_PTHREAD_ATTR_GET_NP
5870         pthread_attr_get_np( self, &attr );
5871 #elif defined(sun)
5872         pthread_attr_init( &attr );
5873         pthread_attr_getstacksize( &attr, &stsize );
5874 #else
5875 #error "Not implemented"
5876 #endif
5877 #endif
5878 #ifndef sun
5879         pthread_attr_getstack( &attr, (void**)&staddr, &stsize );
5880 #endif
5881
5882         g_assert (staddr);
5883
5884         g_assert ((current > staddr) && (current < staddr + stsize));
5885
5886         tls->end_of_stack = staddr + stsize;
5887
5888         /*
5889          * Threads created by NPTL do not seem to have a guard page, and since
5890          * the main thread is not created by us, we can't even set one up.
5891          * Increasing stsize fools the SIGSEGV signal handler into treating a
5892          * fault just below the stack as a stack overflow exception.
5893          */
5894         tls->stack_size = stsize + getpagesize ();
5895
5896         /* Setup an alternate signal stack */
5897         tls->signal_stack = mmap (0, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
5898         tls->signal_stack_size = SIGNAL_STACK_SIZE;
5899
5900         g_assert (tls->signal_stack);
5901
5902         sa.ss_sp = tls->signal_stack;
5903         sa.ss_size = SIGNAL_STACK_SIZE;
5904         sa.ss_flags = SS_ONSTACK;
5905         sigaltstack (&sa, NULL);
5906 }
5907
5908 #endif
5909
5910 void
5911 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5912 {
5913         if (!tls_offset_inited) {
5914                 tls_offset_inited = TRUE;
5915
5916                 lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
5917                 appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
5918                 thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
5919         }               
5920
5921 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5922         setup_stack (tls);
5923 #endif
5924 }
5925
5926 void
5927 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5928 {
5929 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
5930         struct sigaltstack sa;
5931
5932         sa.ss_sp = tls->signal_stack;
5933         sa.ss_size = SIGNAL_STACK_SIZE;
5934         sa.ss_flags = SS_DISABLE;
5935         sigaltstack (&sa, NULL);
5936
5937         if (tls->signal_stack)
5938                 munmap (tls->signal_stack, SIGNAL_STACK_SIZE);
5939 #endif
5940 }
5941
5942 void
5943 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
5944 {
5945         MonoCallInst *call = (MonoCallInst*)inst;
5946         int out_reg = param_regs [0];
5947         guint64 regpair;
5948
5949         if (vt_reg != -1) {
5950                 CallInfo * cinfo = get_call_info (inst->signature, FALSE);
5951                 MonoInst *vtarg;
5952
5953                 if (cinfo->ret.storage == ArgValuetypeInReg) {
5954                         /*
5955                          * The valuetype is in RAX:RDX after the call and needs to be copied
5956                          * to the stack. Push the address here so the call instruction can
5957                          * access it.
5958                          */
5959                         MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
5960                         vtarg->sreg1 = vt_reg;
5961                         mono_bblock_add_inst (cfg->cbb, vtarg);
5962
5963                         /* Align stack */
5964                         MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
5965                 }
5966                 else {
5967                         MONO_INST_NEW (cfg, vtarg, OP_SETREG);
5968                         vtarg->sreg1 = vt_reg;
5969                         vtarg->dreg = mono_regstate_next_int (cfg->rs);
5970                         mono_bblock_add_inst (cfg->cbb, vtarg);
5971
5972                         regpair = (((guint64)out_reg) << 32) + vtarg->dreg;
5973                         call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
5974
5975                         out_reg = param_regs [1];
5976                 }
5977
5978                 g_free (cinfo);
5979         }
5980
5981         /* add the this argument */
5982         if (this_reg != -1) {
5983                 MonoInst *this;
5984                 MONO_INST_NEW (cfg, this, OP_SETREG);
5985                 this->type = this_type;
5986                 this->sreg1 = this_reg;
5987                 this->dreg = mono_regstate_next_int (cfg->rs);
5988                 mono_bblock_add_inst (cfg->cbb, this);
5989
5990                 regpair = (((guint64)out_reg) << 32) + this->dreg;
5991                 call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
5992         }
5993 }
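/*
 * A minimal sketch of the regpair encoding used above: the hard register
 * sits in the upper 32 bits and the allocated vreg in the lower 32 bits
 * of a single pointer-sized value.
 */
#if 0
static guint64
example_pack_regpair (guint32 hreg, guint32 vreg)
{
        return (((guint64)hreg) << 32) | vreg;
}
/* consumers recover the pair as (regpair >> 32) and (guint32)regpair */
#endif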
5994
5995 MonoInst*
5996 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5997 {
5998         MonoInst *ins = NULL;
5999
6000         if (cmethod->klass == mono_defaults.math_class) {
6001                 if (strcmp (cmethod->name, "Sin") == 0) {
6002                         MONO_INST_NEW (cfg, ins, OP_SIN);
6003                         ins->inst_i0 = args [0];
6004                 } else if (strcmp (cmethod->name, "Cos") == 0) {
6005                         MONO_INST_NEW (cfg, ins, OP_COS);
6006                         ins->inst_i0 = args [0];
6007                 } else if (strcmp (cmethod->name, "Tan") == 0) {
6008                         if (use_sse2)
6009                                 return ins;
6010                         MONO_INST_NEW (cfg, ins, OP_TAN);
6011                         ins->inst_i0 = args [0];
6012                 } else if (strcmp (cmethod->name, "Atan") == 0) {
6013                         if (use_sse2)
6014                                 return ins;
6015                         MONO_INST_NEW (cfg, ins, OP_ATAN);
6016                         ins->inst_i0 = args [0];
6017                 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
6018                         MONO_INST_NEW (cfg, ins, OP_SQRT);
6019                         ins->inst_i0 = args [0];
6020                 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
6021                         MONO_INST_NEW (cfg, ins, OP_ABS);
6022                         ins->inst_i0 = args [0];
6023                 }
6024 #if 0
6025                 /* OP_FREM is not IEEE compatible */
6026                 else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
6027                         MONO_INST_NEW (cfg, ins, OP_FREM);
6028                         ins->inst_i0 = args [0];
6029                         ins->inst_i1 = args [1];
6030                 }
6031 #endif
6032         } else if (cmethod->klass->image == mono_defaults.corlib &&
6033                            (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6034                            (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6035
6036                 if (strcmp (cmethod->name, "Increment") == 0) {
6037                         MonoInst *ins_iconst;
6038                         guint32 opcode;
6039
6040                         if (fsig->params [0]->type == MONO_TYPE_I4)
6041                                 opcode = OP_ATOMIC_ADD_NEW_I4;
6042                         else if (fsig->params [0]->type == MONO_TYPE_I8)
6043                                 opcode = OP_ATOMIC_ADD_NEW_I8;
6044                         else
6045                                 g_assert_not_reached ();
6046                         MONO_INST_NEW (cfg, ins, opcode);
6047                         MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6048                         ins_iconst->inst_c0 = 1;
6049
6050                         ins->inst_i0 = args [0];
6051                         ins->inst_i1 = ins_iconst;
6052                 } else if (strcmp (cmethod->name, "Decrement") == 0) {
6053                         MonoInst *ins_iconst;
6054                         guint32 opcode;
6055
6056                         if (fsig->params [0]->type == MONO_TYPE_I4)
6057                                 opcode = OP_ATOMIC_ADD_NEW_I4;
6058                         else if (fsig->params [0]->type == MONO_TYPE_I8)
6059                                 opcode = OP_ATOMIC_ADD_NEW_I8;
6060                         else
6061                                 g_assert_not_reached ();
6062                         MONO_INST_NEW (cfg, ins, opcode);
6063                         MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6064                         ins_iconst->inst_c0 = -1;
6065
6066                         ins->inst_i0 = args [0];
6067                         ins->inst_i1 = ins_iconst;
6068                 } else if (strcmp (cmethod->name, "Add") == 0) {
6069                         guint32 opcode;
6070
6071                         if (fsig->params [0]->type == MONO_TYPE_I4)
6072                                 opcode = OP_ATOMIC_ADD_I4;
6073                         else if (fsig->params [0]->type == MONO_TYPE_I8)
6074                                 opcode = OP_ATOMIC_ADD_I8;
6075                         else
6076                                 g_assert_not_reached ();
6077                         
6078                         MONO_INST_NEW (cfg, ins, opcode);
6079
6080                         ins->inst_i0 = args [0];
6081                         ins->inst_i1 = args [1];
6082                 } else if (strcmp (cmethod->name, "Exchange") == 0) {
6083                         guint32 opcode;
6084
6085                         if (fsig->params [0]->type == MONO_TYPE_I4)
6086                                 opcode = OP_ATOMIC_EXCHANGE_I4;
6087                         else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
6088                                          (fsig->params [0]->type == MONO_TYPE_I) ||
6089                                          (fsig->params [0]->type == MONO_TYPE_OBJECT))
6090                                 opcode = OP_ATOMIC_EXCHANGE_I8;
6091                         else
6092                                 return NULL;
6093
6094                         MONO_INST_NEW (cfg, ins, opcode);
6095
6096                         ins->inst_i0 = args [0];
6097                         ins->inst_i1 = args [1];
6098                 } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6099                         /* 64 bit reads are already atomic */
6100                         MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
6101                         ins->inst_i0 = args [0];
6102                 }
6103
6104                 /* 
6105                  * Can't implement CompareExchange methods this way since they have
6106                  * three arguments.
6107                  */
6108         }
6109
6110         return ins;
6111 }
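/*
 * Illustrative note: the mapping above lets e.g. Interlocked.Increment
 * (ref int) compile to OP_ATOMIC_ADD_NEW_I4 with an OP_ICONST of 1 as
 * its second operand, so the back end can emit an atomic add (e.g. a
 * lock xadd sequence) instead of an icall.
 */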
6112
6113 gboolean
6114 mono_arch_print_tree (MonoInst *tree, int arity)
6115 {
6116         return FALSE;
6117 }
6118
6119 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
6120 {
6121         MonoInst* ins;
6122         
6123         if (appdomain_tls_offset == -1)
6124                 return NULL;
6125         
6126         MONO_INST_NEW (cfg, ins, OP_TLS_GET);
6127         ins->inst_offset = appdomain_tls_offset;
6128         return ins;
6129 }
6130
6131 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
6132 {
6133         MonoInst* ins;
6134         
6135         if (thread_tls_offset == -1)
6136                 return NULL;
6137         
6138         MONO_INST_NEW (cfg, ins, OP_TLS_GET);
6139         ins->inst_offset = thread_tls_offset;
6140         return ins;
6141 }
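/*
 * Illustrative note: both intrinsics above depend on the offsets that
 * read_tls_offset_from_method recovers at JIT startup; OP_TLS_GET then
 * stands for the same fs-relative load sequence gcc emits for __thread
 * variables, instead of a call.
 */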